pax_global_header00006660000000000000000000000064146072035560014522gustar00rootroot0000000000000052 comment=2601bb0d3c67fc00cdbd5763cc2361d2a363f0ee go-generics-cache-1.5.1/000077500000000000000000000000001460720355600147715ustar00rootroot00000000000000go-generics-cache-1.5.1/.github/000077500000000000000000000000001460720355600163315ustar00rootroot00000000000000go-generics-cache-1.5.1/.github/FUNDING.yml000066400000000000000000000000211460720355600201370ustar00rootroot00000000000000github: Code-Hex go-generics-cache-1.5.1/.github/workflows/000077500000000000000000000000001460720355600203665ustar00rootroot00000000000000go-generics-cache-1.5.1/.github/workflows/test.yml000066400000000000000000000014111460720355600220650ustar00rootroot00000000000000on: push: branches: - "main" tags: - "v*.*.*" pull_request: jobs: test: name: Test runs-on: ubuntu-latest strategy: fail-fast: false matrix: go: - '^1.18' - '^1.19' - '^1.20' - '^1.21' - '^1.22' steps: - name: Check out code into the Go module directory uses: actions/checkout@v4 - uses: actions/setup-go@v5 with: go-version: ${{ matrix.go }} cache: true - name: Test Coverage (pkg) run: go test ./... -race -coverprofile="coverage.txt" - name: Upload coverage if: ${{ matrix.go == '^1.22' }} uses: codecov/codecov-action@v4 with: file: "coverage.txt" token: ${{ secrets.CODECOV_TOKEN }} go-generics-cache-1.5.1/LICENSE000066400000000000000000000020501460720355600157730ustar00rootroot00000000000000MIT License Copyright (c) 2021 codehex Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. go-generics-cache-1.5.1/README.md000066400000000000000000000072421460720355600162550ustar00rootroot00000000000000# go-generics-cache [![.github/workflows/test.yml](https://github.com/Code-Hex/go-generics-cache/actions/workflows/test.yml/badge.svg)](https://github.com/Code-Hex/go-generics-cache/actions/workflows/test.yml) [![codecov](https://codecov.io/gh/Code-Hex/go-generics-cache/branch/main/graph/badge.svg?token=Wm7UEwgiZu)](https://codecov.io/gh/Code-Hex/go-generics-cache) [![Go Reference](https://pkg.go.dev/badge/github.com/Code-Hex/go-generics-cache.svg)](https://pkg.go.dev/github.com/Code-Hex/go-generics-cache) go-generics-cache is an in-memory key:value store/cache that is suitable for applications running on a single machine. This in-memory cache uses [Go Generics](https://go.dev/blog/generics-proposal) which is introduced in 1.18. 
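A minimal example with the default (simple) cache looks like this; the available replacement policies are listed below.

```go
package main

import (
	"fmt"

	cache "github.com/Code-Hex/go-generics-cache"
)

func main() {
	// Create a cache with string keys and int values.
	c := cache.New[string, int]()
	c.Set("a", 1)
	if v, ok := c.Get("a"); ok {
		fmt.Println(v) // 1
	}
}
```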
- a thread-safe - implemented with [Go Generics](https://go.dev/blog/generics-proposal) - TTL supported (with expiration times) - Simple cache is like `map[string]interface{}` - See [examples](https://github.com/Code-Hex/go-generics-cache/blob/main/policy/simple/example_test.go) - Cache replacement policies - **Least recently used (LRU)** - Discards the least recently used items first. - See [examples](https://github.com/Code-Hex/go-generics-cache/blob/main/policy/lru/example_test.go) - **Least-frequently used (LFU)** - Counts how often an item is needed. Those that are used least often are discarded first. - [An O(1) algorithm for implementing the LFU cache eviction scheme](http://dhruvbird.com/lfu.pdf) - See [examples](https://github.com/Code-Hex/go-generics-cache/blob/main/policy/lfu/example_test.go) - **First in first out (FIFO)** - Using this algorithm the cache behaves in the same way as a [FIFO queue](https://en.wikipedia.org/wiki/FIFO_(computing_and_electronics)). - The cache evicts the blocks in the order they were added, without any regard to how often or how many times they were accessed before. - See [examples](https://github.com/Code-Hex/go-generics-cache/blob/main/policy/fifo/example_test.go) - **Most recently used (MRU)** - In contrast to Least Recently Used (LRU), MRU discards the most recently used items first. - See [examples](https://github.com/Code-Hex/go-generics-cache/blob/main/policy/mru/example_test.go) - **Clock** - Clock is a more efficient version of FIFO than Second-chance cache algorithm. - See [examples](https://github.com/Code-Hex/go-generics-cache/blob/main/policy/clock/example_test.go) ## Requirements Go 1.18 or later. ## Install $ go get github.com/Code-Hex/go-generics-cache ## Usage See also [examples](https://github.com/Code-Hex/go-generics-cache/blob/main/example_test.go) or [go playground](https://go.dev/play/p/kDs-6wpRAcX) ```go package main import ( "context" "fmt" "time" cache "github.com/Code-Hex/go-generics-cache" ) func main() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() // use simple cache algorithm without options. c := cache.NewContext[string, int](ctx) c.Set("a", 1) gota, aok := c.Get("a") gotb, bok := c.Get("b") fmt.Println(gota, aok) // 1 true fmt.Println(gotb, bok) // 0 false // Create a cache for Number constraint. key as string, value as int. nc := cache.NewNumber[string, int]() nc.Set("age", 26, cache.WithExpiration(time.Hour)) incremented := nc.Increment("age", 1) fmt.Println(incremented) // 27 decremented := nc.Decrement("age", 1) fmt.Println(decremented) // 26 } ``` ## Articles - English: [Some tips and bothers for Go 1.18 Generics](https://dev.to/codehex/some-tips-and-bothers-for-go-118-generics-lc7) - Japanese: [Go 1.18 の Generics を使ったキャッシュライブラリを作った時に見つけた tips と微妙な点](https://zenn.dev/codehex/articles/3e6935ee6d853e) go-generics-cache-1.5.1/cache.go000066400000000000000000000225751460720355600163760ustar00rootroot00000000000000package cache import ( "context" "sync" "time" "github.com/Code-Hex/go-generics-cache/policy/clock" "github.com/Code-Hex/go-generics-cache/policy/fifo" "github.com/Code-Hex/go-generics-cache/policy/lfu" "github.com/Code-Hex/go-generics-cache/policy/lru" "github.com/Code-Hex/go-generics-cache/policy/mru" "github.com/Code-Hex/go-generics-cache/policy/simple" ) // Interface is a common-cache interface. type Interface[K comparable, V any] interface { // Get looks up a key's value from the cache. Get(key K) (value V, ok bool) // Set sets a value to the cache with key. 
replacing any existing value. Set(key K, val V) // Keys returns the keys of the cache. The order is relied on algorithms. Keys() []K // Delete deletes the item with provided key from the cache. Delete(key K) // Len returns the number of items in the cache. Len() int } var ( _ = []Interface[struct{}, any]{ (*simple.Cache[struct{}, any])(nil), (*lru.Cache[struct{}, any])(nil), (*lfu.Cache[struct{}, any])(nil), (*fifo.Cache[struct{}, any])(nil), (*mru.Cache[struct{}, any])(nil), (*clock.Cache[struct{}, any])(nil), } ) // Item is an item type Item[K comparable, V any] struct { Key K Value V Expiration time.Time InitialReferenceCount int } func (item *Item[K, V]) hasExpiration() bool { return !item.Expiration.IsZero() } // Expired returns true if the item has expired. func (item *Item[K, V]) Expired() bool { if !item.hasExpiration() { return false } return nowFunc().After(item.Expiration) } // GetReferenceCount returns reference count to be used when setting // the cache item for the first time. func (item *Item[K, V]) GetReferenceCount() int { return item.InitialReferenceCount } var nowFunc = time.Now // ItemOption is an option for cache item. type ItemOption func(*itemOptions) type itemOptions struct { expiration time.Time // default none referenceCount int } // WithExpiration is an option to set expiration time for any items. // If the expiration is zero or negative value, it treats as w/o expiration. func WithExpiration(exp time.Duration) ItemOption { return func(o *itemOptions) { o.expiration = nowFunc().Add(exp) } } // WithReferenceCount is an option to set reference count for any items. // This option is only applicable to cache policies that have a reference count (e.g., Clock, LFU). // referenceCount specifies the reference count value to set for the cache item. // // the default is 1. func WithReferenceCount(referenceCount int) ItemOption { return func(o *itemOptions) { o.referenceCount = referenceCount } } // newItem creates a new item with specified any options. func newItem[K comparable, V any](key K, val V, opts ...ItemOption) *Item[K, V] { o := new(itemOptions) for _, optFunc := range opts { optFunc(o) } return &Item[K, V]{ Key: key, Value: val, Expiration: o.expiration, InitialReferenceCount: o.referenceCount, } } // Cache is a thread safe cache. type Cache[K comparable, V any] struct { cache Interface[K, *Item[K, V]] // mu is used to do lock in some method process. mu sync.Mutex janitor *janitor expManager *expirationManager[K] } // Option is an option for cache. type Option[K comparable, V any] func(*options[K, V]) type options[K comparable, V any] struct { cache Interface[K, *Item[K, V]] janitorInterval time.Duration } func newOptions[K comparable, V any]() *options[K, V] { return &options[K, V]{ cache: simple.NewCache[K, *Item[K, V]](), janitorInterval: time.Minute, } } // AsLRU is an option to make a new Cache as LRU algorithm. func AsLRU[K comparable, V any](opts ...lru.Option) Option[K, V] { return func(o *options[K, V]) { o.cache = lru.NewCache[K, *Item[K, V]](opts...) } } // AsLFU is an option to make a new Cache as LFU algorithm. func AsLFU[K comparable, V any](opts ...lfu.Option) Option[K, V] { return func(o *options[K, V]) { o.cache = lfu.NewCache[K, *Item[K, V]](opts...) } } // AsFIFO is an option to make a new Cache as FIFO algorithm. func AsFIFO[K comparable, V any](opts ...fifo.Option) Option[K, V] { return func(o *options[K, V]) { o.cache = fifo.NewCache[K, *Item[K, V]](opts...) } } // AsMRU is an option to make a new Cache as MRU algorithm. 
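// A cache using this option might be constructed as in the following sketch
// (the capacity value is arbitrary):
//
//	c := cache.New(cache.AsMRU[string, int](mru.WithCapacity(128)))
//	c.Set("a", 1)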
func AsMRU[K comparable, V any](opts ...mru.Option) Option[K, V] { return func(o *options[K, V]) { o.cache = mru.NewCache[K, *Item[K, V]](opts...) } } // AsClock is an option to make a new Cache as clock algorithm. func AsClock[K comparable, V any](opts ...clock.Option) Option[K, V] { return func(o *options[K, V]) { o.cache = clock.NewCache[K, *Item[K, V]](opts...) } } // WithJanitorInterval is an option to specify how often cache should delete expired items. // // Default is 1 minute. func WithJanitorInterval[K comparable, V any](d time.Duration) Option[K, V] { return func(o *options[K, V]) { o.janitorInterval = d } } // New creates a new thread safe Cache. // The janitor will not be stopped which is created by this function. If you // want to stop the janitor gracefully, You should use the `NewContext` function // instead of this. // // There are several Cache replacement policies available with you specified any options. func New[K comparable, V any](opts ...Option[K, V]) *Cache[K, V] { return NewContext(context.Background(), opts...) } // NewContext creates a new thread safe Cache with context. // This function will be stopped an internal janitor when the context is cancelled. // // There are several Cache replacement policies available with you specified any options. func NewContext[K comparable, V any](ctx context.Context, opts ...Option[K, V]) *Cache[K, V] { o := newOptions[K, V]() for _, optFunc := range opts { optFunc(o) } cache := &Cache[K, V]{ cache: o.cache, janitor: newJanitor(ctx, o.janitorInterval), expManager: newExpirationManager[K](), } cache.janitor.run(cache.DeleteExpired) return cache } // Get looks up a key's value from the cache. func (c *Cache[K, V]) Get(key K) (zero V, ok bool) { c.mu.Lock() defer c.mu.Unlock() item, ok := c.cache.Get(key) if !ok { return } // Returns nil if the item has been expired. // Do not delete here and leave it to an external process such as Janitor. if item.Expired() { return zero, false } return item.Value, true } // GetOrSet atomically gets a key's value from the cache, or if the // key is not present, sets the given value. // The loaded result is true if the value was loaded, false if stored. func (c *Cache[K, V]) GetOrSet(key K, val V, opts ...ItemOption) (actual V, loaded bool) { c.mu.Lock() defer c.mu.Unlock() item, ok := c.cache.Get(key) if !ok || item.Expired() { item := newItem(key, val, opts...) c.cache.Set(key, item) return val, false } return item.Value, true } // DeleteExpired all expired items from the cache. func (c *Cache[K, V]) DeleteExpired() { c.mu.Lock() l := c.expManager.len() c.mu.Unlock() evict := func() bool { key := c.expManager.pop() // if is expired, delete it and return nil instead item, ok := c.cache.Get(key) if ok { if item.Expired() { c.cache.Delete(key) return false } c.expManager.update(key, item.Expiration) } return true } for i := 0; i < l; i++ { c.mu.Lock() shouldBreak := evict() c.mu.Unlock() if shouldBreak { break } } } // Set sets a value to the cache with key. replacing any existing value. func (c *Cache[K, V]) Set(key K, val V, opts ...ItemOption) { c.mu.Lock() defer c.mu.Unlock() item := newItem(key, val, opts...) if item.hasExpiration() { c.expManager.update(key, item.Expiration) } c.cache.Set(key, item) } // Keys returns the keys of the cache. the order is relied on algorithms. func (c *Cache[K, V]) Keys() []K { c.mu.Lock() defer c.mu.Unlock() return c.cache.Keys() } // Delete deletes the item with provided key from the cache. 
func (c *Cache[K, V]) Delete(key K) { c.mu.Lock() defer c.mu.Unlock() c.cache.Delete(key) c.expManager.remove(key) } // Len returns the number of items in the cache. func (c *Cache[K, V]) Len() int { c.mu.Lock() defer c.mu.Unlock() return c.cache.Len() } // Contains reports whether key is within cache. func (c *Cache[K, V]) Contains(key K) bool { c.mu.Lock() defer c.mu.Unlock() _, ok := c.cache.Get(key) return ok } // NumberCache is a in-memory cache which is able to store only Number constraint. type NumberCache[K comparable, V Number] struct { *Cache[K, V] // nmu is used to do lock in Increment/Decrement process. // Note that this must be here as a separate mutex because mu in Cache struct is Locked in Get, // and if we call mu.Lock in Increment/Decrement, it will cause deadlock. nmu sync.Mutex } // NewNumber creates a new cache for Number constraint. func NewNumber[K comparable, V Number](opts ...Option[K, V]) *NumberCache[K, V] { return &NumberCache[K, V]{ Cache: New(opts...), } } // Increment an item of type Number constraint by n. // Returns the incremented value. func (nc *NumberCache[K, V]) Increment(key K, n V) V { // In order to avoid lost update, we must lock whole Increment/Decrement process. nc.nmu.Lock() defer nc.nmu.Unlock() got, _ := nc.Cache.Get(key) nv := got + n nc.Cache.Set(key, nv) return nv } // Decrement an item of type Number constraint by n. // Returns the decremented value. func (nc *NumberCache[K, V]) Decrement(key K, n V) V { nc.nmu.Lock() defer nc.nmu.Unlock() got, _ := nc.Cache.Get(key) nv := got - n nc.Cache.Set(key, nv) return nv } go-generics-cache-1.5.1/cache_internal_test.go000066400000000000000000000060401460720355600213160ustar00rootroot00000000000000package cache import ( "context" "testing" "time" ) func TestDeletedCache(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() nc := NewContext[string, int](ctx) key := "key" nc.Set(key, 1, WithExpiration(-time.Second)) _, ok := nc.cache.Get(key) if !ok { t.Fatal("want true") } nc.DeleteExpired() _, ok = nc.cache.Get(key) if ok { t.Fatal("want false") } } func TestDeleteExpired(t *testing.T) { now := time.Now() restore := func() { nowFunc = time.Now } t.Run("normal", func(t *testing.T) { defer restore() c := New[string, int]() c.Set("0", 0) c.Set("1", 10, WithExpiration(10*time.Millisecond)) c.Set("2", 20, WithExpiration(20*time.Millisecond)) c.Set("3", 30, WithExpiration(30*time.Millisecond)) c.Set("4", 40, WithExpiration(40*time.Millisecond)) c.Set("5", 50) maxItems := c.Len() expItems := 2 for i := 0; i <= maxItems; i++ { nowFunc = func() time.Time { // Advance time to expire some items advanced := time.Duration(i * 10) return now.Add(advanced * time.Millisecond).Add(time.Millisecond) } c.DeleteExpired() got := c.Len() want := max(maxItems-i, expItems) if want != got { t.Errorf("want %d items but got %d", want, got) } } }) t.Run("with remove", func(t *testing.T) { defer restore() c := New[string, int]() c.Set("0", 0) c.Set("1", 10, WithExpiration(10*time.Millisecond)) c.Set("2", 20, WithExpiration(20*time.Millisecond)) c.Delete("1") nowFunc = func() time.Time { return now.Add(30 * time.Millisecond).Add(time.Millisecond) } c.DeleteExpired() keys := c.Keys() want := 1 if want != len(keys) { t.Errorf("want %d items but got %d", want, len(keys)) } }) t.Run("with update", func(t *testing.T) { defer restore() c := New[string, int]() c.Set("0", 0) c.Set("1", 10, WithExpiration(10*time.Millisecond)) c.Set("2", 20, WithExpiration(20*time.Millisecond)) c.Set("1", 30, 
WithExpiration(30*time.Millisecond)) // update maxItems := c.Len() nowFunc = func() time.Time { return now.Add(10 * time.Millisecond).Add(time.Millisecond) } c.DeleteExpired() got1 := c.Len() want1 := maxItems if want1 != got1 { t.Errorf("want1 %d items but got1 %d", want1, got1) } nowFunc = func() time.Time { return now.Add(30 * time.Millisecond).Add(time.Millisecond) } c.DeleteExpired() got2 := c.Len() want2 := 1 if want2 != got2 { t.Errorf("want2 %d items but got2 %d", want2, got2) } }) t.Run("issue #51", func(t *testing.T) { defer restore() c := New[string, int]() c.Set("1", 10, WithExpiration(10*time.Millisecond)) c.Set("2", 20, WithExpiration(20*time.Millisecond)) c.Set("1", 30, WithExpiration(100*time.Millisecond)) // expected do not expired key "1" nowFunc = func() time.Time { return now.Add(30 * time.Millisecond).Add(time.Millisecond) } c.DeleteExpired() got := c.Len() if want := 1; want != got { t.Errorf("want %d items but got %d", want, got) } }) } func max(x, y int) int { if x < y { return y } return x } go-generics-cache-1.5.1/cache_test.go000066400000000000000000000046731460720355600174340ustar00rootroot00000000000000package cache_test import ( "math/rand" "sync" "testing" "time" cache "github.com/Code-Hex/go-generics-cache" "github.com/Code-Hex/go-generics-cache/policy/clock" "github.com/Code-Hex/go-generics-cache/policy/fifo" "github.com/Code-Hex/go-generics-cache/policy/lfu" "github.com/Code-Hex/go-generics-cache/policy/lru" "github.com/Code-Hex/go-generics-cache/policy/mru" ) func TestMultiThreadIncr(t *testing.T) { nc := cache.NewNumber[string, int]() nc.Set("counter", 0) var wg sync.WaitGroup for i := 0; i < 100; i++ { wg.Add(1) go func() { _ = nc.Increment("counter", 1) wg.Done() }() } wg.Wait() if counter, _ := nc.Get("counter"); counter != 100 { t.Errorf("want %v but got %v", 100, counter) } } func TestMultiThreadDecr(t *testing.T) { nc := cache.NewNumber[string, int]() nc.Set("counter", 100) var wg sync.WaitGroup for i := 0; i < 100; i++ { wg.Add(1) go func() { _ = nc.Decrement("counter", 1) wg.Done() }() } wg.Wait() if counter, _ := nc.Get("counter"); counter != 0 { t.Errorf("want %v but got %v", 0, counter) } } func TestMultiThread(t *testing.T) { cases := []struct { name string policy cache.Option[int, int] }{ { name: "LRU", policy: cache.AsLRU[int, int](lru.WithCapacity(10)), }, { name: "MRU", policy: cache.AsMRU[int, int](mru.WithCapacity(10)), }, { name: "FIFO", policy: cache.AsFIFO[int, int](fifo.WithCapacity(10)), }, { name: "Clock", policy: cache.AsClock[int, int](clock.WithCapacity(10)), }, { name: "LFU", policy: cache.AsLFU[int, int](lfu.WithCapacity(10)), }, } for _, tc := range cases { tc := tc t.Run(tc.name, func(t *testing.T) { c := cache.New(tc.policy) var wg sync.WaitGroup for i := int64(0); i < 100; i++ { wg.Add(1) go func(i int64) { defer wg.Done() m := rand.New(rand.NewSource(i)) for n := 0; n < 100; n++ { key := m.Intn(100000) c.Set(key, m.Intn(100000)) c.Get(key) } }(i) } wg.Wait() }) } } func TestCallJanitor(t *testing.T) { c := cache.New( cache.WithJanitorInterval[string, int](100 * time.Millisecond), ) c.Set("1", 10, cache.WithExpiration(10*time.Millisecond)) c.Set("2", 20, cache.WithExpiration(20*time.Millisecond)) c.Set("3", 30, cache.WithExpiration(30*time.Millisecond)) <-time.After(300 * time.Millisecond) keys := c.Keys() if len(keys) != 0 { t.Errorf("want items is empty but got %d", len(keys)) } } go-generics-cache-1.5.1/constraint.go000066400000000000000000000003111460720355600174770ustar00rootroot00000000000000package cache import 
"golang.org/x/exp/constraints" // Number is a constraint that permits any numeric types. type Number interface { constraints.Integer | constraints.Float | constraints.Complex } go-generics-cache-1.5.1/example_test.go000066400000000000000000000070111460720355600200110ustar00rootroot00000000000000package cache_test import ( "context" "fmt" "time" cache "github.com/Code-Hex/go-generics-cache" "github.com/Code-Hex/go-generics-cache/policy/lfu" "github.com/Code-Hex/go-generics-cache/policy/lru" ) func ExampleCache() { // use simple cache algorithm without options. c := cache.New[string, int]() c.Set("a", 1) gota, aok := c.Get("a") gotb, bok := c.Get("b") fmt.Println(gota, aok) fmt.Println(gotb, bok) // Output: // 1 true // 0 false } func ExampleNewContext() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() // use simple cache algorithm without options. // an internal janitor will be stopped if specified the context is cancelled. c := cache.NewContext(ctx, cache.WithJanitorInterval[string, int](3*time.Second)) c.Set("a", 1) gota, aok := c.Get("a") gotb, bok := c.Get("b") fmt.Println(gota, aok) fmt.Println(gotb, bok) // Output: // 1 true // 0 false } func ExampleAsClock() { // use clock cache algorithm. c := cache.New(cache.AsClock[string, int]()) c.Set("a", 1) gota, aok := c.Get("a") gotb, bok := c.Get("b") fmt.Println(gota, aok) fmt.Println(gotb, bok) // Output: // 1 true // 0 false } func ExampleWithExpiration() { c := cache.New(cache.AsFIFO[string, int]()) exp := 250 * time.Millisecond c.Set("a", 1, cache.WithExpiration(exp)) // check item is set. gota, aok := c.Get("a") fmt.Println(gota, aok) // set again c.Set("a", 2, cache.WithExpiration(exp)) gota2, aok2 := c.Get("a") fmt.Println(gota2, aok2) // waiting expiration. time.Sleep(exp + 100*time.Millisecond) // + buffer gota3, aok3 := c.Get("a") // expired fmt.Println(gota3, aok3) // Output: // 1 true // 2 true // 0 false } func ExampleWithReferenceCount() { c := cache.New(cache.AsLFU[string, int](lfu.WithCapacity(2))) // set item with reference count c.Set("a", 1, cache.WithReferenceCount(5)) // check item is set. gota, aok := c.Get("a") fmt.Println(gota, aok) c.Set("b", 2) c.Set("c", 3) // evicted becauce the lowest reference count. 
gotb, bok := c.Get("b") fmt.Println(gotb, bok) gotc, cok := c.Get("c") fmt.Println(gotc, cok) // Output: // 1 true // 0 false // 3 true } func ExampleCache_Delete() { c := cache.New(cache.AsMRU[string, int]()) c.Set("a", 1) c.Delete("a") gota, aok := c.Get("a") fmt.Println(gota, aok) // Output: // 0 false } func ExampleCache_Keys() { c := cache.New(cache.AsLFU[string, int]()) c.Set("a", 1) c.Set("b", 1) c.Set("c", 1) fmt.Println(c.Keys()) // Output: // [a b c] } func ExampleCache_Len() { c := cache.New(cache.AsLFU[string, int]()) c.Set("a", 1) c.Set("b", 1) c.Set("c", 1) fmt.Println(c.Len()) // Output: // 3 } func ExampleCache_Contains() { c := cache.New(cache.AsLRU[string, int]()) c.Set("a", 1) fmt.Println(c.Contains("a")) fmt.Println(c.Contains("b")) // Output: // true // false } func ExampleCache_GetOrSet() { c := cache.New(cache.AsLRU[string, int](lru.WithCapacity(10))) c.Set("a", 1) val1, ok1 := c.GetOrSet("b", 2) fmt.Println(val1, ok1) val2, ok2 := c.GetOrSet("a", 3) fmt.Println(val2, ok2) // Output: // 2 false // 1 true } func ExampleNewNumber() { nc := cache.NewNumber[string, int]() nc.Set("a", 1) nc.Set("b", 2, cache.WithExpiration(time.Minute)) av := nc.Increment("a", 1) gota, aok := nc.Get("a") bv := nc.Decrement("b", 1) gotb, bok := nc.Get("b") // not set keys cv := nc.Increment("c", 100) dv := nc.Decrement("d", 100) fmt.Println(av, gota, aok) fmt.Println(bv, gotb, bok) fmt.Println(cv) fmt.Println(dv) // Output: // 2 2 true // 1 1 true // 100 // -100 } go-generics-cache-1.5.1/expiration.go000066400000000000000000000036411460720355600175060ustar00rootroot00000000000000package cache import ( "container/heap" "time" ) type expirationManager[K comparable] struct { queue expirationQueue[K] mapping map[K]*expirationKey[K] } func newExpirationManager[K comparable]() *expirationManager[K] { q := make(expirationQueue[K], 0) heap.Init(&q) return &expirationManager[K]{ queue: q, mapping: make(map[K]*expirationKey[K]), } } func (m *expirationManager[K]) update(key K, expiration time.Time) { if e, ok := m.mapping[key]; ok { e.expiration = expiration heap.Fix(&m.queue, e.index) } else { v := &expirationKey[K]{ key: key, expiration: expiration, } heap.Push(&m.queue, v) m.mapping[key] = v } } func (m *expirationManager[K]) len() int { return m.queue.Len() } func (m *expirationManager[K]) pop() K { v := heap.Pop(&m.queue) key := v.(*expirationKey[K]).key delete(m.mapping, key) return key } func (m *expirationManager[K]) remove(key K) { if e, ok := m.mapping[key]; ok { heap.Remove(&m.queue, e.index) delete(m.mapping, key) } } type expirationKey[K comparable] struct { key K expiration time.Time index int } // expirationQueue implements heap.Interface and holds CacheItems. 
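// Because Less orders entries by expiration time, Pop always yields the key
// that expires soonest. A minimal sketch of how expirationManager drives it:
//
//	q := make(expirationQueue[string], 0)
//	heap.Init(&q)
//	heap.Push(&q, &expirationKey[string]{key: "a", expiration: time.Now().Add(time.Hour)})
//	heap.Push(&q, &expirationKey[string]{key: "b", expiration: time.Now().Add(time.Minute)})
//	first := heap.Pop(&q).(*expirationKey[string]) // first.key == "b"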
type expirationQueue[K comparable] []*expirationKey[K] var _ heap.Interface = (*expirationQueue[int])(nil) func (pq expirationQueue[K]) Len() int { return len(pq) } func (pq expirationQueue[K]) Less(i, j int) bool { // We want Pop to give us the least based on expiration time, not the greater return pq[i].expiration.Before(pq[j].expiration) } func (pq expirationQueue[K]) Swap(i, j int) { pq[i], pq[j] = pq[j], pq[i] pq[i].index = i pq[j].index = j } func (pq *expirationQueue[K]) Push(x interface{}) { n := len(*pq) item := x.(*expirationKey[K]) item.index = n *pq = append(*pq, item) } func (pq *expirationQueue[K]) Pop() interface{} { old := *pq n := len(old) item := old[n-1] item.index = -1 // For safety *pq = old[0 : n-1] return item } go-generics-cache-1.5.1/export_test.go000066400000000000000000000002631460720355600177010ustar00rootroot00000000000000package cache import "time" func SetNowFunc(tm time.Time) (reset func()) { backup := nowFunc nowFunc = func() time.Time { return tm } return func() { nowFunc = backup } } go-generics-cache-1.5.1/go.mod000066400000000000000000000001631460720355600160770ustar00rootroot00000000000000module github.com/Code-Hex/go-generics-cache go 1.18 require golang.org/x/exp v0.0.0-20220328175248-053ad81199eb go-generics-cache-1.5.1/go.sum000066400000000000000000000003171460720355600161250ustar00rootroot00000000000000golang.org/x/exp v0.0.0-20220328175248-053ad81199eb h1:pC9Okm6BVmxEw76PUu0XUbOTQ92JX11hfvqTjAV3qxM= golang.org/x/exp v0.0.0-20220328175248-053ad81199eb/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= go-generics-cache-1.5.1/janitor.go000066400000000000000000000014761460720355600167760ustar00rootroot00000000000000package cache import ( "context" "sync" "time" ) // janitor for collecting expired items and cleaning them. type janitor struct { ctx context.Context interval time.Duration done chan struct{} once sync.Once } func newJanitor(ctx context.Context, interval time.Duration) *janitor { j := &janitor{ ctx: ctx, interval: interval, done: make(chan struct{}), } return j } // stop to stop the janitor. func (j *janitor) stop() { j.once.Do(func() { close(j.done) }) } // run with the given cleanup callback function. 
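// In this package the janitor is wired up by NewContext, roughly as follows
// (sketch; the interval defaults to one minute):
//
//	janitor := newJanitor(ctx, time.Minute)
//	janitor.run(c.DeleteExpired) // c is the *Cache being constructed; DeleteExpired runs on every tick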
func (j *janitor) run(cleanup func()) { go func() { ticker := time.NewTicker(j.interval) defer ticker.Stop() for { select { case <-ticker.C: cleanup() case <-j.done: cleanup() // last call return case <-j.ctx.Done(): j.stop() } } }() } go-generics-cache-1.5.1/janitor_test.go000066400000000000000000000012401460720355600200220ustar00rootroot00000000000000package cache import ( "context" "sync/atomic" "testing" "time" ) func TestJanitor(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() janitor := newJanitor(ctx, time.Millisecond) checkDone := make(chan struct{}) janitor.done = checkDone calledClean := int64(0) janitor.run(func() { atomic.AddInt64(&calledClean, 1) }) // waiting for cleanup time.Sleep(10 * time.Millisecond) cancel() select { case <-checkDone: case <-time.After(time.Second): t.Fatalf("failed to call done channel") } got := atomic.LoadInt64(&calledClean) if got <= 1 { t.Fatalf("failed to call clean callback in janitor: %d", got) } } go-generics-cache-1.5.1/policy/000077500000000000000000000000001460720355600162705ustar00rootroot00000000000000go-generics-cache-1.5.1/policy/clock/000077500000000000000000000000001460720355600173635ustar00rootroot00000000000000go-generics-cache-1.5.1/policy/clock/clock.go000066400000000000000000000066141460720355600210140ustar00rootroot00000000000000package clock import ( "container/ring" "github.com/Code-Hex/go-generics-cache/policy/internal/policyutil" ) // Cache is used The clock cache replacement policy. // // The clock algorithm keeps a circular list of pages in memory, with // the "hand" (iterator) pointing to the last examined page frame in the list. // When a page fault occurs and no empty frames exist, then the R (referenced) bit // is inspected at the hand's location. If R is 0, the new page is put in place of // the page the "hand" points to, and the hand is advanced one position. Otherwise, // the R bit is cleared, then the clock hand is incremented and the process is // repeated until a page is replaced. type Cache[K comparable, V any] struct { items map[K]*ring.Ring hand *ring.Ring head *ring.Ring capacity int } type entry[K comparable, V any] struct { key K val V referenceCount int } // Option is an option for clock cache. type Option func(*options) type options struct { capacity int } func newOptions() *options { return &options{ capacity: 128, } } // WithCapacity is an option to set cache capacity. func WithCapacity(cap int) Option { return func(o *options) { o.capacity = cap } } // NewCache creates a new non-thread safe clock cache whose capacity is the default size (128). func NewCache[K comparable, V any](opts ...Option) *Cache[K, V] { o := newOptions() for _, optFunc := range opts { optFunc(o) } r := ring.New(o.capacity) return &Cache[K, V]{ items: make(map[K]*ring.Ring, o.capacity), hand: r, head: r, capacity: o.capacity, } } // Set sets any item to the cache. replacing any existing item. // // If value satisfies "interface{ GetReferenceCount() int }", the value of // the GetReferenceCount() method is used to set the initial value of reference count. func (c *Cache[K, V]) Set(key K, val V) { if e, ok := c.items[key]; ok { entry := e.Value.(*entry[K, V]) entry.referenceCount++ entry.val = val return } c.evict() c.hand.Value = &entry[K, V]{ key: key, val: val, referenceCount: policyutil.GetReferenceCount(val), } c.items[key] = c.hand c.hand = c.hand.Next() } // Get looks up a key's value from the cache. 
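// A hit also increments the entry's reference count, which the clock hand
// inspects later when choosing a victim. Illustrative use (sketch):
//
//	c := clock.NewCache[string, int]()
//	c.Set("a", 1)
//	v, ok := c.Get("a") // v == 1, ok == true; "a" gains one reference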
func (c *Cache[K, V]) Get(key K) (zero V, _ bool) { e, ok := c.items[key] if !ok { return } entry := e.Value.(*entry[K, V]) entry.referenceCount++ return entry.val, true } func (c *Cache[K, V]) evict() { for c.hand.Value != nil && c.hand.Value.(*entry[K, V]).referenceCount > 0 { c.hand.Value.(*entry[K, V]).referenceCount-- c.hand = c.hand.Next() } if c.hand.Value != nil { entry := c.hand.Value.(*entry[K, V]) delete(c.items, entry.key) c.hand.Value = nil } } // Keys returns the keys of the cache. the order as same as current ring order. func (c *Cache[K, V]) Keys() []K { keys := make([]K, 0, len(c.items)) r := c.head if r.Value == nil { return []K{} } // the first element keys = append(keys, r.Value.(*entry[K, V]).key) // iterating for p := c.head.Next(); p != r; p = p.Next() { if p.Value == nil { continue } e := p.Value.(*entry[K, V]) keys = append(keys, e.key) } return keys } // Delete deletes the item with provided key from the cache. func (c *Cache[K, V]) Delete(key K) { if e, ok := c.items[key]; ok { delete(c.items, key) e.Value = nil } } // Len returns the number of items in the cache. func (c *Cache[K, V]) Len() int { return len(c.items) } go-generics-cache-1.5.1/policy/clock/clock_test.go000066400000000000000000000103601460720355600220440ustar00rootroot00000000000000package clock_test import ( "strconv" "strings" "testing" "github.com/Code-Hex/go-generics-cache/policy/clock" ) type tmp struct { i int } func (t *tmp) GetReferenceCount() int { return t.i } func TestSet(t *testing.T) { // set capacity is 1 cache := clock.NewCache[string, int](clock.WithCapacity(1)) cache.Set("foo", 1) if got := cache.Len(); got != 1 { t.Fatalf("invalid length: %d", got) } if got, ok := cache.Get("foo"); got != 1 || !ok { t.Fatalf("invalid value got %d, cachehit %v", got, ok) } // if over the cap cache.Set("bar", 2) if got := cache.Len(); got != 1 { t.Fatalf("invalid length: %d", got) } bar, ok := cache.Get("bar") if bar != 2 || !ok { t.Fatalf("invalid value bar %d, cachehit %v", bar, ok) } // checks deleted oldest if _, ok := cache.Get("foo"); ok { t.Fatalf("invalid eviction the oldest value for foo %v", ok) } // valid: if over the cap but same key cache.Set("bar", 100) if got := cache.Len(); got != 1 { t.Fatalf("invalid length: %d", got) } bar, ok = cache.Get("bar") if bar != 100 || !ok { t.Fatalf("invalid replacing value bar %d, cachehit %v", bar, ok) } t.Run("with initilal reference count", func(t *testing.T) { cache := clock.NewCache[string, *tmp](clock.WithCapacity(2)) cache.Set("foo", &tmp{i: 10}) // the highest reference count cache.Set("foo2", &tmp{i: 2}) // expected eviction if got := cache.Len(); got != 2 { t.Fatalf("invalid length: %d", got) } cache.Set("foo3", &tmp{i: 3}) // checks deleted the lowest reference count if _, ok := cache.Get("foo2"); ok { t.Fatalf("invalid delete oldest value foo2 %v", ok) } if _, ok := cache.Get("foo"); !ok { t.Fatalf("invalid value foo is not found") } }) } func TestDelete(t *testing.T) { cache := clock.NewCache[string, int](clock.WithCapacity(1)) cache.Set("foo", 1) if got := cache.Len(); got != 1 { t.Fatalf("invalid length: %d", got) } cache.Delete("foo2") if got := cache.Len(); got != 1 { t.Fatalf("invalid length after deleted does not exist key: %d", got) } cache.Delete("foo") if got := cache.Len(); got != 0 { t.Fatalf("invalid length after deleted: %d", got) } if _, ok := cache.Get("foo"); ok { t.Fatalf("invalid get after deleted %v", ok) } } func TestKeys(t *testing.T) { t.Run("normal", func(t *testing.T) { cache := clock.NewCache[string, int]() if 
len(cache.Keys()) != 0 { t.Errorf("want number of keys 0, but got %d", cache.Len()) } cache.Set("foo", 1) cache.Set("bar", 2) cache.Set("baz", 3) cache.Set("bar", 4) // again cache.Set("foo", 5) // again got := strings.Join(cache.Keys(), ",") want := strings.Join([]string{ "foo", "bar", "baz", }, ",") if got != want { t.Errorf("want %q, but got %q", want, got) } if len(cache.Keys()) != cache.Len() { t.Errorf("want number of keys %d, but got %d", len(cache.Keys()), cache.Len()) } }) t.Run("with deletion", func(t *testing.T) { cache := clock.NewCache[string, int](clock.WithCapacity(4)) cache.Set("foo", 1) cache.Set("bar", 2) cache.Set("baz", 3) cache.Delete("bar") // delete in middle got := strings.Join(cache.Keys(), ",") want := strings.Join([]string{ "foo", "baz", }, ",") if got != want { t.Errorf("want %q, but got %q", want, got) } if len(cache.Keys()) != cache.Len() { t.Errorf("want number of keys %d, but got %d", len(cache.Keys()), cache.Len()) } cache.Set("hoge", 4) cache.Set("fuga", 5) // over the cap. so expected to set "bar" position. got2 := strings.Join(cache.Keys(), ",") want2 := strings.Join([]string{ "foo", "fuga", "baz", "hoge", }, ",") if got2 != want2 { t.Errorf("want2 %q, but got2 %q", want2, got2) } if len(cache.Keys()) != cache.Len() { t.Errorf("want2 number of keys %d, but got2 %d", len(cache.Keys()), cache.Len()) } }) } func TestIssue29(t *testing.T) { cap := 3 cache := clock.NewCache[string, int](clock.WithCapacity(cap)) for i := 0; i < cap; i++ { cache.Set(strconv.Itoa(i), i) } cache.Set(strconv.Itoa(cap), cap) keys := cache.Keys() if got := len(keys); cap != got { t.Errorf("want number of keys %d, but got %d", cap, got) } wantKeys := "3,1,2" gotKeys := strings.Join(keys, ",") if wantKeys != gotKeys { t.Errorf("want keys %q, but got keys %q", wantKeys, gotKeys) } } go-generics-cache-1.5.1/policy/clock/example_test.go000066400000000000000000000014251460720355600224060ustar00rootroot00000000000000package clock_test import ( "fmt" "github.com/Code-Hex/go-generics-cache/policy/clock" ) func ExampleNewCache() { c := clock.NewCache[string, int]() c.Set("a", 1) c.Set("b", 2) av, aok := c.Get("a") bv, bok := c.Get("b") cv, cok := c.Get("c") fmt.Println(av, aok) fmt.Println(bv, bok) fmt.Println(cv, cok) c.Delete("a") _, aok2 := c.Get("a") if !aok2 { fmt.Println("key 'a' has been deleted") } // update c.Set("b", 3) newbv, _ := c.Get("b") fmt.Println(newbv) // Output: // 1 true // 2 true // 0 false // key 'a' has been deleted // 3 } func ExampleCache_Keys() { c := clock.NewCache[string, int]() c.Set("foo", 1) c.Set("bar", 2) c.Set("baz", 3) keys := c.Keys() for _, key := range keys { fmt.Println(key) } // Output: // foo // bar // baz } go-generics-cache-1.5.1/policy/fifo/000077500000000000000000000000001460720355600172135ustar00rootroot00000000000000go-generics-cache-1.5.1/policy/fifo/example_test.go000066400000000000000000000014211460720355600222320ustar00rootroot00000000000000package fifo_test import ( "fmt" "github.com/Code-Hex/go-generics-cache/policy/fifo" ) func ExampleNewCache() { c := fifo.NewCache[string, int]() c.Set("a", 1) c.Set("b", 2) av, aok := c.Get("a") bv, bok := c.Get("b") cv, cok := c.Get("c") fmt.Println(av, aok) fmt.Println(bv, bok) fmt.Println(cv, cok) c.Delete("a") _, aok2 := c.Get("a") if !aok2 { fmt.Println("key 'a' has been deleted") } // update c.Set("b", 3) newbv, _ := c.Get("b") fmt.Println(newbv) // Output: // 1 true // 2 true // 0 false // key 'a' has been deleted // 3 } func ExampleCache_Keys() { c := fifo.NewCache[string, int]() c.Set("foo", 1) 
c.Set("bar", 2) c.Set("baz", 3) keys := c.Keys() for _, key := range keys { fmt.Println(key) } // Output: // foo // bar // baz } go-generics-cache-1.5.1/policy/fifo/fifo.go000066400000000000000000000044611460720355600204720ustar00rootroot00000000000000package fifo import ( "container/list" ) // Cache is used a FIFO (First in first out) cache replacement policy. // // In FIFO the item that enter the cache first is evicted first // w/o any regard of how often or how many times it was accessed before. type Cache[K comparable, V any] struct { items map[K]*list.Element queue *list.List // keys capacity int } type entry[K comparable, V any] struct { key K val V } // Option is an option for FIFO cache. type Option func(*options) type options struct { capacity int } func newOptions() *options { return &options{ capacity: 128, } } // WithCapacity is an option to set cache capacity. func WithCapacity(cap int) Option { return func(o *options) { o.capacity = cap } } // NewCache creates a new non-thread safe FIFO cache whose capacity is the default size (128). func NewCache[K comparable, V any](opts ...Option) *Cache[K, V] { o := newOptions() for _, optFunc := range opts { optFunc(o) } return &Cache[K, V]{ items: make(map[K]*list.Element, o.capacity), queue: list.New(), capacity: o.capacity, } } // Set sets any item to the cache. replacing any existing item. func (c *Cache[K, V]) Set(key K, val V) { if c.queue.Len() == c.capacity { e := c.dequeue() delete(c.items, e.Value.(*entry[K, V]).key) } c.Delete(key) // delete old key if already exists specified key. entry := &entry[K, V]{ key: key, val: val, } e := c.queue.PushBack(entry) c.items[key] = e } // Get gets an item from the cache. // Returns the item or zero value, and a bool indicating whether the key was found. func (c *Cache[K, V]) Get(k K) (val V, ok bool) { got, found := c.items[k] if !found { return } return got.Value.(*entry[K, V]).val, true } // Keys returns cache keys. func (c *Cache[K, V]) Keys() []K { keys := make([]K, 0, len(c.items)) for e := c.queue.Front(); e != nil; e = e.Next() { keys = append(keys, e.Value.(*entry[K, V]).key) } return keys } // Delete deletes the item with provided key from the cache. func (c *Cache[K, V]) Delete(key K) { if e, ok := c.items[key]; ok { c.queue.Remove(e) delete(c.items, key) } } // Len returns the number of items in the cache. 
func (c *Cache[K, V]) Len() int { return c.queue.Len() } func (c *Cache[K, V]) dequeue() *list.Element { e := c.queue.Front() c.queue.Remove(e) return e } go-generics-cache-1.5.1/policy/fifo/fifo_test.go000066400000000000000000000040151460720355600215240ustar00rootroot00000000000000package fifo_test import ( "strings" "testing" "github.com/Code-Hex/go-generics-cache/policy/fifo" ) func TestSet(t *testing.T) { // set capacity is 1 cache := fifo.NewCache[string, int](fifo.WithCapacity(1)) cache.Set("foo", 1) if got := cache.Len(); got != 1 { t.Fatalf("invalid length: %d", got) } if got, ok := cache.Get("foo"); got != 1 || !ok { t.Fatalf("invalid value got %d, cachehit %v", got, ok) } // if over the cap cache.Set("bar", 2) if got := cache.Len(); got != 1 { t.Fatalf("invalid length: %d", got) } bar, ok := cache.Get("bar") if bar != 2 || !ok { t.Fatalf("invalid value bar %d, cachehit %v", bar, ok) } // checks deleted oldest if _, ok := cache.Get("foo"); ok { t.Fatalf("invalid eviction the oldest value for foo %v", ok) } // valid: if over the cap but same key cache.Set("bar", 100) if got := cache.Len(); got != 1 { t.Fatalf("invalid length: %d", got) } bar, ok = cache.Get("bar") if bar != 100 || !ok { t.Fatalf("invalid replacing value bar %d, cachehit %v", bar, ok) } } func TestDelete(t *testing.T) { cache := fifo.NewCache[string, int](fifo.WithCapacity(1)) cache.Set("foo", 1) if got := cache.Len(); got != 1 { t.Fatalf("invalid length: %d", got) } cache.Delete("foo2") if got := cache.Len(); got != 1 { t.Fatalf("invalid length after deleted does not exist key: %d", got) } cache.Delete("foo") if got := cache.Len(); got != 0 { t.Fatalf("invalid length after deleted: %d", got) } if _, ok := cache.Get("foo"); ok { t.Fatalf("invalid get after deleted %v", ok) } } func TestKeys(t *testing.T) { cache := fifo.NewCache[string, int]() cache.Set("foo", 1) cache.Set("bar", 2) cache.Set("baz", 3) cache.Set("bar", 4) // again cache.Set("foo", 5) // again got := strings.Join(cache.Keys(), ",") want := strings.Join([]string{ "baz", "bar", "foo", }, ",") if got != want { t.Errorf("want %q, but got %q", want, got) } if len(cache.Keys()) != cache.Len() { t.Errorf("want number of keys %d, but got %d", len(cache.Keys()), cache.Len()) } } go-generics-cache-1.5.1/policy/internal/000077500000000000000000000000001460720355600201045ustar00rootroot00000000000000go-generics-cache-1.5.1/policy/internal/policyutil/000077500000000000000000000000001460720355600223015ustar00rootroot00000000000000go-generics-cache-1.5.1/policy/internal/policyutil/reference_count.go000066400000000000000000000003501460720355600257740ustar00rootroot00000000000000package policyutil // GetReferenceCount gets reference count from cache value. 
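// Values that do not implement the interface fall back to a count of 1.
// Sketch (the weighted type below is a hypothetical example):
//
//	type weighted struct{ n int }
//
//	func (w weighted) GetReferenceCount() int { return w.n }
//
//	policyutil.GetReferenceCount(weighted{n: 5}) // 5
//	policyutil.GetReferenceCount("plain string") // 1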
func GetReferenceCount(v any) int { if getter, ok := v.(interface{ GetReferenceCount() int }); ok { return getter.GetReferenceCount() } return 1 } go-generics-cache-1.5.1/policy/internal/policyutil/reference_count_test.go000066400000000000000000000012251460720355600270350ustar00rootroot00000000000000package policyutil import ( "testing" ) type refCounter struct { count int } func (r refCounter) GetReferenceCount() int { return r.count } func TestGetReferenceCount(t *testing.T) { tests := []struct { name string input any want int }{ { name: "with GetReferenceCount() method", input: refCounter{count: 5}, want: 5, }, { name: "without GetReferenceCount() method", input: "sample string", want: 1, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { output := GetReferenceCount(test.input) if output != test.want { t.Errorf("want %d, got %d", test.want, output) } }) } } go-generics-cache-1.5.1/policy/lfu/000077500000000000000000000000001460720355600170565ustar00rootroot00000000000000go-generics-cache-1.5.1/policy/lfu/example_test.go000066400000000000000000000010711460720355600220760ustar00rootroot00000000000000package lfu_test import ( "fmt" "github.com/Code-Hex/go-generics-cache/policy/lfu" ) func ExampleNewCache() { c := lfu.NewCache[string, int]() c.Set("a", 1) c.Set("b", 2) av, aok := c.Get("a") bv, bok := c.Get("b") cv, cok := c.Get("c") fmt.Println(av, aok) fmt.Println(bv, bok) fmt.Println(cv, cok) // Output: // 1 true // 2 true // 0 false } func ExampleCache_Keys() { c := lfu.NewCache[string, int]() c.Set("a", 1) c.Set("b", 2) c.Set("c", 3) keys := c.Keys() for _, key := range keys { fmt.Println(key) } // Output: // a // b // c } go-generics-cache-1.5.1/policy/lfu/lfu.go000066400000000000000000000050511460720355600201740ustar00rootroot00000000000000package lfu import ( "container/heap" ) // Cache is used a LFU (Least-frequently used) cache replacement policy. // // Counts how often an item is needed. Those that are used least often are discarded first. // This works very similar to LRU except that instead of storing the value of how recently // a block was accessed, we store the value of how many times it was accessed. So of course // while running an access sequence we will replace a block which was used fewest times from our cache. type Cache[K comparable, V any] struct { cap int queue *priorityQueue[K, V] items map[K]*entry[K, V] } // Option is an option for LFU cache. type Option func(*options) type options struct { capacity int } func newOptions() *options { return &options{ capacity: 128, } } // WithCapacity is an option to set cache capacity. func WithCapacity(cap int) Option { return func(o *options) { o.capacity = cap } } // NewCache creates a new non-thread safe LFU cache whose capacity is the default size (128). func NewCache[K comparable, V any](opts ...Option) *Cache[K, V] { o := newOptions() for _, optFunc := range opts { optFunc(o) } return &Cache[K, V]{ cap: o.capacity, queue: newPriorityQueue[K, V](o.capacity), items: make(map[K]*entry[K, V], o.capacity), } } // Get looks up a key's value from the cache. func (c *Cache[K, V]) Get(key K) (zero V, _ bool) { e, ok := c.items[key] if !ok { return } e.referenced() heap.Fix(c.queue, e.index) return e.val, true } // Set sets a value to the cache with key. replacing any existing value. // // If value satisfies "interface{ GetReferenceCount() int }", the value of // the GetReferenceCount() method is used to set the initial value of reference count. 
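// When the cache is already at capacity, the entry with the lowest reference
// count (ties broken by the oldest reference time) is evicted first. Sketch:
//
//	c := lfu.NewCache[string, int](lfu.WithCapacity(2))
//	c.Set("a", 1)
//	c.Get("a") // bump "a"'s reference count
//	c.Set("b", 2)
//	c.Set("c", 3) // evicts "b", the least frequently used entry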
func (c *Cache[K, V]) Set(key K, val V) { if e, ok := c.items[key]; ok { c.queue.update(e, val) return } if len(c.items) == c.cap { evictedEntry := heap.Pop(c.queue) if evictedEntry != nil { delete(c.items, evictedEntry.(*entry[K, V]).key) } } e := newEntry(key, val) heap.Push(c.queue, e) c.items[key] = e } // Keys returns the keys of the cache. the order is from oldest to newest. func (c *Cache[K, V]) Keys() []K { keys := make([]K, 0, len(c.items)) for _, entry := range *c.queue { keys = append(keys, entry.key) } return keys } // Delete deletes the item with provided key from the cache. func (c *Cache[K, V]) Delete(key K) { if e, ok := c.items[key]; ok { heap.Remove(c.queue, e.index) delete(c.items, key) } } // Len returns the number of items in the cache. func (c *Cache[K, V]) Len() int { return c.queue.Len() } go-generics-cache-1.5.1/policy/lfu/lfu_test.go000066400000000000000000000051341460720355600212350ustar00rootroot00000000000000package lfu_test import ( "testing" "github.com/Code-Hex/go-generics-cache/policy/lfu" ) type tmp struct { i int } func (t *tmp) GetReferenceCount() int { return t.i } func TestSet(t *testing.T) { // set capacity is 1 cache := lfu.NewCache[string, int](lfu.WithCapacity(1)) cache.Set("foo", 1) if got := cache.Len(); got != 1 { t.Fatalf("invalid length: %d", got) } if got, ok := cache.Get("foo"); got != 1 || !ok { t.Fatalf("invalid value got %d, cachehit %v", got, ok) } // if over the cap cache.Set("bar", 2) if got := cache.Len(); got != 1 { t.Fatalf("invalid length: %d", got) } bar, ok := cache.Get("bar") if bar != 2 || !ok { t.Fatalf("invalid value bar %d, cachehit %v", bar, ok) } // checks deleted oldest if _, ok := cache.Get("foo"); ok { t.Fatalf("invalid delete oldest value foo %v", ok) } // valid: if over the cap but same key cache.Set("bar", 100) if got := cache.Len(); got != 1 { t.Fatalf("invalid length: %d", got) } bar, ok = cache.Get("bar") if bar != 100 || !ok { t.Fatalf("invalid replacing value bar %d, cachehit %v", bar, ok) } t.Run("with initilal reference count", func(t *testing.T) { cache := lfu.NewCache[string, *tmp](lfu.WithCapacity(2)) cache.Set("foo", &tmp{i: 10}) // the highest reference count cache.Set("foo2", &tmp{i: 2}) // expected eviction if got := cache.Len(); got != 2 { t.Fatalf("invalid length: %d", got) } cache.Set("foo3", &tmp{i: 3}) // checks deleted the lowest reference count if _, ok := cache.Get("foo2"); ok { t.Fatalf("invalid delete oldest value foo2 %v", ok) } if _, ok := cache.Get("foo"); !ok { t.Fatalf("invalid value foo is not found") } }) } func TestDelete(t *testing.T) { cache := lfu.NewCache[string, int](lfu.WithCapacity(2)) cache.Set("foo", 1) if got := cache.Len(); got != 1 { t.Fatalf("invalid length: %d", got) } cache.Delete("foo2") if got := cache.Len(); got != 1 { t.Fatalf("invalid length after deleted does not exist key: %d", got) } cache.Delete("foo") if got := cache.Len(); got != 0 { t.Fatalf("invalid length after deleted: %d", got) } if _, ok := cache.Get("foo"); ok { t.Fatalf("invalid get after deleted %v", ok) } } // check don't panic func TestIssue33(t *testing.T) { cache := lfu.NewCache[string, int](lfu.WithCapacity(2)) cache.Set("foo", 1) cache.Set("foo2", 2) cache.Set("foo3", 3) cache.Delete("foo") cache.Delete("foo2") cache.Delete("foo3") } func TestZeroCap(t *testing.T) { cache := lfu.NewCache[string, int](lfu.WithCapacity(0)) cache.Set("foo", 1) v, ok := cache.Get("foo") if !ok { t.Error(ok) } if v != 1 { t.Error(v) } } 
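// The test below is an illustrative sketch (not exhaustive): a Get raises an
// entry's priority, so a colder key is evicted first once capacity is exceeded.
func TestEvictionPrefersColdKeySketch(t *testing.T) {
	cache := lfu.NewCache[string, int](lfu.WithCapacity(2))
	cache.Set("hot", 1)
	cache.Set("cold", 2)
	if _, ok := cache.Get("hot"); !ok { // bumps "hot"'s reference count to 2
		t.Fatal("expected a cache hit for hot")
	}
	cache.Set("new", 3) // capacity exceeded: "cold" has the lowest count and is evicted
	if _, ok := cache.Get("cold"); ok {
		t.Fatal("expected cold to be evicted")
	}
	if _, ok := cache.Get("new"); !ok {
		t.Fatal("expected new to be cached")
	}
}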
go-generics-cache-1.5.1/policy/lfu/priority_queue.go000066400000000000000000000035151460720355600224760ustar00rootroot00000000000000package lfu import ( "container/heap" "time" "github.com/Code-Hex/go-generics-cache/policy/internal/policyutil" ) type entry[K comparable, V any] struct { index int key K val V referenceCount int referencedAt time.Time } func newEntry[K comparable, V any](key K, val V) *entry[K, V] { return &entry[K, V]{ index: 0, key: key, val: val, referenceCount: policyutil.GetReferenceCount(val), referencedAt: time.Now(), } } func (e *entry[K, V]) referenced() { e.referenceCount++ e.referencedAt = time.Now() } type priorityQueue[K comparable, V any] []*entry[K, V] func newPriorityQueue[K comparable, V any](cap int) *priorityQueue[K, V] { queue := make(priorityQueue[K, V], 0, cap) return &queue } // see example of priority queue: https://pkg.go.dev/container/heap var _ heap.Interface = (*priorityQueue[struct{}, interface{}])(nil) func (q priorityQueue[K, V]) Len() int { return len(q) } func (q priorityQueue[K, V]) Less(i, j int) bool { if q[i].referenceCount == q[j].referenceCount { return q[i].referencedAt.Before(q[j].referencedAt) } return q[i].referenceCount < q[j].referenceCount } func (q priorityQueue[K, V]) Swap(i, j int) { if len(q) < 2 { return } q[i], q[j] = q[j], q[i] q[i].index = i q[j].index = j } func (q *priorityQueue[K, V]) Push(x interface{}) { entry := x.(*entry[K, V]) entry.index = len(*q) *q = append(*q, entry) } func (q *priorityQueue[K, V]) Pop() interface{} { old := *q n := len(old) if n == 0 { return nil // Return nil if the queue is empty to prevent panic } entry := old[n-1] old[n-1] = nil // avoid memory leak entry.index = -1 // for safety new := old[0 : n-1] *q = new return entry } func (q *priorityQueue[K, V]) update(e *entry[K, V], val V) { e.val = val e.referenced() heap.Fix(q, e.index) } go-generics-cache-1.5.1/policy/lfu/priority_queue_test.go000066400000000000000000000112731460720355600235350ustar00rootroot00000000000000package lfu import ( "container/heap" "reflect" "testing" "time" ) func TestPriorityQueue(t *testing.T) { // perl -MList::Util -e 'print join ",", List::Util::shuffle(1..10)' nums := []int{2, 1, 4, 5, 6, 9, 7, 10, 8, 3} queue := newPriorityQueue[int, int](len(nums)) entries := make([]*entry[int, int], 0, len(nums)) for _, v := range nums { entry := newEntry(v, v) entries = append(entries, entry) heap.Push(queue, entry) } if got := queue.Len(); len(nums) != got { t.Errorf("want %d, but got %d", len(nums), got) } // check the initial state for idx, entry := range *queue { if entry.index != idx { t.Errorf("want index %d, but got %d", entry.index, idx) } if entry.referenceCount != 1 { t.Errorf("want count 1") } if got := entry.val; nums[idx] != got { t.Errorf("want value %d but got %d", nums[idx], got) } } // updates len - 1 entries (updated all reference count and referenced_at) // so the lowest priority will be the last element. // // this loop creates // - Reference counters other than the last element are 2. 
// - The first element is the oldest referenced_at in reference counter is 2 for i := 0; i < len(nums)-1; i++ { entry := entries[i] queue.update(entry, nums[i]) time.Sleep(time.Millisecond) } // check the priority by reference counter wantValue := nums[len(nums)-1] got := heap.Pop(queue).(*entry[int, int]) if got.index != -1 { t.Errorf("want index -1, but got %d", got.index) } if wantValue != got.val { t.Errorf("want the lowest priority value is %d, but got %d", wantValue, got.val) } if want, got := len(nums)-1, queue.Len(); want != got { t.Errorf("want %d, but got %d", want, got) } // check the priority by referenced_at wantValue2 := nums[0] got2 := heap.Pop(queue).(*entry[int, int]) if got.index != -1 { t.Errorf("want index -1, but got %d", got.index) } if wantValue2 != got2.val { t.Errorf("want the lowest priority value is %d, but got %d", wantValue2, got2.val) } if want, got := len(nums)-2, queue.Len(); want != got { t.Errorf("want %d, but got %d", want, got) } } func Test_priorityQueue_Swap(t *testing.T) { type args struct { i int j int } type testCase[K comparable, V any] struct { name string q *priorityQueue[K, V] args args want *priorityQueue[K, V] } tests := []testCase[string, int]{ { name: "swap case", q: func() *priorityQueue[string, int] { q := newPriorityQueue[string, int](10) q.Push(&entry[string, int]{index: 0}) q.Push(&entry[string, int]{index: 1}) return q }(), args: args{i: 0, j: 1}, want: func() *priorityQueue[string, int] { q := newPriorityQueue[string, int](10) q.Push(&entry[string, int]{index: 1}) q.Push(&entry[string, int]{index: 0}) return q }(), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { tt.q.Swap(tt.args.i, tt.args.j) if !reflect.DeepEqual(tt.q, tt.want) { t.Errorf("want %v, got %v", tt.want, tt.q) } }) } } func TestPriorityQueue_Pop(t *testing.T) { t.Run("Pop from empty queue", func(t *testing.T) { pq := newPriorityQueue[int, string](0) if elem := heap.Pop(pq); elem != nil { t.Errorf("Expected nil from empty queue, got %v", elem) } }) t.Run("Pop from queue with single element", func(t *testing.T) { pq := newPriorityQueue[int, string](10) heap.Push(pq, newEntry(1, "one")) if pq.Len() != 1 { t.Fatalf("Expected queue length of 1, got %d", pq.Len()) } elem := heap.Pop(pq).(*entry[int, string]) if elem.key != 1 || elem.val != "one" { t.Errorf("Expected to pop element with key=1 and val='one', got key=%d and val='%s'", elem.key, elem.val) } if pq.Len() != 0 { t.Errorf("Expected empty queue after pop, got length %d", pq.Len()) } }) t.Run("Pop from queue with multiple elements", func(t *testing.T) { pq := newPriorityQueue[int, string](10) heap.Push(pq, newEntry(1, "one")) heap.Push(pq, newEntry(2, "two")) heap.Push(pq, newEntry(3, "three")) // Pop the first element elem := heap.Pop(pq).(*entry[int, string]) if elem.key != 1 || elem.val != "one" { t.Errorf("Expected to pop element with key=1 and val='one', got key=%d and val='%s'", elem.key, elem.val) } // Pop the second element elem = heap.Pop(pq).(*entry[int, string]) if elem.key != 2 || elem.val != "two" { t.Errorf("Expected to pop element with key=2 and val='two', got key=%d and val='%s'", elem.key, elem.val) } // Pop the third element elem = heap.Pop(pq).(*entry[int, string]) if elem.key != 3 || elem.val != "three" { t.Errorf("Expected to pop element with key=3 and val='three', got key=%d and val='%s'", elem.key, elem.val) } if pq.Len() != 0 { t.Errorf("Expected empty queue after all pops, got length %d", pq.Len()) } }) } 
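// A compact sketch (not an exhaustive test) of the ordering rule in Less:
// lower reference counts pop first, and equal counts fall back to the older
// referencedAt.
func TestOrderingSketch(t *testing.T) {
	pq := newPriorityQueue[string, int](4)
	older := newEntry("older", 1)
	newer := newEntry("newer", 2)
	newer.referencedAt = older.referencedAt.Add(time.Millisecond) // make the tie-break deterministic
	hot := newEntry("hot", 3)
	hot.referenced() // reference count becomes 2
	for _, e := range []*entry[string, int]{hot, newer, older} {
		heap.Push(pq, e)
	}
	want := []string{"older", "newer", "hot"}
	for _, w := range want {
		got := heap.Pop(pq).(*entry[string, int])
		if got.key != w {
			t.Fatalf("want %q, got %q", w, got.key)
		}
	}
}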
go-generics-cache-1.5.1/policy/lru/000077500000000000000000000000001460720355600170725ustar00rootroot00000000000000go-generics-cache-1.5.1/policy/lru/example_test.go000066400000000000000000000010711460720355600221120ustar00rootroot00000000000000package lru_test import ( "fmt" "github.com/Code-Hex/go-generics-cache/policy/lru" ) func ExampleNewCache() { c := lru.NewCache[string, int]() c.Set("a", 1) c.Set("b", 2) av, aok := c.Get("a") bv, bok := c.Get("b") cv, cok := c.Get("c") fmt.Println(av, aok) fmt.Println(bv, bok) fmt.Println(cv, cok) // Output: // 1 true // 2 true // 0 false } func ExampleCache_Keys() { c := lru.NewCache[string, int]() c.Set("a", 1) c.Set("b", 2) c.Set("c", 3) keys := c.Keys() for _, key := range keys { fmt.Println(key) } // Output: // a // b // c } go-generics-cache-1.5.1/policy/lru/lru.go000066400000000000000000000050031460720355600202210ustar00rootroot00000000000000package lru import ( "container/list" ) // Cache is used a LRU (Least recently used) cache replacement policy. // // Discards the least recently used items first. This algorithm requires // keeping track of what was used when, which is expensive if one wants // to make sure the algorithm always discards the least recently used item. type Cache[K comparable, V any] struct { cap int list *list.List items map[K]*list.Element } type entry[K comparable, V any] struct { key K val V } // Option is an option for LRU cache. type Option func(*options) type options struct { capacity int } func newOptions() *options { return &options{ capacity: 128, } } // WithCapacity is an option to set cache capacity. func WithCapacity(cap int) Option { return func(o *options) { o.capacity = cap } } // NewCache creates a new non-thread safe LRU cache whose capacity is the default size (128). func NewCache[K comparable, V any](opts ...Option) *Cache[K, V] { o := newOptions() for _, optFunc := range opts { optFunc(o) } return &Cache[K, V]{ cap: o.capacity, list: list.New(), items: make(map[K]*list.Element, o.capacity), } } // Get looks up a key's value from the cache. func (c *Cache[K, V]) Get(key K) (zero V, _ bool) { e, ok := c.items[key] if !ok { return } // updates cache order c.list.MoveToFront(e) return e.Value.(*entry[K, V]).val, true } // Set sets a value to the cache with key. replacing any existing value. func (c *Cache[K, V]) Set(key K, val V) { if e, ok := c.items[key]; ok { // updates cache order c.list.MoveToFront(e) entry := e.Value.(*entry[K, V]) entry.val = val return } newEntry := &entry[K, V]{ key: key, val: val, } e := c.list.PushFront(newEntry) c.items[key] = e if c.list.Len() > c.cap { c.deleteOldest() } } // Keys returns the keys of the cache. the order is from oldest to newest. func (c *Cache[K, V]) Keys() []K { keys := make([]K, 0, len(c.items)) for ent := c.list.Back(); ent != nil; ent = ent.Prev() { entry := ent.Value.(*entry[K, V]) keys = append(keys, entry.key) } return keys } // Len returns the number of items in the cache. func (c *Cache[K, V]) Len() int { return c.list.Len() } // Delete deletes the item with provided key from the cache. 
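// Deleting a key that does not exist in the cache is a no-op.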
func (c *Cache[K, V]) Delete(key K) { if e, ok := c.items[key]; ok { c.delete(e) } } func (c *Cache[K, V]) deleteOldest() { e := c.list.Back() c.delete(e) } func (c *Cache[K, V]) delete(e *list.Element) { c.list.Remove(e) entry := e.Value.(*entry[K, V]) delete(c.items, entry.key) } go-generics-cache-1.5.1/policy/lru/lru_test.go000066400000000000000000000030051460720355600212600ustar00rootroot00000000000000package lru_test import ( "testing" "github.com/Code-Hex/go-generics-cache/policy/lru" ) func TestSet(t *testing.T) { // set capacity is 1 cache := lru.NewCache[string, int](lru.WithCapacity(1)) cache.Set("foo", 1) if got := cache.Len(); got != 1 { t.Fatalf("invalid length: %d", got) } if got, ok := cache.Get("foo"); got != 1 || !ok { t.Fatalf("invalid value got %d, cachehit %v", got, ok) } // if over the cap cache.Set("bar", 2) if got := cache.Len(); got != 1 { t.Fatalf("invalid length: %d", got) } bar, ok := cache.Get("bar") if bar != 2 || !ok { t.Fatalf("invalid value bar %d, cachehit %v", bar, ok) } // checks deleted oldest if _, ok := cache.Get("foo"); ok { t.Fatalf("invalid delete oldest value foo %v", ok) } // valid: if over the cap but same key cache.Set("bar", 100) if got := cache.Len(); got != 1 { t.Fatalf("invalid length: %d", got) } bar, ok = cache.Get("bar") if bar != 100 || !ok { t.Fatalf("invalid replacing value bar %d, cachehit %v", bar, ok) } } func TestDelete(t *testing.T) { cache := lru.NewCache[string, int](lru.WithCapacity(1)) cache.Set("foo", 1) if got := cache.Len(); got != 1 { t.Fatalf("invalid length: %d", got) } cache.Delete("foo2") if got := cache.Len(); got != 1 { t.Fatalf("invalid length after deleted does not exist key: %d", got) } cache.Delete("foo") if got := cache.Len(); got != 0 { t.Fatalf("invalid length after deleted: %d", got) } if _, ok := cache.Get("foo"); ok { t.Fatalf("invalid get after deleted %v", ok) } } go-generics-cache-1.5.1/policy/mru/000077500000000000000000000000001460720355600170735ustar00rootroot00000000000000go-generics-cache-1.5.1/policy/mru/example_test.go000066400000000000000000000010711460720355600221130ustar00rootroot00000000000000package mru_test import ( "fmt" "github.com/Code-Hex/go-generics-cache/policy/mru" ) func ExampleNewCache() { c := mru.NewCache[string, int]() c.Set("a", 1) c.Set("b", 2) av, aok := c.Get("a") bv, bok := c.Get("b") cv, cok := c.Get("c") fmt.Println(av, aok) fmt.Println(bv, bok) fmt.Println(cv, cok) // Output: // 1 true // 2 true // 0 false } func ExampleCache_Keys() { c := mru.NewCache[string, int]() c.Set("a", 1) c.Set("b", 2) c.Set("c", 3) keys := c.Keys() for _, key := range keys { fmt.Println(key) } // Output: // c // b // a } go-generics-cache-1.5.1/policy/mru/mru.go000066400000000000000000000045771460720355600202420ustar00rootroot00000000000000package mru import ( "container/list" ) // Cache is used a MRU (Most recently used) cache replacement policy. // // In contrast to Least Recently Used (LRU), MRU discards the most recently used items first. type Cache[K comparable, V any] struct { cap int list *list.List items map[K]*list.Element } type entry[K comparable, V any] struct { key K val V } // Option is an option for MRU cache. type Option func(*options) type options struct { capacity int } func newOptions() *options { return &options{ capacity: 128, } } // WithCapacity is an option to set cache capacity. func WithCapacity(cap int) Option { return func(o *options) { o.capacity = cap } } // NewCache creates a new non-thread safe MRU cache whose capacity is the default size (128). 
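// The capacity can be changed with the WithCapacity option.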
func NewCache[K comparable, V any](opts ...Option) *Cache[K, V] { o := newOptions() for _, optFunc := range opts { optFunc(o) } return &Cache[K, V]{ cap: o.capacity, list: list.New(), items: make(map[K]*list.Element, o.capacity), } } // Get looks up a key's value from the cache. func (c *Cache[K, V]) Get(key K) (zero V, _ bool) { e, ok := c.items[key] if !ok { return } // updates cache order c.list.MoveToBack(e) return e.Value.(*entry[K, V]).val, true } // Set sets a value to the cache with key. replacing any existing value. func (c *Cache[K, V]) Set(key K, val V) { if e, ok := c.items[key]; ok { // updates cache order c.list.MoveToBack(e) entry := e.Value.(*entry[K, V]) entry.val = val return } if c.list.Len() == c.cap { c.deleteNewest() } newEntry := &entry[K, V]{ key: key, val: val, } e := c.list.PushBack(newEntry) c.items[key] = e } // Keys returns the keys of the cache. the order is from recently used. func (c *Cache[K, V]) Keys() []K { keys := make([]K, 0, len(c.items)) for ent := c.list.Back(); ent != nil; ent = ent.Prev() { entry := ent.Value.(*entry[K, V]) keys = append(keys, entry.key) } return keys } // Len returns the number of items in the cache. func (c *Cache[K, V]) Len() int { return c.list.Len() } // Delete deletes the item with provided key from the cache. func (c *Cache[K, V]) Delete(key K) { if e, ok := c.items[key]; ok { c.delete(e) } } func (c *Cache[K, V]) deleteNewest() { e := c.list.Front() c.delete(e) } func (c *Cache[K, V]) delete(e *list.Element) { c.list.Remove(e) entry := e.Value.(*entry[K, V]) delete(c.items, entry.key) } go-generics-cache-1.5.1/policy/mru/mru_test.go000066400000000000000000000041561460720355600212720ustar00rootroot00000000000000package mru_test import ( "strings" "testing" "github.com/Code-Hex/go-generics-cache/policy/mru" ) func TestSet(t *testing.T) { cache := mru.NewCache[string, int](mru.WithCapacity(2)) cache.Set("foo", 1) cache.Set("bar", 2) if got := cache.Len(); got != 2 { t.Fatalf("invalid length: %d", got) } if got, ok := cache.Get("foo"); got != 1 || !ok { t.Fatalf("invalid value got %d, cachehit %v", got, ok) } // if over the cap cache.Set("baz", 3) if got := cache.Len(); got != 2 { t.Fatalf("invalid length: %d", got) } baz, ok := cache.Get("baz") if baz != 3 || !ok { t.Fatalf("invalid value baz %d, cachehit %v", baz, ok) } // check eviction most recently used if _, ok := cache.Get("bar"); ok { t.Log(cache.Keys()) t.Fatalf("invalid eviction the newest value for bar %v", ok) } // current state // - baz <- recently used // - foo // valid: if over the cap but specify the oldest key cache.Set("foo", 100) if got := cache.Len(); got != 2 { t.Fatalf("invalid length: %d", got) } foo, ok := cache.Get("foo") if foo != 100 || !ok { t.Fatalf("invalid replacing value foo %d, cachehit %v", foo, ok) } } func TestDelete(t *testing.T) { cache := mru.NewCache[string, int](mru.WithCapacity(1)) cache.Set("foo", 1) if got := cache.Len(); got != 1 { t.Fatalf("invalid length: %d", got) } cache.Delete("foo2") if got := cache.Len(); got != 1 { t.Fatalf("invalid length after deleted does not exist key: %d", got) } cache.Delete("foo") if got := cache.Len(); got != 0 { t.Fatalf("invalid length after deleted: %d", got) } if _, ok := cache.Get("foo"); ok { t.Fatalf("invalid get after deleted %v", ok) } } func TestKeys(t *testing.T) { cache := mru.NewCache[string, int]() cache.Set("foo", 1) cache.Set("bar", 2) cache.Set("baz", 3) cache.Set("bar", 4) // again cache.Set("foo", 5) // again got := strings.Join(cache.Keys(), ",") want := strings.Join([]string{ 
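		// most recently used first: "foo" and "bar" were re-set after "baz"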
"foo", "bar", "baz", }, ",") if got != want { t.Errorf("want %q, but got %q", want, got) } if len(cache.Keys()) != cache.Len() { t.Errorf("want number of keys %d, but got %d", len(cache.Keys()), cache.Len()) } } go-generics-cache-1.5.1/policy/simple/000077500000000000000000000000001460720355600175615ustar00rootroot00000000000000go-generics-cache-1.5.1/policy/simple/example_test.go000066400000000000000000000024731460720355600226100ustar00rootroot00000000000000package simple_test import ( "fmt" "testing" "github.com/Code-Hex/go-generics-cache/policy/simple" ) func ExampleNewCache() { c := simple.NewCache[string, int]() c.Set("a", 1) c.Set("b", 2) av, aok := c.Get("a") bv, bok := c.Get("b") cv, cok := c.Get("c") fmt.Println(av, aok) fmt.Println(bv, bok) fmt.Println(cv, cok) c.Delete("a") _, aok2 := c.Get("a") if !aok2 { fmt.Println("key 'a' has been deleted") } // update c.Set("b", 3) newbv, _ := c.Get("b") fmt.Println(newbv) // Output: // 1 true // 2 true // 0 false // key 'a' has been deleted // 3 } func ExampleCache_Keys() { c := simple.NewCache[string, int]() c.Set("foo", 1) c.Set("bar", 2) c.Set("baz", 3) keys := c.Keys() for _, key := range keys { fmt.Println(key) } // Output: // foo // bar // baz } func ExampleCache_Len() { c := simple.NewCache[string, int]() c.Set("foo", 1) c.Set("bar", 2) c.Set("baz", 3) len := c.Len() fmt.Println(len) // Output: // 3 } func BenchmarkLenWithKeys(b *testing.B) { c := simple.NewCache[string, int]() c.Set("foo", 1) c.Set("bar", 2) c.Set("baz", 3) for i := 0; i < b.N; i++ { var _ = len(c.Keys()) } } func BenchmarkJustLen(b *testing.B) { c := simple.NewCache[string, int]() c.Set("foo", 1) c.Set("bar", 2) c.Set("baz", 3) for i := 0; i < b.N; i++ { var _ = c.Len() } } go-generics-cache-1.5.1/policy/simple/simple.go000066400000000000000000000026211460720355600214020ustar00rootroot00000000000000package simple import ( "sort" "time" ) // Cache is a simple cache has no clear priority for evict cache. type Cache[K comparable, V any] struct { items map[K]*entry[V] } type entry[V any] struct { val V createdAt time.Time } // NewCache creates a new non-thread safe cache. func NewCache[K comparable, V any]() *Cache[K, V] { return &Cache[K, V]{ items: make(map[K]*entry[V], 0), } } // Set sets any item to the cache. replacing any existing item. // The default item never expires. func (c *Cache[K, V]) Set(k K, v V) { c.items[k] = &entry[V]{ val: v, createdAt: time.Now(), } } // Get gets an item from the cache. // Returns the item or zero value, and a bool indicating whether the key was found. func (c *Cache[K, V]) Get(k K) (val V, ok bool) { got, found := c.items[k] if !found { return } return got.val, true } // Keys returns cache keys. the order is sorted by created. func (c *Cache[K, _]) Keys() []K { ret := make([]K, 0, len(c.items)) for key := range c.items { ret = append(ret, key) } sort.Slice(ret, func(i, j int) bool { i1 := c.items[ret[i]] i2 := c.items[ret[j]] return i1.createdAt.Before(i2.createdAt) }) return ret } // Delete deletes the item with provided key from the cache. func (c *Cache[K, V]) Delete(key K) { delete(c.items, key) } // Len returns the number of items in the cache. func (c *Cache[K, V]) Len() int { return len(c.items) }