pax_global_header00006660000000000000000000000064145435102320014511gustar00rootroot0000000000000052 comment=5754ec5bdec10a3af1a2c10423eaa2ee250b777e
lazycache-0.4.0/000077500000000000000000000000001454351023200134555ustar00rootroot00000000000000
lazycache-0.4.0/.github/000077500000000000000000000000001454351023200150155ustar00rootroot00000000000000
lazycache-0.4.0/.github/FUNDING.yml000066400000000000000000000000151454351023200166260ustar00rootroot00000000000000
github: [bep]
lazycache-0.4.0/.github/workflows/000077500000000000000000000000001454351023200170525ustar00rootroot00000000000000
lazycache-0.4.0/.github/workflows/test.yml000066400000000000000000000030321454351023200205520ustar00rootroot00000000000000
on:
  push:
    branches: [ master ]
  pull_request:
name: Test
jobs:
  test:
    strategy:
      matrix:
        go-version: [1.20.x,1.21.x]
        platform: [ubuntu-latest, macos-latest, windows-latest]
    runs-on: ${{ matrix.platform }}
    steps:
      - name: Install Go
        uses: actions/setup-go@v3
        with:
          go-version: ${{ matrix.go-version }}
      - name: Install staticcheck
        run: go install honnef.co/go/tools/cmd/staticcheck@latest
        shell: bash
      - name: Update PATH
        run: echo "$(go env GOPATH)/bin" >> $GITHUB_PATH
        shell: bash
      - name: Checkout code
        uses: actions/checkout@v1
      - name: Fmt
        if: matrix.platform != 'windows-latest' # :(
        run: "diff <(gofmt -d .) <(printf '')"
        shell: bash
      - name: Vet
        run: go vet ./...
      - name: Staticcheck
        run: staticcheck ./...
      - name: Test
        run: go test -race ./... -coverpkg=./... -coverprofile=coverage.txt -covermode=atomic
      - name: Upload coverage
        if: success() && matrix.platform == 'ubuntu-latest'
        run: |
          curl https://keybase.io/codecovsecurity/pgp_keys.asc | gpg --no-default-keyring --keyring trustedkeys.gpg --import # One-time
          curl -Os https://uploader.codecov.io/latest/linux/codecov
          curl -Os https://uploader.codecov.io/latest/linux/codecov.SHA256SUM
          curl -Os https://uploader.codecov.io/latest/linux/codecov.SHA256SUM.sig
          gpgv codecov.SHA256SUM.sig codecov.SHA256SUM
          shasum -a 256 -c codecov.SHA256SUM
          chmod +x codecov
          ./codecov
lazycache-0.4.0/.gitignore000066400000000000000000000004151454351023200154450ustar00rootroot00000000000000
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib

# Test binary, built with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out

# Dependency directories (remove the comment below to include it)
# vendor/
lazycache-0.4.0/LICENSE000066400000000000000000000020651454351023200144650ustar00rootroot00000000000000
MIT License

Copyright (c) 2022 Bjørn Erik Pedersen

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
lazycache-0.4.0/README.md000066400000000000000000000027621454351023200147430ustar00rootroot00000000000000
[![Tests on Linux, MacOS and Windows](https://github.com/bep/lazycache/workflows/Test/badge.svg)](https://github.com/bep/lazycache/actions?query=workflow:Test)
[![Go Report Card](https://goreportcard.com/badge/github.com/bep/lazycache)](https://goreportcard.com/report/github.com/bep/lazycache)
[![codecov](https://codecov.io/github/bep/lazycache/branch/main/graph/badge.svg?token=HJCUCT07CH)](https://codecov.io/github/bep/lazycache)
[![GoDoc](https://godoc.org/github.com/bep/lazycache?status.svg)](https://godoc.org/github.com/bep/lazycache)

**Lazycache** is a simple thread-safe in-memory LRU cache. Under the hood it leverages the great [simplelru package in golang-lru](https://github.com/hashicorp/golang-lru), with its excellent performance.

One big difference between `golang-lru` and this library is the [GetOrCreate](https://pkg.go.dev/github.com/bep/lazycache#Cache.GetOrCreate) method, which provides:

* Non-blocking cache priming on cache misses.
* A guarantee that the prime function is only called once for a given key.
* The cache's [RWMutex](https://pkg.go.dev/sync#RWMutex) is not locked during the execution of the prime function, which should make it easier to reason about potential deadlocks.

Other notable features:

* The API is [generic](https://go.dev/doc/tutorial/generics).
* The cache can be [resized](https://pkg.go.dev/github.com/bep/lazycache#Cache.Resize) while running.
* When the number of entries overflows the defined cache size, the least recently used item gets discarded (LRU).
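
A minimal usage sketch (the key/value types, cache size, and prime function below are arbitrary examples, not part of the library):

```go
package main

import (
	"fmt"

	"github.com/bep/lazycache"
)

func main() {
	// A cache holding at most 1000 entries, keyed by int with string values.
	cache := lazycache.New(lazycache.Options[int, string]{MaxEntries: 1000})

	// First call for a key is a miss: the prime function runs (with the
	// cache's lock released) and its result is stored.
	v, found, err := cache.GetOrCreate(42, func(key int) (string, error) {
		return fmt.Sprintf("value-%d", key), nil
	})
	fmt.Println(v, found, err) // value-42 false <nil>

	// Subsequent calls for the same key are hits; the prime function is not called again.
	v, found, _ = cache.GetOrCreate(42, func(key int) (string, error) {
		return "not called", nil
	})
	fmt.Println(v, found) // value-42 true
}
```

If the prime function returns an error, GetOrCreate returns that error and the key is not kept in the cache.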
lazycache-0.4.0/codecov.yml000066400000000000000000000002161454351023200156210ustar00rootroot00000000000000
coverage:
  status:
    project:
      default:
        target: auto
        threshold: 0.5%
    patch: off
comment:
  require_changes: true
lazycache-0.4.0/go.mod000066400000000000000000000006131454351023200145630ustar00rootroot00000000000000
module github.com/bep/lazycache

go 1.18

require (
	github.com/frankban/quicktest v1.14.2
	github.com/hashicorp/golang-lru/v2 v2.0.7
)

require (
	github.com/google/go-cmp v0.5.7 // indirect
	github.com/kr/pretty v0.3.0 // indirect
	github.com/kr/text v0.2.0 // indirect
	github.com/rogpeppe/go-internal v1.6.1 // indirect
	golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 // indirect
)
lazycache-0.4.0/go.sum000066400000000000000000000036321454351023200146140ustar00rootroot00000000000000
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/frankban/quicktest v1.14.2 h1:SPb1KFFmM+ybpEjPUhCCkZOM5xlovT5UbrMvWnXyBns=
github.com/frankban/quicktest v1.14.2/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps=
github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/hashicorp/golang-lru/v2 v2.0.1 h1:5pv5N1lT1fjLg2VQ5KWc7kmucp2x/kvFOnxuVTqZ6x4=
github.com/hashicorp/golang-lru/v2 v2.0.1/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
lazycache-0.4.0/lazycache.go000066400000000000000000000114551454351023200157550ustar00rootroot00000000000000
package lazycache

import (
	"sync"

	"github.com/hashicorp/golang-lru/v2/simplelru"
)

// New creates a new Cache.
func New[K comparable, V any](options Options[K, V]) *Cache[K, V] {
	var onEvict simplelru.EvictCallback[K, *valueWrapper[V]] = nil
	if options.OnEvict != nil {
		onEvict = func(key K, value *valueWrapper[V]) {
			value.wait()
			if value.found {
				options.OnEvict(key, value.value)
			}
		}
	}

	lru, err := simplelru.NewLRU[K, *valueWrapper[V]](int(options.MaxEntries), onEvict)
	if err != nil {
		panic(err)
	}
	c := &Cache[K, V]{
		lru: lru,
	}

	return c
}

// Options holds the cache options.
type Options[K comparable, V any] struct {
	// MaxEntries is the maximum number of entries that the cache should hold.
	// Note that this can also be adjusted after the cache is created with Resize.
	MaxEntries int

	// OnEvict is an optional callback that is called when an entry is evicted.
	OnEvict func(key K, value V)
}

// Cache is a thread-safe resizable LRU cache.
type Cache[K comparable, V any] struct {
	lru *simplelru.LRU[K, *valueWrapper[V]]
	mu  sync.RWMutex

	zerov V
}

// Delete deletes the item with given key from the cache, returning whether the
// key was contained.
func (c *Cache[K, V]) Delete(key K) bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.lru.Remove(key)
}

// DeleteFunc deletes all entries for which the given function returns true.
func (c *Cache[K, V]) DeleteFunc(matches func(key K, item V) bool) int {
	c.mu.RLock()
	keys := c.lru.Keys()

	var keysToDelete []K
	for _, key := range keys {
		w, _ := c.lru.Peek(key)
		if !w.wait().found {
			continue
		}
		if matches(key, w.value) {
			keysToDelete = append(keysToDelete, key)
		}
	}
	c.mu.RUnlock()

	c.mu.Lock()
	defer c.mu.Unlock()
	var deleteCount int
	for _, key := range keysToDelete {
		if c.lru.Remove(key) {
			deleteCount++
		}
	}
	return deleteCount
}

// Get returns the value associated with key.
func (c *Cache[K, V]) Get(key K) (V, bool) {
	c.mu.Lock()
	w := c.get(key)
	c.mu.Unlock()
	if w == nil {
		return c.zerov, false
	}
	w.wait()
	return w.value, w.found
}

// GetOrCreate returns the value associated with key, or creates it if it doesn't exist.
// It also returns a bool indicating if the value was found in the cache.
// Note that create, the cache prime function, is called once and then not called again for a given key
// unless the cache entry is evicted; it does not block other goroutines from calling GetOrCreate,
// and it is not called with the cache lock held.
// Note that any error returned by create will be returned by GetOrCreate and repeated calls with the same key will
// receive the same error.
func (c *Cache[K, V]) GetOrCreate(key K, create func(key K) (V, error)) (V, bool, error) {
	c.mu.Lock()
	w := c.get(key)
	if w != nil {
		c.mu.Unlock()
		w.wait()
		// If w.ready is nil, we will repeat any error from the create function to concurrent callers.
		return w.value, true, w.err
	}

	w = &valueWrapper[V]{
		ready: make(chan struct{}),
	}

	// Concurrent access to the same key will see w, but needs to wait for w.ready
	// to get the value.
	c.lru.Add(key, w)
	c.mu.Unlock()

	// Create the value with the lock released.
	v, err := create(key)
	w.err = err
	w.value = v
	w.found = err == nil
	close(w.ready)

	if err != nil {
		c.Delete(key)
		return c.zerov, false, err
	}

	return v, false, nil
}

// Resize changes the cache size and returns the number of entries evicted.
func (c *Cache[K, V]) Resize(size int) (evicted int) {
	c.mu.Lock()
	evicted = c.lru.Resize(size)
	c.mu.Unlock()
	return evicted
}

// Set associates value with key.
func (c *Cache[K, V]) Set(key K, value V) {
	c.mu.Lock()
	c.lru.Add(key, &valueWrapper[V]{value: value, found: true})
	c.mu.Unlock()
}

func (c *Cache[K, V]) get(key K) *valueWrapper[V] {
	w, ok := c.lru.Get(key)
	if !ok {
		return nil
	}
	return w
}

// contains returns true if the given key is in the cache.
// note that this will also return true if the key is in the cache but the value is not yet ready.
func (c *Cache[K, V]) contains(key K) bool {
	c.mu.RLock()
	b := c.lru.Contains(key)
	c.mu.RUnlock()
	return b
}

// keys returns a slice of the keys in the cache, oldest first.
// note that this will also include keys that are not yet ready.
func (c *Cache[K, V]) keys() []K {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.lru.Keys()
}

// len returns the number of items in the cache.
// note that this will also include values that are not yet ready.
func (c *Cache[K, V]) len() int {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.lru.Len()
}

// valueWrapper holds a cache value that is not available unless the ready channel is nil or closed.
// This construct makes more sense if you look at the code in GetOrCreate.
type valueWrapper[V any] struct {
	value V
	found bool
	err   error
	ready chan struct{}
}

func (w *valueWrapper[V]) wait() *valueWrapper[V] {
	if w.ready != nil {
		<-w.ready
	}
	return w
}
lazycache-0.4.0/lazycache_test.go000066400000000000000000000211451454351023200170110ustar00rootroot00000000000000
package lazycache

import (
	"errors"
	"fmt"
	"math/rand"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	qt "github.com/frankban/quicktest"
)

func TestCache(t *testing.T) {
	c := qt.New(t)

	cache := New[int, any](Options[int, any]{MaxEntries: 10})

	get := func(key int) any {
		v, found := cache.Get(key)
		if !found {
			return nil
		}
		return v
	}

	c.Assert(get(123456), qt.IsNil)
	cache.Set(123456, 32)
	c.Assert(get(123456), qt.Equals, 32)

	for i := 0; i < 20; i++ {
		cache.Set(i, i)
	}
	c.Assert(get(123456), qt.IsNil)

	c.Assert(cache.Resize(5), qt.Equals, 5)
	c.Assert(get(3), qt.IsNil)
	c.Assert(cache.contains(18), qt.IsTrue)
	c.Assert(cache.keys(), qt.DeepEquals, []int{15, 16, 17, 18, 19})

	c.Assert(cache.DeleteFunc(
		func(key int, value any) bool {
			return value.(int) > 15
		},
	), qt.Equals, 4)

	c.Assert(cache.contains(18), qt.IsFalse)
	c.Assert(cache.contains(15), qt.IsTrue)

	c.Assert(cache.Delete(15), qt.IsTrue)
	c.Assert(cache.Delete(15), qt.IsFalse)
	c.Assert(cache.contains(15), qt.IsFalse)
	c.Assert(cache.len(), qt.Equals, 0)

	c.Assert(func() { New[int, any](Options[int, any]{MaxEntries: -1}) }, qt.PanicMatches, "must provide a positive size")
}

func TestDeleteFunc(t *testing.T) {
	c := qt.New(t)

	c.Run("Basic", func(c *qt.C) {
		cache := New(Options[int, any]{MaxEntries: 1000})
		for i := 0; i < 10; i++ {
			cache.Set(i, i)
		}

		c.Assert(cache.DeleteFunc(func(key int, value any) bool {
			return key%2 == 0
		}), qt.Equals, 5)

		c.Assert(cache.len(), qt.Equals, 5)
	})

	c.Run("Temporary", func(c *qt.C) {
		var wg sync.WaitGroup

		// There's some timing involved in this test, so we'll need
		// to retry a few times to cover all the cases.
		for i := 0; i < 100; i++ {
			cache := New(Options[int, any]{MaxEntries: 1000})
			for i := 0; i < 10; i++ {
				cache.Set(i, i)
			}

			wg.Add(1)
			go func() {
				defer wg.Done()
				for i := 10; i < 30; i++ {
					v, _, err := cache.GetOrCreate(i, func(key int) (any, error) {
						if key%2 == 0 {
							return nil, errors.New("failed")
						}
						time.Sleep(10 * time.Microsecond)
						return key, nil
					})
					if err != nil {
						c.Assert(err, qt.ErrorMatches, "failed")
					} else {
						c.Assert(v, qt.Equals, i)
					}
				}
			}()

			time.Sleep(3 * time.Microsecond)

			c.Assert(cache.DeleteFunc(func(key int, value any) bool {
				return key%2 == 0
			}), qt.Equals, 5)
		}
		wg.Wait()
	})
}

func TestGetOrCreate(t *testing.T) {
	c := qt.New(t)

	cache := New(Options[int, any]{MaxEntries: 100})

	counter := 0
	create := func(key int) (any, error) {
		counter++
		return fmt.Sprintf("value-%d-%d", key, counter), nil
	}

	for i := 0; i < 3; i++ {
		res, found, err := cache.GetOrCreate(123456, create)
		c.Assert(err, qt.IsNil)
		c.Assert(res, qt.Equals, "value-123456-1")
		c.Assert(found, qt.Equals, i > 0)
	}

	v, found := cache.Get(123456)
	c.Assert(found, qt.IsTrue)
	c.Assert(v, qt.Equals, "value-123456-1")
}

func TestGetOrCreateError(t *testing.T) {
	c := qt.New(t)

	cache := New(Options[int, any]{MaxEntries: 100})

	create := func(key int) (any, error) {
		return nil, fmt.Errorf("failed")
	}

	res, _, err := cache.GetOrCreate(123456, create)
	c.Assert(err, qt.ErrorMatches, "failed")
	c.Assert(res, qt.IsNil)
}

func TestOnEvict(t *testing.T) {
	c := qt.New(t)

	var onEvictCalled bool
	cache := New(Options[int, any]{MaxEntries: 20, OnEvict: func(key int, value any) {
		onEvictCalled = true
	}})

	create := func(key int) (any, error) {
		return key, nil
	}

	for i := 0; i < 25; i++ {
		cache.GetOrCreate(i, create)
	}

	c.Assert(onEvictCalled, qt.IsTrue)
}

func TestGetOrCreateConcurrent(t *testing.T) {
	c := qt.New(t)

	cache := New(Options[int, any]{MaxEntries: 1000})

	var countersmu sync.Mutex
	counters := make(map[int]int)
	create := func(key int) (any, error) {
		countersmu.Lock()
		count := counters[key]
		counters[key]++
		countersmu.Unlock()
		time.Sleep(time.Duration(rand.Intn(40)+1) * time.Millisecond)
		return fmt.Sprintf("%v-%d", key, count), nil
	}

	var wg sync.WaitGroup

	for i := 0; i < 20; i++ {
		i := i
		expect := fmt.Sprintf("%d-0", i)
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 12; j++ {
				res, _, err := cache.GetOrCreate(i, create)
				c.Assert(err, qt.IsNil)
				c.Assert(res, qt.Equals, expect)
			}
		}()
	}

	for i := 0; i < 20; i++ {
		i := i
		expect := fmt.Sprintf("%d-0", i)
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 12; j++ {
				res, found := cache.Get(i)
				// The value may be nil if GetOrCreate has not been called for i yet.
				if found {
					c.Assert(res, qt.Equals, expect)
				}
			}
		}()
	}

	wg.Wait()
}

func TestGetOrCreateRecursive(t *testing.T) {
	c := qt.New(t)

	var wg sync.WaitGroup

	n := 200

	for i := 0; i < 30; i++ {
		cache := New(Options[int, any]{MaxEntries: 1000})
		for j := 0; j < 10; j++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for k := 0; k < 10; k++ {
					// This test was added to test a deadlock situation with nested GetOrCreate calls on the same cache.
					// Note that the keys below are carefully selected to not overlap, as this case may still deadlock:
					// goroutine 1: GetOrCreate(1) => GetOrCreate(2)
					// goroutine 2: GetOrCreate(2) => GetOrCreate(1)
					key1, key2 := rand.Intn(n), rand.Intn(n)+n
					if key2 == key1 {
						key2++
					}
					shouldFail := key1%10 == 0
					v, _, err := cache.GetOrCreate(key1, func(key int) (any, error) {
						if shouldFail {
							return nil, fmt.Errorf("failed")
						}
						v, _, err := cache.GetOrCreate(key2, func(key int) (any, error) {
							return "inner", nil
						})
						c.Assert(err, qt.IsNil)
						return v, nil
					})
					if shouldFail {
						c.Assert(err, qt.ErrorMatches, "failed")
						c.Assert(v, qt.IsNil)
					} else {
						c.Assert(err, qt.IsNil)
						c.Assert(v, qt.Equals, "inner")
					}
				}
			}()
		}
		wg.Wait()
	}
}

func BenchmarkGetOrCreateAndGet(b *testing.B) {
	const maxSize = 1000

	cache := New(Options[int, any]{MaxEntries: maxSize})

	r := rand.New(rand.NewSource(99))
	var mu sync.Mutex

	// Partially fill the cache.
	for i := 0; i < maxSize/2; i++ {
		cache.Set(i, i)
	}
	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		b.ResetTimer()
		for pb.Next() {
			mu.Lock()
			i1, i2 := r.Intn(maxSize), r.Intn(maxSize)
			mu.Unlock()

			// Just Get the value.
			v, found := cache.Get(i1)
			if found && v != i1 {
				b.Fatalf("got %v, want %v", v, i1)
			}

			res2, _, err := cache.GetOrCreate(i2, func(key int) (any, error) {
				if i2%100 == 0 {
					// Simulate a slow create.
					time.Sleep(1 * time.Second)
				}
				return i2, nil
			})
			if err != nil {
				b.Fatal(err)
			}
			if v := res2; v != i2 {
				b.Fatalf("got %v, want %v", v, i2)
			}
		}
	})
}

func BenchmarkGetOrCreate(b *testing.B) {
	const maxSize = 1000

	r := rand.New(rand.NewSource(99))
	var mu sync.Mutex

	cache := New(Options[int, any]{MaxEntries: maxSize})

	// Partially fill the cache.
	for i := 0; i < maxSize/3; i++ {
		cache.Set(i, i)
	}
	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			mu.Lock()
			key := r.Intn(maxSize)
			mu.Unlock()

			v, _, err := cache.GetOrCreate(key, func(int) (any, error) {
				if key%100 == 0 {
					// Simulate a slow create.
					time.Sleep(1 * time.Second)
				}
				return key, nil
			})
			if err != nil {
				b.Fatal(err)
			}
			if v != key {
				b.Fatalf("got %v, want %v", v, key)
			}
		}
	})
}

func BenchmarkCacheSerial(b *testing.B) {
	const maxSize = 1000

	b.Run("Set", func(b *testing.B) {
		cache := New(Options[int, any]{MaxEntries: maxSize})
		for i := 0; i < b.N; i++ {
			cache.Set(i, i)
		}
	})

	b.Run("Get", func(b *testing.B) {
		cache := New(Options[int, any]{MaxEntries: maxSize})
		numItems := maxSize - 200
		for i := 0; i < numItems; i++ {
			cache.Set(i, i)
		}
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			key := i % numItems
			_, found := cache.Get(key)
			if !found {
				b.Fatalf("unexpected nil value for key %d", key)
			}
		}
	})
}

func BenchmarkCacheParallel(b *testing.B) {
	const maxSize = 1000

	b.Run("Set", func(b *testing.B) {
		cache := New(Options[int, any]{MaxEntries: maxSize})
		var counter uint32
		b.RunParallel(func(pb *testing.PB) {
			for pb.Next() {
				i := int(atomic.AddUint32(&counter, 1))
				cache.Set(i, i)
			}
		})
	})

	b.Run("Get", func(b *testing.B) {
		cache := New(Options[int, any]{MaxEntries: maxSize})
		r := rand.New(rand.NewSource(99))
		var mu sync.Mutex
		numItems := maxSize - 200
		for i := 0; i < numItems; i++ {
			cache.Set(i, i)
		}
		b.RunParallel(func(pb *testing.PB) {
			for pb.Next() {
				mu.Lock()
				key := r.Intn(numItems)
				mu.Unlock()
				_, found := cache.Get(key)
				if !found {
					b.Fatalf("unexpected nil value for key %d", key)
				}
			}
		})
	})
}
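
// ExampleCache_GetOrCreate is a minimal usage sketch of GetOrCreate (the key,
// value type, and cache size here are arbitrary); it is not exercised by the
// tests above and exists only to illustrate the miss-then-hit behaviour.
func ExampleCache_GetOrCreate() {
	cache := New(Options[string, int]{MaxEntries: 10})

	// A miss: the prime function runs and its result is cached.
	v, found, err := cache.GetOrCreate("answer", func(key string) (int, error) {
		return 42, nil
	})
	fmt.Println(v, found, err)

	// A hit: the prime function is not called again for the same key.
	v, found, _ = cache.GetOrCreate("answer", func(key string) (int, error) {
		return 0, nil
	})
	fmt.Println(v, found)

	// Output:
	// 42 false <nil>
	// 42 true
}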