pax_global_header00006660000000000000000000000064145302703600014512gustar00rootroot0000000000000052 comment=c05c87fb46e652fc3e4b75dbfc15b8fa69be7efa juniper-0.15.1/000077500000000000000000000000001453027036000132525ustar00rootroot00000000000000juniper-0.15.1/.github/000077500000000000000000000000001453027036000146125ustar00rootroot00000000000000juniper-0.15.1/.github/workflows/000077500000000000000000000000001453027036000166475ustar00rootroot00000000000000juniper-0.15.1/.github/workflows/fuzz.yml000066400000000000000000000004501453027036000203670ustar00rootroot00000000000000name: Fuzz on: schedule: - cron: '0 8 * * *' jobs: build: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - name: Set up Go uses: actions/setup-go@v2 with: go-version: 1.21 stable: false - name: Build run: bash fuzz.sh juniper-0.15.1/.github/workflows/gen_docs.yml000066400000000000000000000012631453027036000211550ustar00rootroot00000000000000name: Gen Docs on: push: branches: [ main ] jobs: build: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - name: Set up Go uses: actions/setup-go@v2 with: go-version: 1.18.0-beta2 stable: false - name: Generate run: | set -euxo pipefail git config --global user.name 'godoc_gh_pages workflow' git config --global user.email 'godoc_gh_pages@users.noreply.github.com' go run github.com/bradenaw/godoc_gh_pages@latest --out_dir docs/ git add docs/ git commit -am "generate docs" git push origin `git subtree split --prefix docs main`:refs/heads/gh-pages --force juniper-0.15.1/.github/workflows/go1.18.yml000066400000000000000000000005601453027036000203100ustar00rootroot00000000000000name: Go 1.18 on: push: branches: [ main ] pull_request: branches: [ main ] jobs: build: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Set up Go uses: actions/setup-go@v4 with: go-version: '1.18' - name: Build run: go build -v ./... - name: Test run: go test --race ./... 
juniper-0.15.1/.github/workflows/go1.19.yml000066400000000000000000000005601453027036000203110ustar00rootroot00000000000000name: Go 1.19 on: push: branches: [ main ] pull_request: branches: [ main ] jobs: build: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Set up Go uses: actions/setup-go@v4 with: go-version: '1.19' - name: Build run: go build -v ./... - name: Test run: go test --race ./... juniper-0.15.1/.github/workflows/go1.20.yml000066400000000000000000000005601453027036000203010ustar00rootroot00000000000000name: Go 1.20 on: push: branches: [ main ] pull_request: branches: [ main ] jobs: build: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Set up Go uses: actions/setup-go@v4 with: go-version: '1.20' - name: Build run: go build -v ./... - name: Test run: go test --race ./... juniper-0.15.1/.github/workflows/go1.21.yml000066400000000000000000000005601453027036000203020ustar00rootroot00000000000000name: Go 1.21 on: push: branches: [ main ] pull_request: branches: [ main ] jobs: build: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Set up Go uses: actions/setup-go@v4 with: go-version: '1.21' - name: Build run: go build -v ./... - name: Test run: go test --race ./... juniper-0.15.1/LICENSE000066400000000000000000000020561453027036000142620ustar00rootroot00000000000000MIT License Copyright (c) 2021 Braden Walker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. juniper-0.15.1/README.md000066400000000000000000000053361453027036000145400ustar00rootroot00000000000000# Juniper [![Go Reference](https://pkg.go.dev/badge/github.com/bradenaw/juniper.svg)](https://pkg.go.dev/github.com/bradenaw/juniper) [![Go 1.18](https://github.com/bradenaw/juniper/actions/workflows/go1.18.yml/badge.svg)](https://github.com/bradenaw/juniper/actions/workflows/go1.18.yml) [![Go 1.19](https://github.com/bradenaw/juniper/actions/workflows/go1.19.yml/badge.svg)](https://github.com/bradenaw/juniper/actions/workflows/go1.19.yml) [![Go 1.20](https://github.com/bradenaw/juniper/actions/workflows/go1.20.yml/badge.svg)](https://github.com/bradenaw/juniper/actions/workflows/go1.20.yml) [![Go 1.21](https://github.com/bradenaw/juniper/actions/workflows/go1.21.yml/badge.svg)](https://github.com/bradenaw/juniper/actions/workflows/go1.21.yml) [![Fuzz](https://github.com/bradenaw/juniper/actions/workflows/fuzz.yml/badge.svg)](https://github.com/bradenaw/juniper/actions/workflows/fuzz.yml) Juniper is a library of extensions to the Go standard library using generics, including containers, iterators, and streams. - `container/tree` contains a `Map` and `Set` that keep elements in sorted order. They are implemented using a B-tree, which performs better than a binary search tree. - `container/deque` contains a double-ended queue implemented with a ring buffer. 
- `container/xheap` contains a min-heap similar to the standard library's `container/heap` but more ergonomic, along with a `PriorityQueue` that allows setting priorities by key. - `container/xlist` contains a linked-list similar to the standard library's `container/list`, but type-safe. - `xslices` contains some commonly-used slice operations, like `Chunk`, `Reverse`, `Clear`, and `Join`. - `iterator` contains an iterator interface used by the containers, along with functions to manipulate them, like `Map`, `While`, and `Reduce`. - `stream` contains a stream interface, which is an iterator that can fail. Useful for iterating over collections that require I/O. It has most of the same combinators as `iterator`, plus some extras like `Pipe` and `Batch`. - `parallel` contains some shorthand for common uses of goroutines to process slices, iterators, and streams in parallel, like `parallel.MapStream`. - `xsort` contains extensions to the standard library package `sort`. Notably, it also has the definition for `xsort.Less`, which is how custom orderings can be defined for sorting and also for ordered collections like from `container/tree`. - You can probably guess what's in the packages `xerrors`, `xmath`, `xmath/xrand`, `xsync`, and `xtime`. Packages that overlap directly with a standard library package are named the same but with an `x` prefix for "extensions", e.g. `sort` and `xsort`. See the [docs](https://pkg.go.dev/github.com/bradenaw/juniper) for more. juniper-0.15.1/chans/000077500000000000000000000000001453027036000143465ustar00rootroot00000000000000juniper-0.15.1/chans/chans.go000066400000000000000000000054651453027036000160030ustar00rootroot00000000000000// Package chans contains functions for manipulating channels. package chans import ( "context" "reflect" "github.com/bradenaw/juniper/xslices" ) // SendContext sends item on channel c and returns nil, unless ctx expires in which case it returns // ctx.Err(). 
func SendContext[T any](ctx context.Context, c chan<- T, item T) error { select { case <-ctx.Done(): return ctx.Err() case c <- item: return nil } } // RecvContext attempts to receive from channel c. If c is closed before or during, returns (_, // false, nil). If ctx expires before or during, returns (_, _, ctx.Err()). func RecvContext[T any](ctx context.Context, c <-chan T) (T, bool, error) { select { case <-ctx.Done(): var zero T return zero, false, ctx.Err() case item, ok := <-c: return item, ok, nil } } // Merge sends all values from all in channels to out. // // Merge blocks until all ins have closed and all values have been sent. It does not close out. func Merge[T any](out chan<- T, in ...<-chan T) { if len(in) == 1 { for item := range in[0] { out <- item } return } else if len(in) == 2 { merge2(out, in[0], in[1]) return } else if len(in) == 3 { merge3(out, in[0], in[1], in[2]) return } selectCases := xslices.Map(in, func(x <-chan T) reflect.SelectCase { return reflect.SelectCase{ Dir: reflect.SelectRecv, Chan: reflect.ValueOf(x), } }) for { if len(selectCases) == 0 { return } chosen, item, ok := reflect.Select(selectCases) if ok { out <- item.Interface().(T) } else { selectCases = xslices.RemoveUnordered(selectCases, chosen, 1) } } } // Merge special-case with no reflection. func merge2[T any](out chan<- T, in0, in1 <-chan T) { nDone := 0 for { select { case item, ok := <-in0: if ok { out <- item } else { in0 = nil nDone++ if nDone == 2 { return } } case item, ok := <-in1: if ok { out <- item } else { in1 = nil nDone++ if nDone == 2 { return } } } } } // Merge special-case with no reflection. 
func merge3[T any](out chan<- T, in0, in1, in2 <-chan T) { nDone := 0 for { select { case item, ok := <-in0: if ok { out <- item } else { in0 = nil nDone++ if nDone == 3 { return } } case item, ok := <-in1: if ok { out <- item } else { in1 = nil nDone++ if nDone == 3 { return } } case item, ok := <-in2: if ok { out <- item } else { in2 = nil nDone++ if nDone == 3 { return } } } } } // Replicate sends all values sent to src to every channel in dsts. // // Replicate blocks until src is closed and all values have been sent to all dsts. It does not close // dsts. func Replicate[T any](src <-chan T, dsts ...chan<- T) { for item := range src { for _, dst := range dsts { dst <- item } } } juniper-0.15.1/chans/chans_example_test.go000066400000000000000000000017621453027036000205510ustar00rootroot00000000000000package chans_test import ( "fmt" "sync" "github.com/bradenaw/juniper/chans" ) func ExampleMerge() { a := make(chan int) go func() { a <- 0 a <- 1 a <- 2 close(a) }() b := make(chan int) go func() { b <- 5 b <- 6 b <- 7 b <- 8 close(b) }() out := make(chan int) done := make(chan struct{}) go func() { for i := range out { fmt.Println(i) } close(done) }() chans.Merge(out, a, b) close(out) <-done // Unordered output: // 0 // 1 // 2 // 5 // 6 // 7 // 8 } func ExampleReplicate() { in := make(chan int) go func() { in <- 0 in <- 1 in <- 2 in <- 3 close(in) }() var wg sync.WaitGroup wg.Add(2) a := make(chan int) go func() { for i := range a { fmt.Println(i * 2) } wg.Done() }() b := make(chan int) go func() { x := 0 for i := range b { x += i fmt.Println(x) } wg.Done() }() chans.Replicate(in, a, b) close(a) close(b) wg.Wait() // Unordered output: // 0 // 2 // 4 // 6 // 0 // 1 // 3 // 6 } juniper-0.15.1/chans/chans_test.go000066400000000000000000000046271453027036000170410ustar00rootroot00000000000000package chans import ( "math/rand" "runtime" "sync/atomic" "testing" "time" "github.com/bradenaw/juniper/internal/require2" "github.com/bradenaw/juniper/xslices" ) func FuzzMerge(f 
*testing.F) { f.Fuzz(func(t *testing.T, n int, b []byte) { if n > 5 || n <= 0 { return } t.Logf("n = %d", n) out := make(chan byte) ins := make([]chan byte, n) for i := range ins { ins[i] = make(chan byte) } ins2 := xslices.Map(ins, func(c chan byte) <-chan byte { return c }) go func() { Merge(out, ins2...) close(out) }() var inSlice []byte var outSlice []byte done := make(chan struct{}) go func() { for item := range out { outSlice = append(outSlice, item) } close(done) }() Loop: for { if len(b) < 3 { break } idx := int(b[0]) if idx >= len(ins) { break } switch b[1] { case 0: inSlice = append(inSlice, b[2]) ins[idx] <- b[2] case 1: close(ins[idx]) ins = xslices.RemoveUnordered(ins, idx, 1) default: break Loop } b = b[3:] } for _, in := range ins { close(in) } <-done require2.SlicesEqual(t, inSlice, outSlice) }) } func TestStressMerge(t *testing.T) { t.Skip() count := uint64(0) start := time.Now() go func() { for { t.Logf("%s %d", time.Since(start).Round(time.Second), count) time.Sleep(3 * time.Second) } }() for i := 0; i < runtime.GOMAXPROCS(-1); i++ { go func() { r := rand.New(rand.NewSource(time.Now().Unix())) for { n := r.Intn(4) + 1 atomic.AddUint64(&count, 1) out := make(chan byte) ins := make([]chan byte, n) for i := range ins { ins[i] = make(chan byte) } ins2 := xslices.Map(ins, func(c chan byte) <-chan byte { return c }) go func() { Merge(out, ins2...) 
close(out) }() var inS []byte var outS []byte done := make(chan struct{}) go func() { for item := range out { outS = append(outS, item) } close(done) }() for { if len(ins) == 0 { break } idx := r.Intn(len(ins)) switch r.Intn(2) { case 0: v := byte(r.Intn(256)) inS = append(inS, v) ins[idx] <- v case 1: close(ins[idx]) nBefore := len(ins) ins = xslices.RemoveUnordered(ins, idx, 1) require2.Equal(t, len(ins), nBefore-1) } } <-done require2.SlicesEqual(t, inS, outS) } }() } c := make(chan struct{}) <-c } juniper-0.15.1/chans/testdata/000077500000000000000000000000001453027036000161575ustar00rootroot00000000000000juniper-0.15.1/chans/testdata/fuzz/000077500000000000000000000000001453027036000171555ustar00rootroot00000000000000juniper-0.15.1/chans/testdata/fuzz/FuzzMerge/000077500000000000000000000000001453027036000210735ustar00rootroot0000000000000031ad7ffe9ccdf3670ead1115352add5c1f9b9847c9ade353726c041f25abbdd9000066400000000000000000000001051453027036000321720ustar00rootroot00000000000000juniper-0.15.1/chans/testdata/fuzz/FuzzMergego test fuzz v1 int(4) []byte("\x00\x010\x00\x010\x00\x01\x88 \x00") 35368ec77b179c2ae7b3379d2159f97a05bf52a7c2a028cf23c53e343c8b7593000066400000000000000000000001061453027036000314140ustar00rootroot00000000000000juniper-0.15.1/chans/testdata/fuzz/FuzzMergego test fuzz v1 int(4) []byte("\x00\x01=\x00\x010\x00\x010\x00\x010") 4b3caf8fedf34cc4a7e8c9cef0ee18e4556efea151e3dba019a77f98c96b55dc000066400000000000000000000001111453027036000325170ustar00rootroot00000000000000juniper-0.15.1/chans/testdata/fuzz/FuzzMergego test fuzz v1 int(2) []byte("\x00\x01\x00\x00\x01\x00\x0000\x00\x000") 58872eb4fd54bfa1c1ff16814550728ce2dfc8d532b84c6451a826747762c0c1000066400000000000000000000000751453027036000314140ustar00rootroot00000000000000juniper-0.15.1/chans/testdata/fuzz/FuzzMergego test fuzz v1 int(3) []byte("\x00\x010\x00\xee0\x02\x000") 
68339ee60dbae299533e33490df40c22646e18733524f8f44f496981eb4652c3000066400000000000000000000001061453027036000311330ustar00rootroot00000000000000juniper-0.15.1/chans/testdata/fuzz/FuzzMergego test fuzz v1 int(4) []byte("\x00\x010\x00\x010\x00\x01\x88 \x010") 9c748438639367b45e1d2ecb88cd66cee0a253b4151fc7298ee11bbee509389d000066400000000000000000000001061453027036000315670ustar00rootroot00000000000000juniper-0.15.1/chans/testdata/fuzz/FuzzMergego test fuzz v1 int(4) []byte("\x00\x010\x00\x010\x00\x01\x88 \x000") ce5376a3d7c0fb2c30dbf2d7c1c0689ab57856b45491a6f70d5056b4d1f700c0000066400000000000000000000000531453027036000316000ustar00rootroot00000000000000juniper-0.15.1/chans/testdata/fuzz/FuzzMergego test fuzz v1 int(2) []byte("\x02\x000") d1c44ba01efc79fbf09fb88f020eb832fecfce8d59b3486a653e237dd20c55c9000066400000000000000000000000531453027036000322110ustar00rootroot00000000000000juniper-0.15.1/chans/testdata/fuzz/FuzzMergego test fuzz v1 int(3) []byte("\x02z\xec") de56e72f63217f604fa5425cea6c2be8a29a30aaaede2ccac6f3f986d0289351000066400000000000000000000001251453027036000321120ustar00rootroot00000000000000juniper-0.15.1/chans/testdata/fuzz/FuzzMergego test fuzz v1 int(5) []byte("\x03\x010\x0000\x0000\x000\x00\x020000\x00000\x0200") e5c84966ac39cebda994ab3840a5d5256a9d393ed68e8dd588a0bc444b15d2b7000066400000000000000000000001111453027036000317770ustar00rootroot00000000000000juniper-0.15.1/chans/testdata/fuzz/FuzzMergego test fuzz v1 int(2) []byte("\x00\x01\x00\x00\x010\x00\x000\x00\x000") fcfc72958cc76fcae152178010a2803f711e673a4d8f2d74bfb02e95f722a938000066400000000000000000000001251453027036000315470ustar00rootroot00000000000000juniper-0.15.1/chans/testdata/fuzz/FuzzMergego test fuzz v1 int(5) []byte("\x03\x010\x0000\x0000\x0000\x0000\x000\x02000\x0200") 
juniper-0.15.1/container/000077500000000000000000000000001453027036000152345ustar00rootroot00000000000000juniper-0.15.1/container/deque/000077500000000000000000000000001453027036000163375ustar00rootroot00000000000000juniper-0.15.1/container/deque/deque.go000066400000000000000000000115111453027036000177700ustar00rootroot00000000000000// Package deque contains a double-ended queue. package deque import ( "errors" "github.com/bradenaw/juniper/iterator" "github.com/bradenaw/juniper/xmath" ) var errDequeEmpty = errors.New("pop from empty deque") var errDequeModified = errors.New("deque modified during iteration") const ( // A non-empty deque has space for at least this many items. minSize = 16 // When growing a full deque, reallocate with len(d.a)*growFactor. growFactor = 2 ) // Deque is a double-ended queue, allowing push and pop to both the front and back of the queue. // Pushes and pops are amortized O(1). The zero-value is ready to use. Deque should not be copied // after first use. type Deque[T any] struct { // Backing slice for the deque. Empty if the deque is empty. a []T // Index of the first item. front int // Index of the last item. back int gen int } // Len returns the number of items in the deque. func (d *Deque[T]) Len() int { if d.a == nil || d.back == -1 { return 0 } if d.front <= d.back { return d.back - d.front + 1 } return len(d.a) - d.front + d.back + 1 } // Grow allocates sufficient space to add n more items without needing to reallocate. func (d *Deque[T]) Grow(n int) { extraCap := len(d.a) - d.Len() if extraCap < n { d.resize(len(d.a) + n) } } // Shrink reallocates the backing buffer for d, if necessary, so that it fits only the current size // plus at most n extra items. func (d *Deque[T]) Shrink(n int) { if n < 0 { panic("Shrink() with a negative number of extras") } if len(d.a)-d.Len() > n { d.resize(d.Len() + n) } } // PushFront adds item to the front of the deque. 
func (d *Deque[T]) PushFront(item T) { if d.Len() == 0 { d.a = make([]T, minSize) d.a[0] = item d.back = 0 return } d.maybeExpand() d.front = positiveMod(d.front-1, len(d.a)) d.a[d.front] = item d.gen++ } // PushFront adds item to the back of the deque. func (d *Deque[T]) PushBack(item T) { if d.Len() == 0 { d.a = make([]T, minSize) d.a[0] = item d.back = 0 return } d.maybeExpand() d.back = (d.back + 1) % len(d.a) d.a[d.back] = item d.gen++ } // Guarantees that there is room in the deque. func (d *Deque[T]) maybeExpand() { if d.Len() == len(d.a) { d.resize(xmath.Max(minSize, len(d.a)*2)) } } func (d *Deque[T]) resize(n int) { oldLen := d.Len() newA := make([]T, n) if !(d.a == nil || d.back == -1) { if d.front <= d.back { copy(newA, d.a[d.front:d.back+1]) } else { copy(newA, d.a[d.front:]) copy(newA[len(d.a)-d.front:], d.a[:d.back+1]) } } d.a = newA d.front = 0 d.back = oldLen - 1 } // PopFront removes and returns the item at the front of the deque. It panics if the deque is empty. func (d *Deque[T]) PopFront() T { l := d.Len() if l == 0 { panic(errDequeEmpty) } item := d.a[d.front] if l == 1 { d.a = nil d.front = 0 d.back = -1 return item } var zero T d.a[d.front] = zero d.front = (d.front + 1) % len(d.a) d.gen++ return item } // PopBack removes and returns the item at the back of the deque. It panics if the deque is empty. func (d *Deque[T]) PopBack() T { l := d.Len() if l == 0 { panic(errDequeEmpty) } item := d.a[d.back] if l == 1 { d.a = nil d.front = 0 d.back = -1 return item } var zero T d.a[d.back] = zero d.back = positiveMod(d.back-1, len(d.a)) d.gen++ return item } // Front returns the item at the front of the deque. It panics if the deque is empty. func (d *Deque[T]) Front() T { if d.back == -1 { panic("deque index out of range") } return d.a[d.front] } // Back returns the item at the back of the deque. It panics if the deque is empty. func (d *Deque[T]) Back() T { return d.a[d.back] } // Item returns the ith item in the deque. 
0 is the front and d.Len()-1 is the back. func (d *Deque[T]) Item(i int) T { if i < 0 || i >= d.Len() { panic("deque index out of range") } idx := (d.front + i) % len(d.a) return d.a[idx] } // Set sets the ith item in the deque. 0 is the front and d.Len()-1 is the back. func (d *Deque[T]) Set(i int, t T) { if i < 0 || i >= d.Len() { panic("deque index out of range") } idx := (d.front + i) % len(d.a) d.a[idx] = t } func positiveMod(l, d int) int { x := l % d if x < 0 { return x + d } return x } type dequeIterator[T any] struct { d *Deque[T] i int done bool gen int } func (iter *dequeIterator[T]) Next() (T, bool) { if iter.gen != iter.d.gen { panic(errDequeModified) } var zero T if iter.d.Len() == 0 { return zero, false } if iter.done { return zero, false } item := iter.d.a[iter.i] if iter.i == iter.d.back { iter.done = true } iter.i = (iter.i + 1) % len(iter.d.a) return item, true } // Iterate iterates over the elements of the deque. // // The iterator panics if the deque has been modified since iteration started. func (d *Deque[T]) Iterate() iterator.Iterator[T] { return &dequeIterator[T]{ d: d, i: d.front, done: false, gen: d.gen, } } juniper-0.15.1/container/deque/deque_test.go000066400000000000000000000062241453027036000210340ustar00rootroot00000000000000package deque import ( "fmt" "testing" "github.com/bradenaw/juniper/internal/fuzz" "github.com/bradenaw/juniper/internal/require2" "github.com/bradenaw/juniper/iterator" ) func FuzzDeque(f *testing.F) { f.Fuzz(func(t *testing.T, b []byte) { var oracle []byte var deque Deque[byte] fuzz.Operations( b, func() { require2.Equal(t, len(oracle), deque.Len()) t.Logf(" len = %d", len(oracle)) t.Logf(" oracle state: %#v", oracle) t.Logf(" deque state: (len(r.a) = %d) %#v", len(deque.a), deque) }, // check func(x byte) { t.Logf("PushFront(%#v)", x) deque.PushFront(x) oracle = append([]byte{x}, oracle...) 
}, func(x byte) { t.Logf("PushBack(%#v)", x) deque.PushBack(x) oracle = append(oracle, x) }, func() { if len(oracle) == 0 { return } oracleItem := oracle[0] t.Logf("PopFront() -> %#v", oracleItem) oracle = oracle[1:] dequeItem := deque.PopFront() require2.Equal(t, oracleItem, dequeItem) }, func() { if len(oracle) == 0 { return } oracleItem := oracle[len(oracle)-1] t.Logf("PopBack() -> %#v", oracleItem) oracle = oracle[:len(oracle)-1] dequeItem := deque.PopBack() require2.Equal(t, oracleItem, dequeItem) }, func() { if len(oracle) == 0 { t.Log("Front() should panic") func() { defer func() { recover() }() deque.Front() t.FailNow() }() return } oracleItem := oracle[0] t.Logf("Front() -> %#v", oracleItem) dequeItem := deque.Front() require2.Equal(t, oracleItem, dequeItem) }, func() { if len(oracle) == 0 { t.Log("Back() should panic") func() { defer func() { recover() }() deque.Back() t.FailNow() }() return } oracleItem := oracle[len(oracle)-1] t.Logf("Back() -> %#v", oracleItem) dequeItem := deque.Back() require2.Equal(t, oracleItem, dequeItem) }, func(i int) { if i < 0 || i >= len(oracle) { t.Logf("Item(%d) should panic", i) func() { defer func() { recover() }() deque.Item(i) t.FailNow() }() return } oracleItem := oracle[i] t.Logf("Item(%d) -> %#v", i, oracleItem) dequeItem := deque.Item(i) require2.Equal(t, oracleItem, dequeItem) }, func(i int, x byte) { if i < 0 || i >= len(oracle) { t.Logf("Set(%d, x) should panic", i) func() { defer func() { recover() }() deque.Item(i) t.FailNow() }() return } t.Logf("Set(%d, %d)", i, x) oracle[i] = x deque.Set(i, x) }, func() { t.Log("Iterate()") oracleAll := oracle if len(oracleAll) == 0 { oracleAll = nil } dequeAll := iterator.Collect(deque.Iterate()) if len(dequeAll) == 0 { dequeAll = nil } require2.SlicesEqual(t, oracleAll, dequeAll) }, func(n byte) { t.Logf("Grow(%d)", n) deque.Grow(int(n)) }, ) }) } func Example() { var deque Deque[string] deque.PushFront("a") deque.PushFront("b") fmt.Println(deque.PopFront()) 
deque.PushBack("c") deque.PushBack("d") fmt.Println(deque.PopBack()) fmt.Println(deque.PopFront()) // Output: // b // d // a } juniper-0.15.1/container/deque/testdata/000077500000000000000000000000001453027036000201505ustar00rootroot00000000000000juniper-0.15.1/container/deque/testdata/fuzz/000077500000000000000000000000001453027036000211465ustar00rootroot00000000000000juniper-0.15.1/container/deque/testdata/fuzz/FuzzDeque/000077500000000000000000000000001453027036000230705ustar00rootroot000000000000000e6b5039cdfee0ac96768fd480f4ce3df459b2efc4b4e5f6a483d3e054a545c1000066400000000000000000000000431453027036000341260ustar00rootroot00000000000000juniper-0.15.1/container/deque/testdata/fuzz/FuzzDequego test fuzz v1 []byte("70707070") 72531e6b85b76902d3422675767453013308be8f9fffcd7bcb967de222505b15000066400000000000000000000000461453027036000331750ustar00rootroot00000000000000juniper-0.15.1/container/deque/testdata/fuzz/FuzzDequego test fuzz v1 []byte("70!00000000") 7b7d4885c9b49aa51ed822d183ebdd7e7df6b8ccfdc4bf4f4eb571efeeff1826000066400000000000000000000001071453027036000345350ustar00rootroot00000000000000juniper-0.15.1/container/deque/testdata/fuzz/FuzzDequego test fuzz v1 []byte("0000000000000070000000000000000000007\xea002") 7e979f8aefe6ae8a185596b0e4367e5c613a5db09a5ccee8ca967d7c528c5bed000066400000000000000000000001541453027036000342450ustar00rootroot00000000000000juniper-0.15.1/container/deque/testdata/fuzz/FuzzDequego test fuzz v1 []byte("0\x95\x95\x02a\xa4\x97\xcd1\xaa\x8d;\xbc\xa3\x9eD3\xfb\xc0\x00M\xc6\xfb\xaes\n\r;") 90c5c9e175d7a53ecf420f8dd3971579f05696eafb38c76d01430adf864d86f8000066400000000000000000000000751453027036000336050ustar00rootroot00000000000000juniper-0.15.1/container/deque/testdata/fuzz/FuzzDequego test fuzz v1 []byte("0\xcd\xcb\xdf\xc4\xdb\xd60\xca\xca") 
d1a30eda24471b382e49d8f7139f30c0a25b6682aa44ad004d080376676bc926000066400000000000000000000000421453027036000332670ustar00rootroot00000000000000juniper-0.15.1/container/deque/testdata/fuzz/FuzzDequego test fuzz v1 []byte("000070C") f46f2a31137a82cc0820eab458e80abe967b547c6a1973dffc55c5d154d8d9d2000066400000000000000000000000411453027036000336770ustar00rootroot00000000000000juniper-0.15.1/container/deque/testdata/fuzz/FuzzDequego test fuzz v1 []byte("0A\xd5") juniper-0.15.1/container/tree/000077500000000000000000000000001453027036000161735ustar00rootroot00000000000000juniper-0.15.1/container/tree/btree.go000066400000000000000000000661461453027036000176400ustar00rootroot00000000000000package tree import ( "github.com/bradenaw/juniper/iterator" "github.com/bradenaw/juniper/xslices" "github.com/bradenaw/juniper/xsort" ) // Maximum number of children each node can have. const branchFactor = 16 // Maximum number of key/value pairs each node can have. const maxKVs = branchFactor - 1 // < minKVs means we need to merge with a neighboring sibling // // | ┌───────────────────╴size of underfilled node // | │ ┌────────╴size of sibling (any larger and t.steal() would work instead) // | │ │ ┌─╴separator between the two in parent // | ┌─────┴────┐ ┌─┴──┐ ┌┴┐ // thus, (minKVs - 1) + minKVs + 1 <= maxKVs // | └───┬───┘ // | └──╴(any larger and we wouldn't be able to fit everything // | into a single node) // // thus 2*minKVs <= maxKVs, so round-down is appropriate here // // Does not apply to the root. const minKVs = maxKVs / 2 // Invariants: // 1. Every node except the root has n >= minKVs. // 2. The root has n >= 1 if the tree is non-empty. // 3. Every node has node.n+1 children or no children. // - Notably, most nodes are leaves so we can do better space-wise if we can elide the children // array from internal nodes entirely. 
type btree[K, V any] struct { root *node[K, V] less xsort.Less[K] size int // incremented when tree structure changes - used to quickly avoid reseeking cursor moving // through an unchanging tree gen int } func newBtree[K any, V any](less xsort.Less[K]) *btree[K, V] { return &btree[K, V]{ less: less, root: &node[K, V]{}, size: 0, } } // | keys 0 1 2 n-1 | // // | values 0 1 2 n-1 | // // | children 0 1 2 ... n-1 n | // // | | // // | └┬┘ └┬┘ └┬┘ | // // | │ └─╴contains keys greater than keys[0] and less │ | // // | │ than keys[1] │ | // // | │ │ | // // | └─────────────╴contains keys less than keys[0] │ | // // | │ | // // | contains keys greater than keys[n-1]╶────────┘ | // type node[K any, V any] struct { // Odd ordering of fields is for better cache locality, actually does improve performance // slightly. n and the first key are always accessed on search. n int8 keys [maxKVs]K // number of k/v pairs, naturally [1, maxKVs] children [branchFactor]*node[K, V] parent *node[K, V] values [maxKVs]V } func (x *node[K, V]) leaf() bool { return x.children[0] == nil } func (x *node[K, V]) full() bool { return int(x.n) == len(x.keys) } func (t *btree[K, V]) Len() int { return t.size } func (t *btree[K, V]) Put(k K, v V) { curr := t.root for { idx, inNode := t.searchNode(k, curr) if inNode { curr.values[idx] = v return } if curr.leaf() { break } curr = curr.children[idx] } if !curr.full() { t.insertIntoLeaf(curr, k, v) } else { t.overfill(curr, k, v, nil) } t.gen++ t.size++ } func (t *btree[K, V]) Get(k K) V { curr := t.root for curr != nil { idx, inNode := t.searchNode(k, curr) if inNode { return curr.values[idx] } curr = curr.children[idx] } var zero V return zero } func (t *btree[K, V]) Contains(k K) bool { curr := t.root for curr != nil { idx, inNode := t.searchNode(k, curr) if inNode { return true } curr = curr.children[idx] } return false } func (t *btree[K, V]) Delete(k K) { curr := t.root var idx int for { var inNode bool idx, inNode = t.searchNode(k, curr) if 
inNode { break } if curr.leaf() { // already at a leaf and !inNode, so k isn't in the tree return } curr = curr.children[idx] } t.size-- t.gen++ var leaf *node[K, V] if curr.leaf() { removeOne(curr.keys[:int(curr.n)], idx) removeOne(curr.values[:int(curr.n)], idx) curr.n-- if curr.n >= minKVs || t.steal(curr) { return } leaf = curr } else { var replacementK K var replacementV V replacementK, replacementV, leaf = t.removeRightmost(curr.children[idx]) curr.keys[idx] = replacementK curr.values[idx] = replacementV if leaf == nil || t.steal(leaf) { return } } if leaf != t.root { t.merge(leaf) } } func (t *btree[K, V]) First() (K, V) { if t.root.n == 0 { var zeroK K var zeroV V return zeroK, zeroV } leaf := leftmostLeaf(t.root) return leaf.keys[0], leaf.values[0] } func (t *btree[K, V]) Last() (K, V) { if t.root.n == 0 { var zeroK K var zeroV V return zeroK, zeroV } leaf := rightmostLeaf(t.root) return leaf.keys[int(leaf.n)-1], leaf.values[int(leaf.n)-1] } func (t *btree[K, V]) Cursor() cursor[K, V] { c := cursor[K, V]{t: t} return c } // inserts k,v into the non-full leaf x. func (t *btree[K, V]) insertIntoLeaf(x *node[K, V], k K, v V) { idx := 0 for idx < int(x.n) { if t.less(k, x.keys[idx]) { break } idx++ } insertOne(x.keys[:int(x.n)+1], idx, k) insertOne(x.values[:int(x.n)+1], idx, v) x.n++ } // overfill adds k/v and k's right child afterK to an already-full x by splitting x into two. This // adds a separater to x's parent, which may cause it to overflow and also need a split. 
func (t *btree[K, V]) overfill(x *node[K, V], k K, v V, afterK *node[K, V]) { for { all := newAmalgam1(t.less, &x.keys, &x.values, &x.children, k, v, afterK) left := x right := &node[K, V]{} leaf := x.leaf() medianIdx := all.Len() / 2 sepKey := all.Key(medianIdx) sepValue := all.Value(medianIdx) right.n = int8(all.Len() - medianIdx - 1) for i := 0; i < int(right.n); i++ { right.keys[i] = all.Key(medianIdx + 1 + i) right.values[i] = all.Value(medianIdx + 1 + i) } if !leaf { for i := 0; i < int(right.n)+1; i++ { right.children[i] = all.Child(medianIdx + 1 + i) right.children[i].parent = right } } left.n = int8(medianIdx) for i := int(left.n) - 1; i >= 0; i-- { left.keys[i] = all.Key(i) left.values[i] = all.Value(i) } if !leaf { for i := int(left.n); i >= 0; i-- { left.children[i] = all.Child(i) left.children[i].parent = left } } xslices.Clear(left.keys[int(left.n):]) xslices.Clear(left.values[int(left.n):]) xslices.Clear(left.children[int(left.n)+1:]) if x == t.root { parent := &node[K, V]{} parent.keys[0], parent.values[0] = sepKey, sepValue parent.n = 1 parent.children[0] = left left.parent = parent parent.children[1] = right right.parent = parent t.root = parent return } parent := left.parent if !parent.full() { idxInParent := xslices.Index(parent.children[:], left) insertOne(parent.keys[:int(parent.n)+1], idxInParent, sepKey) insertOne(parent.values[:int(parent.n)+1], idxInParent, sepValue) insertOne(parent.children[:int(parent.n)+2], idxInParent+1, right) right.parent = parent parent.n++ return } x = parent k = sepKey v = sepValue afterK = right } } // merge merges x with one of its siblings. // // Assumes that it has a sibling that has n<=minKVs. func (t *btree[K, V]) merge(x *node[K, V]) { left, right := t.siblings(x) if left != nil && left.n <= minKVs { t.mergeTwo(left, x) } else { // implies right != nil && right.n <= minKVs t.mergeTwo(x, right) } } // mergeTwo merges left and right together. 
This removes a node from the parent which may cause it // to be underfilled as well, and will be fixed by stealing or merging. // // Assumes either left or right has n < minKVs and the other has n == minKVs. // // | parent parent | // // | ┌───────────────┐ ┌───────────────┐ | // // | │ a g l │ │ a l │ | // // | └╴•╶─╴•╶─╴•╶─╴•─┘ └╴•╶─╴•╶─╴•╶────┘ | // // | left ┌──────┘ └───────┐ right left │ | // // | ┌───────┴───────┐ ┌───────┴───────┐ ┌───────┴───────┐ | // // | │ c │ │ h │ ╶────> │ c g h │ | // // | └╴•╶─╴•╶─╴•╶─╴•╶┘ └╴•╶─╴•╶────────┘ └╴•╶─╴•╶─╴•╶─╴•╶┘ | // func (t *btree[K, V]) mergeTwo(left, right *node[K, V]) { parent := left.parent idxInParent := xslices.Index(parent.children[:], left) sepKey := parent.keys[idxInParent] sepValue := parent.values[idxInParent] left.keys[int(left.n)] = sepKey copy(left.keys[int(left.n)+1:], right.keys[:int(right.n)]) left.values[int(left.n)] = sepValue copy(left.values[int(left.n)+1:], right.values[:int(right.n)]) copy(left.children[int(left.n)+1:], right.children[:int(right.n)+1]) if !right.leaf() { for i := 0; i < int(right.n)+1; i++ { right.children[i].parent = left } } left.n += right.n + 1 removeOne(parent.keys[:int(parent.n)], idxInParent) removeOne(parent.values[:int(parent.n)], idxInParent) removeOne(parent.children[:int(parent.n)+1], idxInParent+1) parent.n-- // signal to cursors in right that they're lost. right.n = 0 if parent == t.root { if parent.n == 0 { t.root = left left.parent = nil } } else if parent.n < minKVs && !t.steal(parent) { t.merge(parent) } } // removeRightmost finds the rightmost key and value in the subtree rooted by x and removes them. // These are by definition in a leaf. If this caused the leaf to be underfilled, also returns the // leaf they were removed from. 
func (t *btree[K, V]) removeRightmost(x *node[K, V]) (K, V, *node[K, V]) { curr := rightmostLeaf(x) k := curr.keys[int(curr.n)-1] v := curr.values[int(curr.n)-1] var zeroK K curr.keys[int(curr.n)-1] = zeroK var zeroV V curr.values[int(curr.n)-1] = zeroV curr.n-- var out *node[K, V] if curr.n < minKVs { out = curr } return k, v, out } // steal adds one k/v/child to x by taking from one of its siblings if possible. If not, returns // false. func (t *btree[K, V]) steal(x *node[K, V]) bool { left, right := t.siblings(x) if right != nil && right.n > minKVs { t.rotateLeft(x, right) return true } if left != nil && left.n > minKVs { t.rotateRight(left, x) return true } return false } // siblings returns x's immediate left and right siblings, or nil if none exists. func (t *btree[K, V]) siblings(x *node[K, V]) (*node[K, V], *node[K, V]) { if x.parent == nil { return nil, nil } idx := xslices.Index(x.parent.children[:], x) var left, right *node[K, V] if idx > 0 { left = x.parent.children[idx-1] } if idx < int(x.parent.n) { right = x.parent.children[idx+1] } return left, right } // | parent parent | // // | ┌───────────────┐ ┌───────────────┐ | // // | │ [g] │ │ [c] │ | // // | └╴•╶─╴•╶────────┘ └╴•╶─╴•╶────────┘ | // // | left ┌──────┘ └───────┐ right left ┌──────┘ └───────┐ right | // // | ┌───────┴───────┐ ┌───────┴───────┐ ┌───────┴───────┐ ┌───────┴───────┐ | // // | │ a b [c] │ │ h i │ ╶────> │ a b [ ] │ │ [g] h i │ | // // | └╴•╶─╴•╶─╴•╶─[•]┘ └╴•╶─╴•╶─╴•╶────┘ └╴•╶─╴•╶─╴•╶────┘ └[•]─╴•╶─╴•╶─╴•╶┘ | // // | │ │ | // // | │ child │ child | // // | ┌───────┴───────┐ ┌───────┴───────┐ | // // | │ d e f │ │ d e f │ | // // | └╴•╶─╴•╶─╴•╶─╴•╶┘ └╴•╶─╴•╶─╴•╶─╴•╶┘ | // // // (Changes marked with []) // // Assumes left and right are siblings and right is not full. 
func (t *btree[K, V]) rotateRight(left *node[K, V], right *node[K, V]) { idxInParent := xslices.Index(left.parent.children[:], left) oldSepK := left.parent.keys[idxInParent] oldSepV := left.parent.values[idxInParent] child := left.children[left.n] // copy the max key from left up to the separator left.parent.keys[idxInParent] = left.keys[left.n-1] left.parent.values[idxInParent] = left.values[left.n-1] // remove the max key/child from left var zeroK K left.keys[left.n-1] = zeroK var zeroV V left.values[left.n-1] = zeroV left.children[left.n] = nil left.n-- // move the old separator to the minimum key in right insertOne(right.keys[:], 0, oldSepK) insertOne(right.values[:], 0, oldSepV) insertOne(right.children[:], 0, child) if child != nil { child.parent = right } right.n++ } // | parent parent | // // | ┌───────────────┐ ┌───────────────┐ | // // | │ [c] │ │ [g] │ | // // | └╴•╶─╴•╶────────┘ └╴•╶─╴•╶────────┘ | // // | left ┌──────┘ └───────┐ right left ┌──────┘ └───────┐ right | // // | ┌───────┴───────┐ ┌───────┴───────┐ ┌───────┴───────┐ ┌───────┴───────┐ | // // | │ a b [ ] │ │ [g] h i │ ╶────> │ a b [c] │ │ [ ] h i │ | // // | └╴•╶─╴•╶─╴•╶────┘ └[•]─╴•╶─╴•╶─╴•╶┘ └╴•╶─╴•╶─╴•╶─[•]┘ └───╴•╶─╴•╶─╴•╶─┘ | // // | │ │ | // // | │ child │ child | // // | ┌───────┴───────┐ ┌───────┴───────┐ | // // | │ d e f │ │ d e f │ | // // | └╴•╶─╴•╶─╴•╶─╴•╶┘ └╴•╶─╴•╶─╴•╶─╴•╶┘ | // // // (Changes marked with []) // // Assumes left and right are siblings and left is not full. 
func (t *btree[K, V]) rotateLeft(left *node[K, V], right *node[K, V]) { idxInParent := xslices.Index(right.parent.children[:], right) oldSepK := right.parent.keys[idxInParent-1] oldSepV := right.parent.values[idxInParent-1] child := right.children[0] // copy the minimum key in right up to the separator right.parent.keys[idxInParent-1] = right.keys[0] right.parent.values[idxInParent-1] = right.values[0] // remove right's minimum key removeOne(right.keys[:], 0) removeOne(right.values[:], 0) removeOne(right.children[:], 0) right.n-- // move the old separator to the maximum key in left left.keys[left.n] = oldSepK left.values[left.n] = oldSepV left.children[left.n+1] = child if child != nil { child.parent = left } left.n++ } // If inNode is true, idx is the index in x.keys that k is at. If false, idx is the index of the // child to look in. func (t *btree[K, V]) searchNode(k K, x *node[K, V]) (idx int, inNode bool) { // benchmark suggests that linear search is in fact faster than binary search, at least for int // keys and branchFactor <= 32. for i := 0; i < int(x.n); i++ { if t.less(k, x.keys[i]) { return i, false } else if !t.less(x.keys[i], k) { return i, true } } return int(x.n), false } func leftmostLeaf[K any, V any](x *node[K, V]) *node[K, V] { curr := x for { if curr.leaf() { return curr } curr = curr.children[0] } } func rightmostLeaf[K any, V any](x *node[K, V]) *node[K, V] { curr := x for { if curr.leaf() { return curr } curr = curr.children[int(curr.n)] } } func removeOne[T any](a []T, idx int) { copy(a[idx:], a[idx+1:]) var zero T a[len(a)-1] = zero } // Inserts x into a at index idx, shifting the rest of the elements over. Clobbers a[len(a)-1]. // // Faster in this package than slices.Insert for use on node.{keys,values,children} since they never // grow. 
func insertOne[T any](a []T, idx int, x T) { copy(a[idx+1:], a[idx:]) a[idx] = x } type amalgam1[K any, V any] struct { keys *[maxKVs]K values *[maxKVs]V children *[branchFactor]*node[K, V] extraKey K extraValue V extraChild *node[K, V] extraIdx int } // Returns a cheap view that functions like a slice of all of the inputs. Assumes that both arrays // are in sorted order, and all leftKeys are less than sepKey and sepKey is less than all rightKeys. // extraKey may belong in any position. // // Example: // // keys extraKey // [a c e] + d // 0 1 2 3 extraChild // // amalgam // [a c d e] // 0 1 2 extraChild 3 func newAmalgam1[K any, V any]( less xsort.Less[K], keys *[maxKVs]K, values *[maxKVs]V, children *[branchFactor]*node[K, V], extraKey K, extraValue V, extraChild *node[K, V], ) amalgam1[K, V] { extraIdx := func() int { for i := range *keys { if less(extraKey, keys[i]) { return i } } return len(keys) }() return amalgam1[K, V]{ keys: keys, values: values, children: children, extraKey: extraKey, extraValue: extraValue, extraChild: extraChild, extraIdx: extraIdx, } } func (a *amalgam1[K, V]) Len() int { return maxKVs + 1 } func (a *amalgam1[K, V]) Key(i int) K { if i == a.extraIdx { return a.extraKey } else if i > a.extraIdx { i-- } return a.keys[i] } func (a *amalgam1[K, V]) Value(i int) V { if i == a.extraIdx { return a.extraValue } else if i > a.extraIdx { i-- } return a.values[i] } func (a *amalgam1[K, V]) Child(i int) *node[K, V] { if i == a.extraIdx+1 { return a.extraChild } else if i > a.extraIdx+1 { i-- } return a.children[i] } type cursor[K any, V any] struct { t *btree[K, V] // Set to nil when run off the edge. curr *node[K, V] // Index of k in curr. Used to notice when k has been moved or deleted. 
i int // last seen gen of tree gen int k K } func (c *cursor[K, V]) Next() { if c.lost() { c.SeekFirstGreater(c.k) return } if c.curr == nil { return } if c.curr.leaf() { c.i++ if c.i < int(c.curr.n) { c.k = c.curr.keys[c.i] return } } else if c.i < int(c.curr.n) { c.curr = leftmostLeaf(c.curr.children[c.i+1]) c.i = 0 c.k = c.curr.keys[c.i] return } for { if c.curr.parent == nil { c.curr = nil return } idx := xslices.Index(c.curr.parent.children[:], c.curr) c.curr = c.curr.parent c.i = idx if c.i < int(c.curr.n) { c.k = c.curr.keys[c.i] break } } } func (c *cursor[K, V]) Prev() { if c.lost() { c.SeekLastLess(c.k) return } if c.curr == nil { return } if c.curr.leaf() { c.i-- if c.i >= 0 { c.k = c.curr.keys[c.i] return } } else if c.i >= 0 { c.curr = rightmostLeaf(c.curr.children[c.i]) c.i = int(c.curr.n) - 1 c.k = c.curr.keys[c.i] return } for { if c.curr.parent == nil { c.curr = nil return } idx := xslices.Index(c.curr.parent.children[:], c.curr) c.curr = c.curr.parent c.i = idx - 1 if c.i >= 0 { c.k = c.curr.keys[c.i] break } } } func (c *cursor[K, V]) Ok() bool { return c.curr != nil && c.refind() } func (c *cursor[K, V]) Key() K { return c.k } func (c *cursor[K, V]) Value() V { var zero V if !c.refind() { return zero } return c.curr.values[c.i] } func (c *cursor[K, V]) valueUnchecked() V { return c.curr.values[c.i] } func (c *cursor[K, V]) SeekFirst() { if c.t.root.n == 0 { c.curr = nil return } c.curr = leftmostLeaf(c.t.root) c.i = 0 c.k = c.curr.keys[c.i] c.gen = c.t.gen } func (c *cursor[K, V]) SeekLastLess(k K) { if !c.seek(k) { return } if xsort.LessOrEqual(c.t.less, k, c.k) { c.Prev() } } func (c *cursor[K, V]) SeekLastLessOrEqual(k K) { if !c.seek(k) { return } if c.t.less(k, c.k) { c.Prev() } } func (c *cursor[K, V]) SeekFirstGreaterOrEqual(k K) { if !c.seek(k) { return } if xsort.Greater(c.t.less, k, c.k) { c.Next() } } func (c *cursor[K, V]) SeekFirstGreater(k K) { if !c.seek(k) { return } if xsort.GreaterOrEqual(c.t.less, k, c.k) { c.Next() } } func 
(c *cursor[K, V]) SeekLast() { if c.t.root.n == 0 { c.curr = nil return } c.curr = rightmostLeaf(c.t.root) c.i = int(c.curr.n) - 1 c.k = c.curr.keys[c.i] c.gen = c.t.gen } // seek moves the cursor to k or its successor or predecessor if it isn't in the tree. Returns false // if the cursor is now invalid because the tree is empty. func (c *cursor[K, V]) seek(k K) bool { c.curr, c.i, _ = c.find(k) if c.curr == nil { return false } c.k = c.curr.keys[c.i] c.gen = c.t.gen return true } // find looks for k in the tree. It returns the node that k appears in and the index it appears at // and true in the final return. If k is not in the tree, then the final return is false and the // returned node and index of a successor or predecessor of k. // // The returned node is nil if the tree is empty. func (c *cursor[K, V]) find(k K) (*node[K, V], int, bool) { if c.t.root.n == 0 { return nil, 0, false } curr := c.t.root for { idx, inNode := c.t.searchNode(k, curr) if inNode { return curr, idx, true } if curr.leaf() { if idx == int(curr.n) { idx-- } return curr, idx, false } curr = curr.children[idx] } } // refind ensures c.curr[c.i] == c.k if c.k is still in the tree (which could've been made false if // the tree was modified since the cursor found its position) by reseeking. Returns false without // modifying the cursor if c.k isn't in the tree anymore. func (c *cursor[K, V]) refind() bool { if !c.lost() { return true } curr, i, ok := c.find(c.k) if !ok { return false } c.curr = curr c.i = i c.gen = c.t.gen return true } // lost returns true if the tree has been modified in such a way that the cursor has lost its place. func (c *cursor[K, V]) lost() bool { // c.curr == nil implies the cursor is already off the edge of the tree and cannot be lost. // // Otherwise, check that the element of c.curr we're pointed at still contains the key we // expect, since it might've gotten shifted from e.g. deleting the element before this one. 
// Careful: x.keys[i] for i >= x.n is filled with the zero value, and c.k might happen to be the // zero value also. Unlinking a node during merge sets n=0, so that's handled here too. return c.gen != c.t.gen && c.curr != nil && (c.i >= int(c.curr.n) || !xsort.Equal(c.t.less, c.k, c.curr.keys[c.i])) } func (c *cursor[K, V]) Forward() iterator.Iterator[KVPair[K, V]] { return &forwardIterator[K, V]{c: *c} } type forwardIterator[K any, V any] struct { c cursor[K, V] } func (iter *forwardIterator[K, V]) Next() (KVPair[K, V], bool) { if iter.c.lost() { iter.c.SeekFirstGreaterOrEqual(iter.c.Key()) } if iter.c.curr == nil { var zero KVPair[K, V] return zero, false } k := iter.c.Key() // Safe since we already made sure !iter.c.lost() by reseeking above. v := iter.c.valueUnchecked() iter.c.Next() return KVPair[K, V]{k, v}, true } func (c *cursor[K, V]) Backward() iterator.Iterator[KVPair[K, V]] { return &backwardIterator[K, V]{c: *c} } type backwardIterator[K any, V any] struct { c cursor[K, V] } func (iter *backwardIterator[K, V]) Next() (KVPair[K, V], bool) { if iter.c.lost() { iter.c.SeekLastLessOrEqual(iter.c.Key()) } if iter.c.curr == nil { var zero KVPair[K, V] return zero, false } k := iter.c.Key() // Safe since we already made sure !iter.c.lost() by reseeking above. 
v := iter.c.valueUnchecked() iter.c.Prev() return KVPair[K, V]{k, v}, true } func (t *btree[K, V]) Range(lower Bound[K], upper Bound[K]) iterator.Iterator[KVPair[K, V]] { c := t.Cursor() switch lower.type_ { case boundUnbounded: c.SeekFirst() case boundInclude: c.SeekFirstGreaterOrEqual(lower.key) case boundExclude: c.SeekFirstGreater(lower.key) default: panic("unknown bound") } switch upper.type_ { case boundInclude: return iterator.While(c.Forward(), func(pair KVPair[K, V]) bool { return xsort.LessOrEqual(t.less, pair.Key, upper.key) }) case boundExclude: return iterator.While(c.Forward(), func(pair KVPair[K, V]) bool { return t.less(pair.Key, upper.key) }) case boundUnbounded: return c.Forward() default: panic("unknown bound") } } func (t *btree[K, V]) RangeReverse(lower Bound[K], upper Bound[K]) iterator.Iterator[KVPair[K, V]] { c := t.Cursor() switch upper.type_ { case boundInclude: c.SeekLastLessOrEqual(upper.key) case boundExclude: c.SeekLastLess(upper.key) case boundUnbounded: c.SeekLast() default: panic("unknown bound") } switch lower.type_ { case boundInclude: return iterator.While(c.Backward(), func(pair KVPair[K, V]) bool { return xsort.GreaterOrEqual(t.less, pair.Key, lower.key) }) case boundExclude: return iterator.While(c.Backward(), func(pair KVPair[K, V]) bool { return xsort.Greater(t.less, pair.Key, lower.key) }) case boundUnbounded: return c.Backward() default: panic("unknown bound") } } juniper-0.15.1/container/tree/btree_benchmark_test.go000066400000000000000000000275531453027036000227100ustar00rootroot00000000000000package tree import ( "fmt" "testing" "github.com/bradenaw/juniper/iterator" "github.com/bradenaw/juniper/xmath/xrand" "github.com/bradenaw/juniper/xsort" ) // Run under WSL since I don't have a native Linux machine handy at the moment. // // Obviously not 100% scientific since there are more dimensions than represented here. 
The builtin // map reallocates at different points, so its alloc/op is sort of random numbers depending on how // well the benchmark size fits. These are all for int keys and values using the builtin < to // compare, and things may shift for different sized types and differently complex comparisons. // // However, in this small study, the btree map is about half as fast as the builtin hashmap for gets // and puts, and will usually use less memory. The builtin is (expectedly) about 4.5x faster at // replacing keys that are already present. // // The purpose was to pick branchFactor. Too small and we waste more space and, with more // allocations, put more strain on GC. Too large we spend more time searching and shifting inside // nodes. branchFactor=32 requires a similar number of objects to the builtin map, which may have // visible advantages for GC in a real program. Unfortunately, we'll have to wait until there's a // real program using it to find out which does better. branchFactor=16 seems like a decent balance // for now, it's nearly as small memory-wise and is a little faster than branchFactor=32 at nearly // everything. // // In addition to drastically reducing allocations, this B-tree implementation drastically // outperformed a now-removed AVL tree implementation on everything except Get, which branchFactor=4 // is about 10% slower than and branchFactor=16 is about 20% slower than. Outperformance on writes // is unsurprising, since allocation is a significant cost. Reads get better cache locality with the // B-tree, but have to call less more. 
// // // goos: linux // goarch: amd64 // cpu: Intel(R) Core(TM) i7-10700K CPU @ 3.80GHz // // benchmark size builtin map btree // // branchFactor=4 branchFactor=8 branchFactor=16 branchFactor=32 // // time ──────────────────────────────────────────────────────────────────────────────────────────────────────────────────── // // Get 10 10.7ns 15.03 ns/op 17.00 ns/op 21.27 ns/op 21.25 ns/op // // 100 11.2ns 25.79 ns/op 25.93 ns/op 36.56 ns/op 39.52 ns/op // // 1000 16.2ns 48.51 ns/op 50.57 ns/op 58.23 ns/op 67.26 ns/op // // 10000 22.9ns 63.11 ns/op 63.58 ns/op 71.59 ns/op 84.33 ns/op // // 100000 28.1ns 70.84 ns/op 74.67 ns/op 83.78 ns/op 102.1 ns/op // // 1000000 51.5ns 81.46 ns/op 89.05 ns/op 94.68 ns/op 116.0 ns/op // // Put 10 42.6ns 54.55 ns/op 49.36 ns/op 41.41 ns/op 45.82 ns/op // // 100 55.8ns 72.56 ns/op 63.41 ns/op 65.07 ns/op 88.99 ns/op // // 1000 65.0ns 127.0 ns/op 107.4 ns/op 108.3 ns/op 123.6 ns/op // // 10000 60.0ns 158.3 ns/op 131.2 ns/op 135.7 ns/op 154.2 ns/op // // 100000 61.9ns 217.6 ns/op 183.6 ns/op 181.5 ns/op 203.9 ns/op // // 1000000 112ns 386.5 ns/op 308.8 ns/op 264.2 ns/op 297.2 ns/op // // PutAlreadyPresent 10 13.7ns 18.19 ns/op 17.68 ns/op 21.92 ns/op 21.40 ns/op // // 100 15.2ns 27.27 ns/op 28.76 ns/op 37.43 ns/op 44.87 ns/op // // 1000 20.3ns 65.64 ns/op 56.90 ns/op 62.54 ns/op 75.38 ns/op // // 10000 25.8ns 104.3 ns/op 86.67 ns/op 86.52 ns/op 107.1 ns/op // // 100000 32.1ns 155.1 ns/op 126.1 ns/op 127.8 ns/op 142.7 ns/op // // 1000000 62.4ns 385.3 ns/op 293.5 ns/op 281.0 ns/op 270.7 ns/op // // Iterate 10 125.9 ns/op 171.8 ns/op 160.1 ns/op 150.3 ns/op 150.3 ns/op // // 100 1068 ns/op 1611 ns/op 1257 ns/op 1058 ns/op 947.4 ns/op // // 1000 12572 ns/op 14581 ns/op 12062 ns/op 10375 ns/op 9348 ns/op // // 10000 109337 ns/op 189691 ns/op 142145 ns/op 112312 ns/op 96263 ns/op // // 100000 1028813 ns/op 2060605 ns/op 1447266 ns/op 1145753 ns/op 984982 ns/op // // 1000000 13181522 ns/op 66242895 ns/op 33671273 ns/op 19829590 ns/op 14673612 
ns/op // // // // alloc bytes ───────────────────────────────────────────────────────────────────────────────────────────────────────────── // // Put 10 48 B/op 50 B/op 60 B/op 40 B/op 79 B/op // // 100 55 B/op 47 B/op 40 B/op 34 B/op 46 B/op // // 1000 86 B/op 50 B/op 40 B/op 36 B/op 37 B/op // // 10000 68 B/op 49 B/op 40 B/op 37 B/op 35 B/op // // 100000 57 B/op 50 B/op 40 B/op 37 B/op 36 B/op // // 1000000 86 B/op 49 B/op 40 B/op 37 B/op 35 B/op // // // // alloc objects ─────────────────────────────────────────────────────────────────────────────────────────────────────────── // // Build 10 1 3 1 1 1 // // 100 16 50 17 8 3 // // 1000 64 518 170 96 37 // // 10000 276 5.1K 1.7K 971 375 // // 100000 4.0K 51K 16.9K 9.6K 3.7K // // 1000000 38.0K 514K 169K 96K 37K // var sizes = []int{10, 100, 1_000, 10_000, 100_000, 1_000_000} func BenchmarkBtreeMapGet(b *testing.B) { for _, size := range sizes { m := NewMap[int, int](xsort.OrderedLess[int]) for i := 0; i < size; i++ { m.Put(i, i) } b.Run(fmt.Sprintf("Size=%d,BranchFactor=%d", size, branchFactor), func(b *testing.B) { for i := 0; i < b.N; i++ { _ = m.Get(i % size) } }) } } func BenchmarkBuiltinMapGet(b *testing.B) { for _, size := range sizes { m := make(map[int]int, size) for i := 0; i < size; i++ { m[i] = i } b.Run(fmt.Sprintf("Size=%d", size), func(b *testing.B) { for i := 0; i < b.N; i++ { _ = m[i%size] } }) } } func BenchmarkBtreeMapPut(b *testing.B) { for _, size := range sizes { m := NewMap[int, int](xsort.OrderedLess[int]) keys := iterator.Collect(iterator.Counter(size)) xrand.Shuffle(keys) b.Run(fmt.Sprintf("Size=%d,BranchFactor=%d", size, branchFactor), func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { m.Put(keys[i%size], i) if m.Len() == size { m = NewMap[int, int](xsort.OrderedLess[int]) } } }) } } func BenchmarkBuiltinMapPut(b *testing.B) { for _, size := range sizes { m := make(map[int]int) keys := iterator.Collect(iterator.Counter(size)) xrand.Shuffle(keys) b.Run(fmt.Sprintf("Size=%d", 
size), func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { m[keys[i%size]] = i if len(m) == size { m = make(map[int]int) } } }) } } func BenchmarkBtreeMapPutAlreadyPresent(b *testing.B) { for _, size := range sizes { m := NewMap[int, int](xsort.OrderedLess[int]) keys := iterator.Collect(iterator.Counter(size)) xrand.Shuffle(keys) for _, k := range keys { m.Put(k, 0) } b.Run(fmt.Sprintf("Size=%d,BranchFactor=%d", size, branchFactor), func(b *testing.B) { for i := 0; i < b.N; i++ { m.Put(keys[i%size], i) } }) } } func BenchmarkBuiltinMapPutAlreadyPresent(b *testing.B) { for _, size := range sizes { m := make(map[int]int) keys := iterator.Collect(iterator.Counter(size)) xrand.Shuffle(keys) for _, k := range keys { m[k] = 0 } b.Run(fmt.Sprintf("Size=%d", size), func(b *testing.B) { for i := 0; i < b.N; i++ { m[keys[i%size]] = i } }) } } func BenchmarkBtreeMapIterate(b *testing.B) { for _, size := range sizes { m := NewMap[int, int](xsort.OrderedLess[int]) keys := iterator.Collect(iterator.Counter(size)) xrand.Shuffle(keys) for i, k := range keys { m.Put(k, i) } b.Run(fmt.Sprintf("Size=%d,BranchFactor=%d", size, branchFactor), func(b *testing.B) { for i := 0; i < b.N; i++ { iter := m.Iterate() for { _, ok := iter.Next() if !ok { break } } } }) } } func BenchmarkBuiltinMapIterate(b *testing.B) { for _, size := range sizes { m := make(map[int]int) keys := iterator.Collect(iterator.Counter(size)) xrand.Shuffle(keys) for i, k := range keys { m[k] = i } b.Run(fmt.Sprintf("Size=%d", size), func(b *testing.B) { for i := 0; i < b.N; i++ { for _, _ = range m { } } }) } } func BenchmarkBtreeMapBuild(b *testing.B) { for _, size := range sizes { keys := iterator.Collect(iterator.Counter(size)) b.Run(fmt.Sprintf("Size=%d,BranchFactor=%d", size, branchFactor), func(b *testing.B) { b.ReportAllocs() for i := 1; i < b.N; i++ { b.StopTimer() xrand.Shuffle(keys) m := NewMap[int, int](xsort.OrderedLess[int]) b.StartTimer() for j := 0; j < size; j++ { m.Put(keys[j], j) } } }) } 
} func BenchmarkBuiltinMapBuild(b *testing.B) { for _, size := range sizes { keys := iterator.Collect(iterator.Counter(size)) b.Run(fmt.Sprintf("Size=%d", size), func(b *testing.B) { b.ReportAllocs() for i := 1; i < b.N; i++ { b.StopTimer() xrand.Shuffle(keys) m := make(map[int]int) b.StartTimer() for j := 0; j < size; j++ { m[keys[j]] = j } } }) } } juniper-0.15.1/container/tree/btree_test.go000066400000000000000000000435631453027036000206750ustar00rootroot00000000000000package tree import ( "fmt" "strings" "testing" "github.com/bradenaw/juniper/internal/fuzz" "github.com/bradenaw/juniper/internal/orderedhashmap" "github.com/bradenaw/juniper/internal/require2" "github.com/bradenaw/juniper/iterator" "github.com/bradenaw/juniper/xslices" "github.com/bradenaw/juniper/xsort" ) func orderedhashmapKVPairToKVPair[K any, V any](kv orderedhashmap.KVPair[uint16, int]) KVPair[uint16, int] { return KVPair[uint16, int]{kv.K, kv.V} } func FuzzBtree(f *testing.F) { f.Fuzz(func(t *testing.T, b []byte) { tree := newBtree[uint16, int](xsort.OrderedLess[uint16]) cursor := tree.Cursor() cursor.SeekFirst() oracle := orderedhashmap.NewMap[uint16, int](xsort.OrderedLess[uint16]) oracleCursor := oracle.Cursor() ctr := 0 fuzz.Operations( b, func() { // check t.Log(treeToString(tree)) if oracleCursor.Ok() { t.Logf("oracleCursor @ %#v", oracleCursor.Key()) } else { t.Log("oracleCursor off the edge") } checkTree(t, tree) require2.Equal(t, oracle.Len(), tree.Len()) oraclePairs := iterator.Collect( iterator.Map(oracle.Cursor().Forward(), orderedhashmapKVPairToKVPair[uint16, int]), ) xsort.Slice(oraclePairs, func(a, b KVPair[uint16, int]) bool { return a.Key < b.Key }) c := tree.Cursor() c.SeekFirst() treePairs := iterator.Collect(c.Forward()) require2.SlicesEqual(t, oraclePairs, treePairs) require2.Equalf(t, oracleCursor.Ok(), cursor.Ok(), "cursor.Ok()") if oracleCursor.Ok() { require2.Equal(t, oracleCursor.Key(), cursor.Key()) require2.Equal(t, oracleCursor.Value(), cursor.Value()) } }, 
func(k uint16) { v := ctr t.Logf("tree.Put(%#v, %#v)", k, v) tree.Put(k, v) oracle.Put(k, v) ctr++ }, func(k uint16) { expected := oracle.Get(k) t.Logf("tree.Get(%#v) -> %#v", k, expected) actual := tree.Get(k) require2.Equal(t, expected, actual) }, func(k uint16) { t.Logf("tree.Delete(%#v)", k) tree.Delete(k) oracle.Delete(k) }, func(k uint16) { oracleOk := oracle.Contains(k) t.Logf("tree.Contains(%#v) -> %t", k, oracleOk) treeOk := tree.Contains(k) require2.Equal(t, oracleOk, treeOk) }, func() { t.Logf("tree.First()") k, v := tree.First() expectedK, expectedV := oracle.First() require2.Equal(t, expectedK, k) require2.Equal(t, expectedV, v) }, func() { t.Logf("tree.Last()") k, v := tree.Last() expectedK, expectedV := oracle.Last() require2.Equal(t, expectedK, k) require2.Equal(t, expectedV, v) }, func() { t.Log("cursor.Next()") cursor.Next() oracleCursor.Next() }, func() { t.Log("cursor.Prev()") cursor.Prev() oracleCursor.Prev() }, func() { t.Log("cursor.SeekFirst()") cursor.SeekFirst() oracleCursor.SeekFirst() }, func(k uint16) { t.Logf("cursor.SeekLastLess(%#v)", k) cursor.SeekLastLess(k) oracleCursor.SeekLastLess(k) }, func(k uint16) { t.Logf("cursor.SeekLastLessOrEqual(%#v)", k) cursor.SeekLastLessOrEqual(k) oracleCursor.SeekLastLessOrEqual(k) }, func(k uint16) { t.Logf("cursor.SeekFirstGreaterOrEqual(%#v)", k) cursor.SeekFirstGreaterOrEqual(k) oracleCursor.SeekFirstGreaterOrEqual(k) }, func(k uint16) { t.Logf("cursor.SeekFirstGreater(%#v)", k) cursor.SeekFirstGreater(k) oracleCursor.SeekFirstGreater(k) }, func() { t.Log("cursor.SeekLast()") cursor.SeekLast() oracleCursor.SeekLast() }, func() { t.Log("cursor.Forward()") kvs := iterator.Collect(cursor.Forward()) expectedKVs := iterator.Collect(iterator.Map( oracleCursor.Forward(), orderedhashmapKVPairToKVPair[uint16, int], )) require2.SlicesEqual(t, expectedKVs, kvs) }, func() { t.Log("cursor.Backward()") kvs := iterator.Collect(cursor.Backward()) expectedKVs := iterator.Collect(iterator.Map( 
oracleCursor.Backward(), orderedhashmapKVPairToKVPair[uint16, int], )) require2.SlicesEqual(t, expectedKVs, kvs) }, ) }) } func TestSplitRoot(t *testing.T) { if branchFactor != 16 { t.Skip("test requires branchFactor 16") } tree := makeTree(t, makeLeaf([]KVPair[byte, int]{ {0, 0}, {1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}, {6, 6}, {7, 7}, {8, 8}, {9, 9}, {10, 10}, {11, 11}, {12, 12}, {13, 13}, {14, 14}, })) require2.Equal(t, 1, numNodes(tree)) tree.Put(15, 15) require2.Equal(t, 3, numNodes(tree)) requireTreesEqual( t, tree, makeTree(t, makeInternal( makeLeaf([]KVPair[byte, int]{ {1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}, {6, 6}, {7, 7}, }), KVPair[byte, int]{8, 8}, makeLeaf([]KVPair[byte, int]{ {9, 9}, {10, 10}, {11, 11}, {12, 12}, {13, 13}, {14, 14}, {15, 15}, }), )), ) } func TestMerge(t *testing.T) { if branchFactor != 16 { t.Skip("test requires branchFactor 16") } tree := makeTree(t, makeInternal( makeLeaf([]KVPair[byte, int]{ {0, 0}, {1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}, {6, 6}, }), KVPair[byte, int]{10, 10}, makeLeaf([]KVPair[byte, int]{ {20, 20}, {21, 21}, {22, 22}, {23, 23}, {24, 24}, {25, 25}, {26, 26}, }), )) require2.Equal(t, tree.root.children[0].n, minKVs) require2.Equal(t, tree.root.children[1].n, minKVs) tree.Delete(23) checkTree(t, tree) requireTreesEqual( t, tree, makeTree(t, makeLeaf([]KVPair[byte, int]{ {0, 0}, {1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}, {6, 6}, {10, 10}, {20, 20}, {21, 21}, {22, 22}, {24, 24}, {25, 25}, {26, 26}, })), ) } func TestRotateRight(t *testing.T) { if branchFactor != 16 { t.Skip("test requires branchFactor 16") } tree := makeTree(t, makeInternal( makeLeaf([]KVPair[byte, int]{ {0, 0}, {1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}, {6, 6}, {7, 7}, }), KVPair[byte, int]{10, 10}, makeLeaf([]KVPair[byte, int]{ {20, 20}, {21, 21}, {22, 22}, {23, 23}, {24, 24}, {25, 25}, {26, 26}, }), )) require2.Equal(t, tree.root.children[0].n, minKVs+1) require2.Equal(t, tree.root.children[1].n, minKVs) tree.Delete(20) checkTree(t, tree) 
requireTreesEqual( t, tree, makeTree(t, makeInternal( makeLeaf([]KVPair[byte, int]{ {0, 0}, {1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}, {6, 6}, }), KVPair[byte, int]{7, 7}, makeLeaf([]KVPair[byte, int]{ {10, 10}, {21, 21}, {22, 22}, {23, 23}, {24, 24}, {25, 25}, {26, 26}, {27, 27}, }), )), ) } func TestRotateLeft(t *testing.T) { if branchFactor != 16 { t.Skip("test requires branchFactor 16") } tree := makeTree(t, makeInternal( makeLeaf([]KVPair[byte, int]{ {0, 0}, {1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}, {6, 6}, }), KVPair[byte, int]{10, 10}, makeLeaf([]KVPair[byte, int]{ {20, 20}, {21, 21}, {22, 22}, {23, 23}, {24, 24}, {25, 25}, {26, 26}, {27, 27}, }), )) require2.Equal(t, tree.root.children[0].n, minKVs) require2.Equal(t, tree.root.children[1].n, minKVs+1) tree.Delete(0) checkTree(t, tree) requireTreesEqual( t, tree, makeTree(t, makeInternal( makeLeaf([]KVPair[byte, int]{ {1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}, {6, 6}, {10, 10}, }), KVPair[byte, int]{20, 20}, makeLeaf([]KVPair[byte, int]{ {21, 21}, {22, 22}, {23, 23}, {24, 24}, {25, 25}, {26, 26}, {27, 27}, }), )), ) } func TestMergeMulti(t *testing.T) { tree := newBtree[uint16, int](xsort.OrderedLess[uint16]) i := 0 for treeHeight(tree) < 3 { tree.Put(uint16(i), i) i++ } for { had := false breadthFirst(tree, func(x *node[uint16, int]) bool { t.Logf("visit(%p)", x) if x.n > minKVs { tree.Delete(x.keys[0]) had = true return false } return true }) require2.Equal(t, treeHeight(tree), 3) checkTree(t, tree) if !had { break } } c := tree.Cursor() c.SeekFirst() expected := iterator.Collect(c.Forward()) nNodesBefore := numNodes(tree) var removed uint16 t.Logf(treeToString(tree)) breadthFirst(tree, func(x *node[uint16, int]) bool { if x.leaf() { removed = x.keys[0] tree.Delete(x.keys[0]) return false } return true }) checkTree(t, tree) t.Logf("removed %#v", removed) t.Logf(treeToString(tree)) expected = xslices.Remove( expected, xslices.IndexFunc(expected, func(pair KVPair[uint16, int]) bool { return pair.Key == removed }), 
1, ) nNodesAfter := numNodes(tree) c = tree.Cursor() c.SeekFirst() actual := iterator.Collect(c.Forward()) require2.SlicesEqual(t, expected, actual) require2.Equal(t, nNodesBefore-3, nNodesAfter) } func breadthFirst[K any, V any](tree *btree[K, V], visit func(*node[K, V]) bool) { queue := []*node[K, V]{tree.root} for len(queue) > 0 { var curr *node[K, V] curr, queue = queue[0], queue[1:] if !visit(curr) { return } if !curr.leaf() { for i := 0; i <= int(curr.n); i++ { queue = append(queue, curr.children[i]) } } } } func requireTreesEqual(t *testing.T, a, b *btree[byte, int]) { eq := func() bool { var visit func(x, y *node[byte, int]) bool visit = func(x, y *node[byte, int]) bool { if (x == nil) != (y == nil) { return false } if x == nil && y == nil { return true } if x.n != y.n { return false } if x.leaf() != y.leaf() { return false } for i := 0; i < int(x.n); i++ { if x.keys[i] != y.keys[i] { return false } if x.values[i] != y.values[i] { return false } } if x.leaf() { for i := 0; i < int(x.n)+1; i++ { if !visit(x.children[i], y.children[i]) { return false } } } return true } return visit(a.root, b.root) }() if !eq { t.Fatalf("%s\n\n%s", treeToStringNoPtr(a), treeToStringNoPtr(b)) } } func makeTree(t *testing.T, root *node[byte, int]) *btree[byte, int] { tree := &btree[byte, int]{ root: root, less: xsort.OrderedLess[byte], } tree.size = numItems(tree) checkTree(t, tree) return tree } func makeInternal(items ...any) *node[byte, int] { x := &node[byte, int]{n: int8(len(items) / 2)} for i := 0; i < int(x.n)+1; i++ { x.children[i] = items[i*2].(*node[byte, int]) x.children[i].parent = x } for i := 0; i < int(x.n); i++ { pair := items[i*2+1].(KVPair[byte, int]) x.keys[i] = pair.Key x.values[i] = pair.Value } return x } func makeLeaf(kvs []KVPair[byte, int]) *node[byte, int] { x := &node[byte, int]{n: int8(len(kvs))} for i := range kvs { x.keys[i] = kvs[i].Key x.values[i] = kvs[i].Value } return x } func pairsRange(min, max byte) []KVPair[byte, int] { var out 
[]KVPair[byte, int] for i := min; i < max; i++ { out = append(out, KVPair[byte, int]{byte(i), int(i)}) } return out } func isZero[T comparable](t T) bool { var zero T return t == zero } func numNodes[K any, V any](tree *btree[K, V]) int { n := 0 var visit func(x *node[K, V]) visit = func(x *node[K, V]) { n++ if x.leaf() { return } for i := 0; i < int(x.n)+1; i++ { visit(x.children[i]) } } visit(tree.root) return n } func numItems[K any, V any](tree *btree[K, V]) int { n := 0 var visit func(x *node[K, V]) visit = func(x *node[K, V]) { n += int(x.n) if x.leaf() { return } for i := 0; i < int(x.n)+1; i++ { visit(x.children[i]) } } visit(tree.root) return n } func treeHeight[K any, V any](tree *btree[K, V]) int { curr := tree.root n := 0 for curr != nil { n += 1 curr = curr.children[0] } return n } func checkTree[K comparable, V comparable](t *testing.T, tree *btree[K, V]) { foundLeaf := false leafDepth := 0 var checkNode func(x *node[K, V], depth int) checkNode = func(x *node[K, V], depth int) { if x.leaf() { for i := 0; i < int(x.n)+1; i++ { require2.Nil(t, x.children[i]) } if !foundLeaf { leafDepth = depth foundLeaf = true } require2.Equal(t, leafDepth, depth) } else { for i := 0; i < int(x.n)+1; i++ { require2.NotNil(t, x.children[i]) require2.Truef( t, x.children[i].parent == x, "%p ─child─> %p ─parent─> %p", x, x.children[i], x.children[i].parent, ) checkNode(x.children[i], depth+1) } for i := 0; i < int(x.n); i++ { left := x.children[i] right := x.children[i+1] k := x.keys[i] require2.True(t, tree.less(left.keys[int(left.n)-1], k)) require2.True(t, tree.less(k, right.keys[0])) } } if x == tree.root { if tree.Len() > 0 { require2.GreaterOrEqual(t, int(x.n), 1) } } else { require2.GreaterOrEqual(t, int(x.n), minKVs) } require2.True(t, xsort.SliceIsSorted(x.keys[:int(x.n)], tree.less)) require2.True(t, xslices.All(x.keys[int(x.n):], isZero[K])) require2.True(t, xslices.All(x.values[int(x.n):], isZero[V])) require2.Truef( t, xslices.All(x.children[int(x.n)+1:], 
isZero[*node[K, V]]), "%p %#v", x, x.children[int(x.n)+1:], ) } require2.NotNil(t, tree.root) require2.Nil(t, tree.root.parent) checkNode(tree.root, 0) } // Returns a graphviz DOT representation of tree. (https://graphviz.org/doc/info/lang.html) func treeToString[K any, V any](tree *btree[K, V]) string { return treeToStringInner(tree, func(x *node[K, V]) string { return fmt.Sprintf("%p", x) }) } func treeToStringNoPtr[K any, V any](tree *btree[K, V]) string { ids := make(map[*node[K, V]]string) ctr := 0 return treeToStringInner(tree, func(x *node[K, V]) string { id, ok := ids[x] if !ok { id = fmt.Sprintf("%d", ctr) ctr++ ids[x] = id } return id }) } func treeToStringInner[K any, V any](tree *btree[K, V], id func(*node[K, V]) string) string { var sb strings.Builder var logNode func(x *node[K, V]) logNode = func(x *node[K, V]) { fmt.Fprintf(&sb, "\tnode%s [label=\"{%s|{", id(x), id(x)) for i := 0; i < int(x.n); i++ { fmt.Fprintf(&sb, " |%#v: %#v|", i, x.keys[i], x.values[i]) } fmt.Fprintf(&sb, " ", x.n) sb.WriteString("}}\"];\n") for i, child := range x.children { if child == nil { continue } fmt.Fprintf(&sb, "\tnode%s:c%d -> node%s;\n", id(x), i, id(child)) } for _, child := range x.children { if child == nil { continue } logNode(child) } } sb.WriteString("digraph btree {\n\tnode [shape=record];\n") logNode(tree.root) sb.WriteString("}") return sb.String() } func TestAmalgam1(t *testing.T) { keys := [maxKVs]byte{} values := [maxKVs]byte{} for i := range keys { keys[i] = byte((i + 1) * 2) values[i] = byte((i + 1) * 4) } children := [branchFactor]*node[byte, byte]{} for i := range children { children[i] = &node[byte, byte]{} } extraChild := &node[byte, byte]{} check := func( extraKey byte, extraValue byte, ) { t.Run(fmt.Sprintf("extraKey=%d,extraValue=%d", extraKey, extraValue), func(t *testing.T) { var expectedKeys [maxKVs + 1]byte copy(expectedKeys[:], keys[:]) var expectedValues [maxKVs + 1]byte copy(expectedValues[:], values[:]) var expectedChildren [branchFactor 
+ 1]*node[byte, byte] copy(expectedChildren[:], children[:]) idx := xsort.Search(expectedKeys[:len(expectedKeys)-1], xsort.OrderedLess[byte], extraKey) xslices.Insert(expectedKeys[:len(expectedKeys)-1], idx, extraKey) xslices.Insert(expectedValues[:len(expectedKeys)-1], idx, extraValue) xslices.Insert(expectedChildren[:len(expectedChildren)-1], idx+1, extraChild) require2.Truef( t, xsort.SliceIsSorted(expectedKeys[:], xsort.OrderedLess[byte]), "%#v", expectedKeys, ) a := newAmalgam1( xsort.OrderedLess[byte], &keys, &values, &children, extraKey, extraValue, extraChild, ) t.Logf("extraIdx=%d", a.extraIdx) var actualKeys [maxKVs + 1]byte var actualValues [maxKVs + 1]byte var actualChildren [branchFactor + 1]*node[byte, byte] for i := 0; i < len(actualKeys); i++ { actualKeys[i] = a.Key(i) actualValues[i] = a.Value(i) actualChildren[i] = a.Child(i) } actualChildren[len(actualChildren)-1] = a.Child(len(actualChildren) - 1) require2.Equal(t, expectedKeys, actualKeys) require2.Equal(t, expectedValues, actualValues) require2.Equal(t, expectedChildren, actualChildren) }) } for i := 0; i < maxKVs+1; i++ { check(byte(i*2+1), byte(i*4+1)) } } func TestRange(t *testing.T) { tree := newBtree[uint16, int](xsort.OrderedLess[uint16]) for i := 0; i < 128; i++ { tree.Put(uint16(i), i) } keys := func(iter iterator.Iterator[KVPair[uint16, int]]) []uint16 { return iterator.Collect(iterator.Map(iter, func(pair KVPair[uint16, int]) uint16 { return pair.Key })) } check := func(lower Bound[uint16], upper Bound[uint16], expected []uint16) { require2.SlicesEqual(t, keys(tree.Range(lower, upper)), expected) r := keys(tree.RangeReverse(lower, upper)) xslices.Reverse(r) require2.SlicesEqual(t, r, expected) } check( Included(uint16(5)), Included(uint16(16)), []uint16{5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}, ) check( Unbounded[uint16](), Excluded(uint16(4)), []uint16{0, 1, 2, 3}, ) check( Excluded(uint16(123)), Unbounded[uint16](), []uint16{124, 125, 126, 127}, ) } func TestGetContains(t 
*testing.T) { tree := newBtree[uint16, int](xsort.OrderedLess[uint16]) for i := 0; i < 128; i++ { tree.Put(uint16(i*2), i*4) } for i := 0; i < 128; i++ { key := uint16(i * 2) require2.True(t, tree.Contains(key)) require2.Equal(t, tree.Get(key), int(key)*2) } for i := 0; i <= 128; i++ { key := uint16(i*2 - 1) require2.True(t, !tree.Contains(key)) require2.Equal(t, tree.Get(key), 0) } } func TestDelete(t *testing.T) { tree := newBtree[uint16, int](xsort.OrderedLess[uint16]) for i := 0; i < 128; i++ { tree.Put(uint16(i)+1, i*2) } require2.Equal(t, tree.Len(), 128) tree.Delete(0) tree.Delete(129) require2.Equal(t, tree.Len(), 128) for tree.Len() > 0 { key := uint16(0) l := tree.Len() if tree.Len()%2 == 0 { key, _ = tree.First() } else { key, _ = tree.Last() } require2.True(t, tree.Contains(key)) tree.Delete(key) require2.True(t, !tree.Contains(key)) require2.Equal(t, tree.Len(), l-1) } } juniper-0.15.1/container/tree/doc.go000066400000000000000000000002371453027036000172710ustar00rootroot00000000000000// Package tree contains an implementation of a B-tree Map and Set. These are similar to Go's map // built-in, but keep elements in sorted order. package tree juniper-0.15.1/container/tree/map.go000066400000000000000000000077551453027036000173150ustar00rootroot00000000000000package tree import ( "github.com/bradenaw/juniper/iterator" "github.com/bradenaw/juniper/xsort" ) type KVPair[K any, V any] struct { Key K Value V } // Map is a tree-structured key-value map, similar to Go's built-in map but keeps elements in sorted // order by key. // // It is safe for multiple goroutines to Put concurrently with keys that are already in the map. type Map[K any, V any] struct { // An extra indirect here so that tree.Map behaves like a reference type like the map builtin. t *btree[K, V] } // NewMap returns a Map that uses less to determine the sort order of keys. If !less(a, b) && // !less(b, a), then a and b are considered the same key. 
The output of less must not change for any // pair of keys while they are in the map. func NewMap[K any, V any](less xsort.Less[K]) Map[K, V] { return Map[K, V]{ t: newBtree[K, V](less), } } // Len returns the number of elements in the map. func (m Map[K, V]) Len() int { return m.t.size } // Put inserts the key-value pair into the map, overwriting the value for the key if it already // exists. func (m Map[K, V]) Put(k K, v V) { m.t.Put(k, v) } // Delete removes the given key from the map. func (m Map[K, V]) Delete(k K) { m.t.Delete(k) } // Get returns the value associated with the given key if it is present in the map. Otherwise, it // returns the zero-value of V. func (m Map[K, V]) Get(k K) V { return m.t.Get(k) } // Contains returns true if the given key is present in the map. func (m Map[K, V]) Contains(k K) bool { return m.t.Contains(k) } // First returns the lowest-keyed entry in the map according to less. func (m Map[K, V]) First() (K, V) { return m.t.First() } // Last returns the highest-keyed entry in the map according to less. func (m Map[K, V]) Last() (K, V) { return m.t.Last() } // Iterate returns an iterator that yields the elements of the map in ascending order by key. // // The map may be safely modified during iteration and the iterator will continue from the // next-lowest key. Thus the iterator will see new elements that are after the current position of // the iterator according to less, but will not necessarily see a consistent snapshot of the state // of the map. func (m Map[K, V]) Iterate() iterator.Iterator[KVPair[K, V]] { return m.Range(Unbounded[K](), Unbounded[K]()) } type boundType int const ( boundInclude boundType = iota + 1 boundExclude boundUnbounded ) // Bound is an endpoint for a range. type Bound[K any] struct { type_ boundType key K } // Included returns a Bound that goes up to and including key. 
func Included[K any](key K) Bound[K] { return Bound[K]{type_: boundInclude, key: key} } // Excluded returns a Bound that goes up to but not including key. func Excluded[K any](key K) Bound[K] { return Bound[K]{type_: boundExclude, key: key} } // Unbounded returns a Bound at the end of the collection. func Unbounded[K any]() Bound[K] { return Bound[K]{type_: boundUnbounded} } // Range returns an iterator that yields the elements of the map between the given bounds in // ascending order by key. // // The map may be safely modified during iteration and the iterator will continue from the // next-lowest key. Thus the iterator will see new elements that are after the current position of // the iterator according to less, but will not necessarily see a consistent snapshot of the state // of the map. func (m Map[K, V]) Range(lower Bound[K], upper Bound[K]) iterator.Iterator[KVPair[K, V]] { return m.t.Range(lower, upper) } // RangeReverse returns an iterator that yields the elements of the map between the given bounds in // descending order by key. // // The map may be safely modified during iteration and the iterator will continue from the // next-lowest key. Thus the iterator will see new elements that are after the current position of // the iterator according to less, but will not necessarily see a consistent snapshot of the state // of the map. func (m Map[K, V]) RangeReverse(lower Bound[K], upper Bound[K]) iterator.Iterator[KVPair[K, V]] { return m.t.RangeReverse(lower, upper) } juniper-0.15.1/container/tree/set.go000066400000000000000000000062021453027036000173150ustar00rootroot00000000000000package tree import ( "github.com/bradenaw/juniper/iterator" "github.com/bradenaw/juniper/xsort" ) // Set is a tree-structured set. Sets are a collection of unique elements. Similar to Go's built-in // map[T]struct{} but keeps elements in sorted order. type Set[T any] struct { // An extra indirect here so that tree.Set behaves like a reference type like the map builtin. 
t *btree[T, struct{}] } // NewSet returns a Set that uses less to determine the sort order of items. If !less(a, b) && // !less(b, a), then a and b are considered the same item. The output of less must not change for // any pair of items while they are in the set. func NewSet[T any](less xsort.Less[T]) Set[T] { return Set[T]{ t: newBtree[T, struct{}](less), } } // Len returns the number of elements in the set. func (s Set[T]) Len() int { return s.t.size } // Add adds item to the set if it is not already present. func (s Set[T]) Add(item T) { s.t.Put(item, struct{}{}) } // Remove removes item from the set if it is present, and does nothing otherwise. func (s Set[T]) Remove(item T) { s.t.Delete(item) } // Contains returns true if item is present in the set. func (s Set[T]) Contains(item T) bool { return s.t.Contains(item) } // First returns the lowest item in the set according to less. func (s Set[T]) First() T { item, _ := s.t.First() return item } // Last returns the highest item in the set according to less. func (s Set[T]) Last() T { item, _ := s.t.Last() return item } // Iterate returns an iterator that yields the elements of the set in ascending order. // // The set may be safely modified during iteration and the iterator will continue from the // next-lowest item. Thus the iterator will see new items that are after the current position // of the iterator according to less, but will not necessarily see a consistent snapshot of the // state of the set. func (s Set[T]) Iterate() iterator.Iterator[T] { return s.Range(Unbounded[T](), Unbounded[T]()) } // Range returns an iterator that yields the elements of the set between the given bounds in // ascending order. // // The set may be safely modified during iteration and the iterator will continue from the // next-lowest item. Thus the iterator will see new items that are after the current position // of the iterator according to less, but will not necessarily see a consistent snapshot of the // state of the set. 
func (s Set[T]) Range(lower Bound[T], upper Bound[T]) iterator.Iterator[T] { return iterator.Map(s.t.Range(lower, upper), func(pair KVPair[T, struct{}]) T { return pair.Key }) } // RangeReverse returns an iterator that yields the elements of the set between the given bounds in // descending order. // // The set may be safely modified during iteration and the iterator will continue from the // next-lowest item. Thus the iterator will see new items that are after the current position // of the iterator according to less, but will not necessarily see a consistent snapshot of the // state of the set. func (s Set[T]) RangeReverse(lower Bound[T], upper Bound[T]) iterator.Iterator[T] { return iterator.Map(s.t.RangeReverse(lower, upper), func(pair KVPair[T, struct{}]) T { return pair.Key }) } juniper-0.15.1/container/tree/testdata/000077500000000000000000000000001453027036000200045ustar00rootroot00000000000000juniper-0.15.1/container/tree/testdata/fuzz/000077500000000000000000000000001453027036000210025ustar00rootroot00000000000000juniper-0.15.1/container/tree/testdata/fuzz/FuzzBtree/000077500000000000000000000000001453027036000227225ustar00rootroot00000000000000009153937833fdc3c5f705f9ff01b85217e89cc4bc1ceca3cdb0c74fea9b2e81000066400000000000000000000000711453027036000336700ustar00rootroot00000000000000juniper-0.15.1/container/tree/testdata/fuzz/FuzzBtreego test fuzz v1 []byte("000100100200700800000000001010") 00c1ffca0ecb5725b8395831e8af3fa7b7fabb7433a1d1ca2d97d741b90ec812000066400000000000000000000000701453027036000337230ustar00rootroot00000000000000juniper-0.15.1/container/tree/testdata/fuzz/FuzzBtreego test fuzz v1 []byte("100000100z00\xd300200000z00z0") 031dc34785fde00f308cf6a0b029b5c6d5fa2594ca59bd9de06e339386925995000066400000000000000000000047471453027036000333440ustar00rootroot00000000000000juniper-0.15.1/container/tree/testdata/fuzz/FuzzBtreego test fuzz v1 
[]byte("?W000Xyq\xe0\x03\xbb\xc1\xb12\xdfd\xae\x9d\x13t\xf4\x91C\x12\xf5\x95\xb1.f\xcb\xff\x14\xa2sM#\x8a\xd3&@V\x8e㡮\x04}F\xba\x83\xae\xec/\xfc+\xa0M\x12\a\x18K/\xd8x\xbbtm\xba-\xa9|!\xd3*6\xcd\xe2\xa4\xe8u\xb4\x90ߒ\xb3.h\xaak\xb0^\xa9y\xb1\xd1\xc1\xbf\a\xce3\xbd\x04\xb5\xaa\x91Τ\xa9\xa9u\xd9#\xe5\xe8\x1b\xec\xea_.\x1b\xfc\xea\x84\x1c\xbff\xb7\xaerÝM\xb4\U00089224]\x94Xs\t\xa0\xf9\xa3\xc7\xe1\xdb\xc1|\xb8[\x8c\xeeI\x8cqOE\xaa\x0f\x06\xd1\xf4/[i\xcf\x1b\xf7L>\xa9\xfd\xa2\x1b\n$\x83\xc4\xc1\xea\x98w\xf1\x9a\xf7\xeb)\xe5RqI\x10\x03\xed\xee\fe\x02\x12w\xbfeKF}\xf5v~\x14\x11`\xbb\xf2\xa4\x8c6S\xc0.\x19\x0f\x14V\xecn\x81it[\xa2\xa1Ѻ\x8b\x03\xa6^\xb5`\xcb\xe6R\xcf\x18\xdc\x01\xfc\xb4\xc2\xc4⾛\x89\xd1\x17\xb8\x99\x15>\x16\x88\x82\xde/\xb1\v\x86\xc0f\x1bUJf\x00s\x94\x8eZ\xbdR)A\xce@\xfczw\x04\x80\xf6\x1b\xc7\xf1\x8e(\xf0̺zυ\xf0\x0eL>p?絴`ä\x10\xd3\xdb7\x94\xbb\x11\x9b\x12\n\xa1\xb0ʪ%\x10\xec\xec \xff\xa0\x89\xbe\b\x87W\x1f\x00W}\xfe]r\xd6}欱}^\x8e\xefΑ5\xf9feX\xcf\x14\xc6'\xf6\xb2j|\b\xfc\xb2\x1a\xc9\a,\xb6m\xef$\r~\xa5\\nj\xc0\xaeDD?d\x9d26\xb5\xf4\xe4\x19\xe1\xe4\x1fɿ\x89\xab\x10d\x97i\x1e-\xf5\xd8O!\x02ۉ\xb6\x9dK\xc4ж\xd6~^x=\xc70n\xca\"\xfa\xba\x910\xea\x05\xd9\xe5О\xcd\xef\x1e#װ\x9c\x17O\x10\xd5\xe7\x1fh,\xa2{\xc2\x17cO\x19\x90\xa9\xc1\xd9z\xef&דٞ*1f.\xad{@(\x8e\x04|S p\xcb\xc4\xf7\x9a\x11\xeb\xbf\fc6m\x16\xbf\x82Z\xae\xb26h\xf9[\xcb\xf4`\xe4\x91 
\xc3\xebb\x8eT\xf7\xd7\xd8X}\x0f=\xe6l\x81\xe8\x99\xcd5\xc1id\x16\xae\xff\x99uk\xf4\x1a\x9a)\xef\f\xd3\x02\xd0\xcb\x13i\x99\n\xe8j\x98}\b\xe7Q\x87v?\uedca&\xff\x9eiROY\xfc\x96E\xe0\xf3\xf3\xf0\x1f\x85\x10q\x9ba\x1e9e\x19ƒ\x16\x896\xaaǓ\x8e\xb2\xc2:\vR7·2q\xb0\x81\x9fp\xc8'\x1c%\x88y\v\x86ž\xffR\\\xb5\x86Jx:\xe7y\x8c8v\v\xf7\u007f\xe0\x9fƘ\x13\x16\xdc\xe7\"]0\xae\x9a\xe1Ш[ff\xd4F1EႧeP\xda\xfd\x05k\xe2\xd0?\xdaQ\xfc\xcd(\x83\x98Dd\x8a\xe2\xbf\ap\x1d\xba\xf3\xcd#\xd65\x04.n\x16l\x89}\x8a\xb2%\b\xa1\xfc\x9dN_\xd7\xee\x81OP\xe5%/\x05)Z\x1f:\x9c\xa84\xa08x\x82\xd2\t\x95K\x1c\xaf\x9e?e\xbc\x8c\x84[ٮ\x99\xbcx\x9f\rM,\x96\xb6s\xcc\u05f7x犿A=\xd2T钖\xa2S\xef\xf8\xcf12\xf9\x884\xa0\x8cw4\xb9p5a\xbc\xbd?JR\xf2\xa1\x188)\xad\xdek\xe1N;\xc0\xfb\xb7k\xb7\xfdP&~6@k\x1a\xd0\xde_\xfe\xa8\xdf\xcfxXC^;0\xcb-&\xe6ʼn\x9d\xce\"\x89\xa23`\x96\xc8!K\xfe|\x9e\x84\xba\xab\x90\a\xaf\xc7\xec\xa0\aA\xa8p{M\xf2K\xc8\xf0\xf9\xc4\xdaX\xab \xe0|wk\xcf\xd2\x00-\xec\x8a\bφ\xa3\xbb\xbd2P\xaf\xb4R\xf7\xf0\xef\xb88S!\x94'\x13F\xc67\xc0\xd3z\xf6\xff\xd3\xe2)V\x13{\xd6t\xb9\x13\x8d\x81\a\xf9\xa9۴\x17\xad\x1d4\xd5&T\xb0\x14~\xd1I(<@\xe3020") 0c53970894bde3a58813c3176068bc71906d5a2224fcacd2ba45355da1e8c078000066400000000000000000000000651453027036000332210ustar00rootroot00000000000000juniper-0.15.1/container/tree/testdata/fuzz/FuzzBtreego test fuzz v1 []byte("0000 00!002007008009001011") 1371a5eb44205936250944fc89b672086edc44d47ce83b92a1c5fb2bb713bf07000066400000000000000000000000551453027036000332250ustar00rootroot00000000000000juniper-0.15.1/container/tree/testdata/fuzz/FuzzBtreego test fuzz v1 []byte("0\xac0\xc8882\xac&") 15ce7945e0616665cca67c8fa696e98daff25efdc6b7818294c14a023d1f276f000066400000000000000000000053031453027036000335100ustar00rootroot00000000000000juniper-0.15.1/container/tree/testdata/fuzz/FuzzBtreego test fuzz v1 
[]byte("0102007000000\xa0\x03\xc3\xceJ$\x17wg\xf9\xffٜ\xeb\xa7\x18\x9d\x13~\xea=w\xd41\fKڮz\xb33\xe7^\xefu\fC&ȇ\xfe\xcf!\xb3\x17\xd39\x91~u\x0fq\xe4\x89g\xc1w.\x8f\xb8\x1a\f;\t\xe8K\u007f\xac\xac\xdeT]6\x1e!\x14BҌ\xf0\xbf\xcdJ\xfe\x9b\xfbV\x84'\x13/\xa9\x17C\xe2\x9fn\x8al\xf1\x11i6|\x95\x88.V\x1d\xa3\xfd\x81\x0e\xd5\xcd\v\xb3\xc7\xe4\x99\xc3\u007f\xaf\x81\x93pE&[\x1f\x06\xa0\xa9\x178\xa5^\x96[o¥\xb2\xbf\xa3\x06\x92d4n\xb4OM\x9e\xfa7[\x0f\xea\x89_\xc7ɓ\x12\x1c\xa2-\xaa\x93\xbe*\xeć$\x1c\x05\xdc~ƍ\x81\x81\xe5\x9fjb造\x8f7`\x8cG\x9aC.\x1c\xc3\t\x82\xe3\xc0\xdc\xe1\xff\x93\xa0\xa7W\xfe\xa4\xabi\xff\xa8\x81\xb6מĉ\xaf\xa7\xdd\x01=\x954\xbb\xb9#\x13\x80)kiq\xbd\x81\xa3\xdev\xa5WTz{s/\xe9\xa9\xf6\x80(\x1ci\fC\xc3-\xa5M\xf9\xb1^\x92v0\xe2\a\xda\x14{0$\x06\x93t\x15\xfe^\xa6\v\\\xafDr\u007f*<\xc1\x97\xc5\x11\x18ѯ\x1d\x12|\x04)\xef\r\xb7#\x00\x99\x1cخ\xf3\x9bQ\xfbj\x1cz\x0f\x89\x90\xa0)\nu\\\xeb\xbc\xfbfK\xbd\x18|\xa7\x95\xf80\x92\xa1K\x01\xc8X\x89\xe1R\\D\x05\xd1C\xdc\x1c\xacUe\xa2\x18\x17\xf0\xf0\x10\xd0Lx\x01i]Gq\x83\x8b\xdd\x0e\x1fC@\x0eɗ\x8d¨p\x8a\xb8[\x92t\xd2(c<\xf7@\x89n\x92\xa0C\x97\"<\xc0P\x92\x98\xb9\xfe\xc7\xd9ޟ]\xec\x8c2\x8d\xc1\x96z\xc0\\\x8aG\xb4\xbf\x1b]k\xd7\xec\x8fr\x80h\x9a\xc6\xc9_\x06\u007f\x04\xaf\x03\xbe|\xb1fp\x03\xb2\xdeu\xe8}\x84\x04\xc9\b\x13\xc6\x14\xc5\xd1\x1f\x04d\xe1˖\xa0\xeć$\x1c\x05\xe9\xc3\xfb\x8a\xa8<$\xb9\x8c\xb5\x1a\xbc\fcP\v\xa4\xfe5?д\x97{T\xf2\xc0\xfc\xcem쿀sQ\xef\x04k\xc6\n\xa2\x01\xf6\x19\x91\xcd\xed\x8a\xf27\xec\xd9تw\xe8ٽ\xd7010000") 3186c429e1e9457d3a9781f1574c9e425a2722699c873d9bd029ba8ef0bf1251000066400000000000000000000000551453027036000331210ustar00rootroot00000000000000juniper-0.15.1/container/tree/testdata/fuzz/FuzzBtreego test fuzz v1 []byte("010020070000080070") 397e77a7302992824e071c74afb6fc92bcae888518beec4e925fa04bd5f6109a000066400000000000000000000000541453027036000334750ustar00rootroot00000000000000juniper-0.15.1/container/tree/testdata/fuzz/FuzzBtreego test fuzz v1 []byte("0000\x9a00\x9a010") 
4daaf3fc572445d3e2abfecdd581c5fe3e9223c7e4f99d9024f1067ccc0fdd13000066400000000000000000000001051453027036000341050ustar00rootroot00000000000000juniper-0.15.1/container/tree/testdata/fuzz/FuzzBtreego test fuzz v1 []byte("0\xb400\xaf00\xe200000100200700800900A00B0") 582528ddfad69eb57775199a43e0f9fd5c94bba343ce7bb6724d4ebafe311ed4000066400000000000000000000000341453027036000337700ustar00rootroot00000000000000juniper-0.15.1/container/tree/testdata/fuzz/FuzzBtreego test fuzz v1 []byte("0") 66498f377f38b53eebe1ceaa4a53e4de01a04efc02ac9cfda60f9815f80e9b9d000066400000000000000000000000411453027036000341150ustar00rootroot00000000000000juniper-0.15.1/container/tree/testdata/fuzz/FuzzBtreego test fuzz v1 []byte("000000") 6846001db8403e686afe2cccc16baf98e7614a02fa3cae6377879774a557c815000066400000000000000000000000361453027036000333270ustar00rootroot00000000000000juniper-0.15.1/container/tree/testdata/fuzz/FuzzBtreego test fuzz v1 []byte("00.") 77bc6ab776b752e5affdebf8086b3b41a61db036dc0e301c9b6a5cddb6aa8de9000066400000000000000000000001001453027036000342260ustar00rootroot00000000000000juniper-0.15.1/container/tree/testdata/fuzz/FuzzBtreego test fuzz v1 []byte("0\xb70\x80000\x9f0\x8f0102070 0!2\x80") 8b9e3d4777155ac218c623d5f60b358ef54154f768cef8dc88e5f8762d470486000066400000000000000000000001021453027036000332230ustar00rootroot00000000000000juniper-0.15.1/container/tree/testdata/fuzz/FuzzBtreego test fuzz v1 []byte("0\xfe0\xdc0\xff00010\xfd02070\xdf2\xdc0") 8fb23cdaf598ef83b3b1ffe0adfd8afb9355230669630e347dcb0b6dccf2c96d000066400000000000000000000000461453027036000342020ustar00rootroot00000000000000juniper-0.15.1/container/tree/testdata/fuzz/FuzzBtreego test fuzz v1 []byte("01002000020") 93f613aa37dee6860c4c209d01a4b0a55a60f09b3c899ff4c08296db8a0e3757000066400000000000000000000000551453027036000333710ustar00rootroot00000000000000juniper-0.15.1/container/tree/testdata/fuzz/FuzzBtreego test fuzz v1 []byte("000000000000000010") 
9480302240b5d8165509b6ee1a3010e0b8c626d177c018a321848b006e0f6feb000066400000000000000000000000501453027036000327450ustar00rootroot00000000000000juniper-0.15.1/container/tree/testdata/fuzz/FuzzBtreego test fuzz v1 []byte("0\xf0+00\xf0.") 9e3edbbc4d6da50f8dd2f64f399c77e0a8cd9eeaadc48cd01aa522d616ad6fc1000066400000000000000000000001211453027036000344020ustar00rootroot00000000000000juniper-0.15.1/container/tree/testdata/fuzz/FuzzBtreego test fuzz v1 []byte("0000100700800900A00B00010C000200700800900A021020200220") b602bd178b58b816d472c7033078290f38f8043e2909166fec0a5294768e9281000066400000000000000000000000771453027036000326210ustar00rootroot00000000000000juniper-0.15.1/container/tree/testdata/fuzz/FuzzBtreego test fuzz v1 []byte("0X000\xd30\xeb0Y010Z020\xbe070a2\xbe") c5f5c514e05dd9777d15eebd16b522d33f7af1ce681588ec7926cf4224a17f1e000066400000000000000000000000531453027036000335440ustar00rootroot00000000000000juniper-0.15.1/container/tree/testdata/fuzz/FuzzBtreego test fuzz v1 []byte("020k0H0022202k2H") ca4f83585749886bc517c43d19817c64024511976fb9aabaebdd0a708cf3ed32000066400000000000000000000001411453027036000333770ustar00rootroot00000000000000juniper-0.15.1/container/tree/testdata/fuzz/FuzzBtreego test fuzz v1 []byte("00010207080A090C0X0Y0Z0a0b0B0c0x0y0z0\xfd0\xe40\xaf0\xa90^0[0\xc20\xb2") d1f25b1ddd4b728a3e601f1b50dc0f011524dba4d4667cca33481bef38acdef3000066400000000000000000000000631453027036000337620ustar00rootroot00000000000000juniper-0.15.1/container/tree/testdata/fuzz/FuzzBtreego test fuzz v1 []byte("0200700800900A00100000A0") d71b22a346a2fa61e51ba06a6491a21b90f91f5d32898b830322b98c7db15f4f000066400000000000000000000001461453027036000332710ustar00rootroot00000000000000juniper-0.15.1/container/tree/testdata/fuzz/FuzzBtreego test fuzz v1 []byte("0001020708090A0B0C0X0Y0\x040Z0a0b0c0x0y0z0\xd70\xfe0\x8e0g0\xa60\xcc0i0f200") 
d9db7005f68c1bb12871a742701b82f92f10e8a998bf52f29fc2faee0ce449a3000066400000000000000000000000401453027036000335300ustar00rootroot00000000000000juniper-0.15.1/container/tree/testdata/fuzz/FuzzBtreego test fuzz v1 []byte("00010") e0767f8f563d128ae718a2088c21f090d9525789fe108dec624274a3d16b18fe000066400000000000000000000000651453027036000331750ustar00rootroot00000000000000juniper-0.15.1/container/tree/testdata/fuzz/FuzzBtreego test fuzz v1 []byte("0000100200700800B00900A01B") e1ca410ff2c96828ac46109e9ccc44cbb6af746f18558564519b9c4b3e333c23000066400000000000000000000000651453027036000333770ustar00rootroot00000000000000juniper-0.15.1/container/tree/testdata/fuzz/FuzzBtreego test fuzz v1 []byte("0100000200 00700800900A010") ee2e988348496277d0979c29024c575be9789a337f19b06a9d5fb8829777f2f8000066400000000000000000000000661453027036000330470ustar00rootroot00000000000000juniper-0.15.1/container/tree/testdata/fuzz/FuzzBtreego test fuzz v1 []byte("0100700200000800900A00B0080") juniper-0.15.1/container/xheap/000077500000000000000000000000001453027036000163415ustar00rootroot00000000000000juniper-0.15.1/container/xheap/testdata/000077500000000000000000000000001453027036000201525ustar00rootroot00000000000000juniper-0.15.1/container/xheap/testdata/fuzz/000077500000000000000000000000001453027036000211505ustar00rootroot00000000000000juniper-0.15.1/container/xheap/testdata/fuzz/FuzzBasic/000077500000000000000000000000001453027036000230505ustar00rootroot000000000000001cb6ac353221cd94e905c3633b67191b65d08bc02ee23306f77e095223ce051c000066400000000000000000000000701453027036000332420ustar00rootroot00000000000000juniper-0.15.1/container/xheap/testdata/fuzz/FuzzBasicgo test fuzz v1 []byte("~\x9d\x1cʿ\x17\r") []byte("~") 70a32a8bd28f5c4e4d81261c2400079adaf373c2e5d923da25fe4c04058faeb5000066400000000000000000000000461453027036000336270ustar00rootroot00000000000000juniper-0.15.1/container/xheap/testdata/fuzz/FuzzBasicgo test fuzz v1 []byte("") []byte("") 
juniper-0.15.1/container/xheap/testdata/fuzz/FuzzPriorityQueue/000077500000000000000000000000001453027036000246555ustar00rootroot00000000000000c689c7ddbfd538bfa952c2740ba43fdfd83cb0d19796e15b3c30613f8a52ec9e000066400000000000000000000000521453027036000357140ustar00rootroot00000000000000juniper-0.15.1/container/xheap/testdata/fuzz/FuzzPriorityQueuego test fuzz v1 []byte("1,.") []byte("0") d032b98f2fb2ea478028c32a0f45d04a549eb054f07654b3d4c5a374216a18a7000066400000000000000000000000541453027036000351360ustar00rootroot00000000000000juniper-0.15.1/container/xheap/testdata/fuzz/FuzzPriorityQueuego test fuzz v1 []byte("90") []byte("\xb9") ee874908d2a4e971a8bce76a75ac6af8f2c82c2c47b225146f4b3c131cb3fc1c000066400000000000000000000000531453027036000356150ustar00rootroot00000000000000juniper-0.15.1/container/xheap/testdata/fuzz/FuzzPriorityQueuego test fuzz v1 []byte("0") []byte("\xc1") juniper-0.15.1/container/xheap/xheap.go000066400000000000000000000125351453027036000200030ustar00rootroot00000000000000// Package xheap contains extensions to the standard library package container/heap. package xheap import ( "github.com/bradenaw/juniper/internal/heap" "github.com/bradenaw/juniper/iterator" "github.com/bradenaw/juniper/xsort" ) // Heap is a min-heap (https://en.wikipedia.org/wiki/Binary_heap). Min-heaps are a collection // structure that provide constant-time access to the minimum element, and logarithmic-time removal. // They are most commonly used as a priority queue. // // Push and Pop take amortized O(log(n)) time where n is the number of items in the heap. // // Len and Peek take O(1) time. type Heap[T any] struct { // Indirect here so that Heap behaves as a reference type, like the map builtin. inner *heap.Heap[T] } // New returns a new Heap which uses less to determine the minimum element. // // The elements from initial are added to the heap. initial is modified by New and utilized by the // Heap, so it should not be used after passing to New(). 
Passing initial is faster (O(n)) than // creating an empty heap and pushing each item (O(n * log(n))). func New[T any](less xsort.Less[T], initial []T) Heap[T] { inner := heap.New( func(a, b T) bool { return less(a, b) }, func(a T, i int) {}, initial, ) return Heap[T]{ inner: &inner, } } // Len returns the current number of elements in the heap. func (h Heap[T]) Len() int { return h.inner.Len() } // Grow allocates sufficient space to add n more elements without needing to reallocate. func (h Heap[T]) Grow(n int) { h.inner.Grow(n) } // Shrink reallocates the backing buffer for h, if necessary, so that it fits only the current size // plus at most n extra items. func (h Heap[T]) Shrink(n int) { h.inner.Shrink(n) } // Push adds item to the heap. func (h Heap[T]) Push(item T) { h.inner.Push(item) } // Pop removes and returns the minimum item in the heap. It panics if h.Len()==0. func (h Heap[T]) Pop() T { return h.inner.Pop() } // Peek returns the minimum item in the heap. It panics if h.Len()==0. func (h Heap[T]) Peek() T { return h.inner.Peek() } // Iterate iterates over the elements of the heap. // // The iterator panics if the heap has been modified since iteration started. func (h Heap[T]) Iterate() iterator.Iterator[T] { return h.inner.Iterate() } // KP holds key and priority for PriorityQueue. type KP[K any, P any] struct { K K P P } // PriorityQueue is a queue that yields items in increasing order of priority. type PriorityQueue[K comparable, P any] struct { // Indirect here so that Heap behaves as a reference type, like the map builtin. inner *heap.Heap[KP[K, P]] m map[K]int } // NewPriorityQueue returns a new PriorityQueue which uses less to determine the minimum element. // // The elements from initial are added to the priority queue. initial is modified by // NewPriorityQueue and utilized by the PriorityQueue, so it should not be used after passing to // NewPriorityQueue. 
Passing initial is faster (O(n)) than creating an empty priority queue and // pushing each item (O(n * log(n))). // // Pop, Remove, and Update all take amortized O(log(n)) time where n is the number of items in the // queue. // // Len, Peek, Contains, and Priority take O(1) time. func NewPriorityQueue[K comparable, P any]( less xsort.Less[P], initial []KP[K, P], ) PriorityQueue[K, P] { h := PriorityQueue[K, P]{ m: make(map[K]int), } filtered := initial[:0] for _, kp := range initial { _, ok := h.m[kp.K] if ok { continue } h.m[kp.K] = -1 filtered = append(filtered, kp) } initial = filtered inner := heap.New( func(a, b KP[K, P]) bool { return less(a.P, b.P) }, func(x KP[K, P], i int) { h.m[x.K] = i }, initial, ) h.inner = &inner return h } // Len returns the current number of elements in the priority queue. func (h PriorityQueue[K, P]) Len() int { return h.inner.Len() } // Grow allocates sufficient space to add n more elements without needing to reallocate. func (h PriorityQueue[K, P]) Grow(n int) { h.inner.Grow(n) } // Update updates the priority of k to p, or adds it to the priority queue if not present. func (h PriorityQueue[K, P]) Update(k K, p P) { idx, ok := h.m[k] if ok { h.inner.UpdateAt(idx, KP[K, P]{k, p}) } else { h.inner.Push(KP[K, P]{k, p}) } } // Pop removes and returns the lowest-P item in the priority queue. It panics if h.Len()==0. func (h PriorityQueue[K, P]) Pop() K { item := h.inner.Pop() delete(h.m, item.K) return item.K } // Peek returns the key of the lowest-P item in the priority queue. It panics if h.Len()==0. func (h PriorityQueue[K, P]) Peek() K { return h.inner.Peek().K } // Contains returns true if the given key is present in the priority queue. func (h PriorityQueue[K, P]) Contains(k K) bool { _, ok := h.m[k] return ok } // Priority returns the priority of k, or the zero value of P if k is not present. 
func (h PriorityQueue[K, P]) Priority(k K) P { idx, ok := h.m[k] if ok { return h.inner.Item(idx).P } var zero P return zero } // Remove removes the item with the given key if present. func (h PriorityQueue[K, P]) Remove(k K) { i, ok := h.m[k] if !ok { return } h.inner.RemoveAt(i) delete(h.m, k) } // Iterate iterates over the elements of the priority queue. // // The iterator panics if the priority queue has been modified since iteration started. func (h PriorityQueue[K, P]) Iterate() iterator.Iterator[K] { return iterator.Map(h.inner.Iterate(), func(kp KP[K, P]) K { return kp.K }) } juniper-0.15.1/container/xheap/xheap_test.go000066400000000000000000000060521453027036000210370ustar00rootroot00000000000000package xheap import ( "testing" "github.com/bradenaw/juniper/internal/require2" "github.com/bradenaw/juniper/iterator" "github.com/bradenaw/juniper/xsort" ) func FuzzHeap(f *testing.F) { f.Fuzz(func(t *testing.T, b1 []byte, b2 []byte) { t.Logf("initial: %#v", b1) t.Logf("pushed: %#v", b2) h := New(xsort.OrderedLess[byte], append([]byte{}, b1...)) for i := range b2 { h.Push(b2[i]) } outByIterate := iterator.Collect(h.Iterate()) xsort.Slice(outByIterate, xsort.OrderedLess[byte]) if outByIterate == nil { outByIterate = []byte{} } outByPop := []byte{} for h.Len() > 0 { item := h.Pop() outByPop = append(outByPop, item) } expected := append(append([]byte{}, b1...), b2...) 
t.Logf("expected: %#v", expected) xsort.Slice(expected, xsort.OrderedLess[byte]) t.Logf("expected sorted: %#v", expected) require2.SlicesEqual(t, expected, outByPop) require2.SlicesEqual(t, expected, outByIterate) }) } func FuzzPriorityQueue(f *testing.F) { const ( Update = iota Pop Peek Contains Priority Remove Iterate ) f.Fuzz(func(t *testing.T, b1 []byte, b2 []byte) { initial := make([]KP[int, float32], 0, len(b1)) oracle := make(map[int]float32) for i := range b1 { k := int((b1[i] & 0b00011100) >> 2) p := float32((b1[i] & 0b00000011)) _, ok := oracle[k] if ok { continue } initial = append(initial, KP[int, float32]{k, p}) oracle[k] = p } t.Logf("initial: %#v", initial) t.Logf("initial oracle: %#v", oracle) h := NewPriorityQueue(xsort.OrderedLess[float32], initial) oracleLowestP := func() float32 { first := true lowest := float32(0) for _, p := range oracle { if first || p < lowest { lowest = p } first = false } return lowest } for _, b := range b2 { op := (b & 0b11100000) >> 5 k := int((b & 0b00011100) >> 2) p := float32(b & 0b00000011) switch op { case Update: t.Logf("Update(%d, %f)", k, p) oracle[k] = p h.Update(k, p) case Pop: t.Logf("Pop()") if len(oracle) == 0 { require2.Equal(t, 0, h.Len()) continue } lowestP := oracleLowestP() hPopped := h.Pop() require2.Equal(t, lowestP, oracle[hPopped]) delete(oracle, hPopped) case Peek: t.Logf("Peek()") if len(oracle) == 0 { require2.Equal(t, 0, h.Len()) continue } lowestP := oracleLowestP() hPeeked := h.Peek() require2.Equal(t, lowestP, oracle[hPeeked]) case Contains: t.Logf("Contains(%d)", k) _, oracleContains := oracle[k] require2.Equal(t, oracleContains, h.Contains(k)) case Priority: t.Logf("Priority(%d)", k) require2.Equal(t, oracle[k], h.Priority(k)) case Remove: t.Logf("Remove(%d)", k) delete(oracle, k) h.Remove(k) case Iterate: t.Logf("Iterate()") oracleItems := make([]int, 0, len(oracle)) for k := range oracle { oracleItems = append(oracleItems, k) } items := iterator.Collect(h.Iterate()) 
require2.ElementsMatch(t, oracleItems, items) } require2.Equal(t, len(oracle), h.Len()) } }) } juniper-0.15.1/container/xlist/000077500000000000000000000000001453027036000163775ustar00rootroot00000000000000juniper-0.15.1/container/xlist/testdata/000077500000000000000000000000001453027036000202105ustar00rootroot00000000000000juniper-0.15.1/container/xlist/testdata/fuzz/000077500000000000000000000000001453027036000212065ustar00rootroot00000000000000juniper-0.15.1/container/xlist/testdata/fuzz/FuzzList/000077500000000000000000000000001453027036000230005ustar00rootroot00000000000000052df2b9addb4b5728625f2d904855ac4d11c08736fd5f0c514fa4093bf012a4000066400000000000000000000002531453027036000334220ustar00rootroot00000000000000juniper-0.15.1/container/xlist/testdata/fuzz/FuzzListgo test fuzz v1 []byte("000000000100000000100000000100000000100000000100000000100000000100000000000000000000000000100000000100000000100000000100000000100000000100000000") 082f5d3003e4b7eecadd5da481360ab6c5d27d61f9c8dd3c3726b9997e8abd7c000066400000000000000000000000551453027036000340370ustar00rootroot00000000000000juniper-0.15.1/container/xlist/testdata/fuzz/FuzzListgo test fuzz v1 []byte("100000000000000000") 19ad1cdd3cda96ceaa49bad30f44d9b974ad0551d8c37a4b21fe5d10d8703c4b000066400000000000000000000002531453027036000341410ustar00rootroot00000000000000juniper-0.15.1/container/xlist/testdata/fuzz/FuzzListgo test fuzz v1 []byte("000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000") 36bbbd91660d42e6fbe6e5de21891a1c39dbddfe395a0001506dd742792092e1000066400000000000000000000000661453027036000335160ustar00rootroot00000000000000juniper-0.15.1/container/xlist/testdata/fuzz/FuzzListgo test fuzz v1 []byte("100000000100000000000000000") 
45a5647fbf2a9781808dc942c4cfc85df42c72ecd8673cb46c9625c69859e895000066400000000000000000000000651453027036000334570ustar00rootroot00000000000000juniper-0.15.1/container/xlist/testdata/fuzz/FuzzListgo test fuzz v1 []byte("70000000020000000000000000") 61f9c13a583fd565a960887f61fab4b1d20988bed1ed3eed6dddbbc745eaf72e000066400000000000000000000001321453027036000342030ustar00rootroot00000000000000juniper-0.15.1/container/xlist/testdata/fuzz/FuzzListgo test fuzz v1 []byte("700000000700000000700000000700000000700000000200012000101001100") 732421c657c5db19e5e068faf0d3c7c44bc9960e8f354da429b58f297b1a5cbc000066400000000000000000000001121453027036000336130ustar00rootroot00000000000000juniper-0.15.1/container/xlist/testdata/fuzz/FuzzListgo test fuzz v1 []byte("000000000000000000000000000200000000\xf11000000") 871e27fb3bb3ddcdfbd1e38ae83677110d4a78ad4ee347256eae2a7563bd23b7000066400000000000000000000000661453027036000340250ustar00rootroot00000000000000juniper-0.15.1/container/xlist/testdata/fuzz/FuzzListgo test fuzz v1 []byte("700000000!00000000000000000") 8853315897765b7769cf4d45dee20bfa199683ba2778135f2d73263e2b8717ea000066400000000000000000000000441453027036000331350ustar00rootroot00000000000000juniper-0.15.1/container/xlist/testdata/fuzz/FuzzListgo test fuzz v1 []byte("000000000") aaeabb36ec0ba5e68dd1a6937842e2f4ed3ae6c69e4233639e6095cf20966d04000066400000000000000000000000541453027036000336720ustar00rootroot00000000000000juniper-0.15.1/container/xlist/testdata/fuzz/FuzzListgo test fuzz v1 []byte("20000000000000000") juniper-0.15.1/container/xlist/xlist.go000066400000000000000000000071771453027036000201050ustar00rootroot00000000000000// Package xlist contains extensions to the standard library package container/list. package xlist // List is a doubly-linked list. type List[T any] struct { front *Node[T] back *Node[T] size int } // Len returns the number of items in the list. func (l *List[T]) Len() int { return l.size } // Front returns the node at the front of the list. 
func (l *List[T]) Front() *Node[T] { return l.front } // Back returns the node at the back of the list. func (l *List[T]) Back() *Node[T] { return l.back } // Clear removes all nodes from the list. func (l *List[T]) Clear() { l.front = nil; l.back = nil; l.size = 0 } // PushFront adds a new node with the given value to the front of the list. func (l *List[T]) PushFront(value T) *Node[T] { node := &Node[T]{ next: l.front, Value: value, } if l.front != nil { l.front.prev = node } l.front = node if l.back == nil { l.back = node } l.size++ return node } // PushBack adds a new node with the given value to the back of the list. func (l *List[T]) PushBack(value T) *Node[T] { node := &Node[T]{ prev: l.back, Value: value, } if l.back != nil { l.back.next = node } l.back = node if l.front == nil { l.front = node } l.size++ return node } // InsertBefore adds a new node with the given value before the node mark. func (l *List[T]) InsertBefore(value T, mark *Node[T]) *Node[T] { node := &Node[T]{ Value: value, prev: mark.prev, next: mark, } mark.prev = node if node.prev != nil { node.prev.next = node } if l.front == mark { l.front = node } l.size++ return node } // InsertBefore adds a new node with the given value after the node mark. func (l *List[T]) InsertAfter(value T, mark *Node[T]) *Node[T] { node := &Node[T]{ Value: value, prev: mark, next: mark.next, } mark.next = node if node.next != nil { node.next.prev = node } if l.back == mark { l.back = node } l.size++ return node } // Remove removes node from the list. func (l *List[T]) Remove(node *Node[T]) { l.remove(node) node.prev = nil node.next = nil l.size-- } func (l *List[T]) remove(node *Node[T]) { if l.front == node { l.front = l.front.next } else { node.prev.next = node.next } if l.back == node { l.back = l.back.prev } else { node.next.prev = node.prev } } // MoveBefore moves node just before mark. Afterwards, mark.Prev() == node && node.Next() == mark. 
func (l *List[T]) MoveBefore(node *Node[T], mark *Node[T]) { if node == mark { return } l.remove(node) node.prev = mark.prev mark.prev = node node.next = mark if node.prev != nil { node.prev.next = node } if l.front == mark { l.front = node } } // MoveAfter moves node just after mark. Afterwards, mark.Next() == node && node.Prev() == mark. func (l *List[T]) MoveAfter(node *Node[T], mark *Node[T]) { if node == mark { return } l.remove(node) node.next = mark.next mark.next = node node.prev = mark if node.next != nil { node.next.prev = node } if l.back == mark { l.back = node } } // MoveToFront moves node to the front of the list. func (l *List[T]) MoveToFront(node *Node[T]) { l.MoveBefore(node, l.Front()) } // MoveToFront moves node to the back of the list. func (l *List[T]) MoveToBack(node *Node[T]) { l.MoveAfter(node, l.Back()) } // Node is a node in a linked-list. type Node[T any] struct { prev *Node[T] next *Node[T] // Value is user-controlled, and never modified by this package. Value T } // Next returns the next node in the list that n is a part of, if there is one. func (n *Node[T]) Next() *Node[T] { return n.next } // Prev returns the previous node in the list that n is a part of, if there is one. 
func (n *Node[T]) Prev() *Node[T] { return n.prev } juniper-0.15.1/container/xlist/xlist_test.go000066400000000000000000000066041453027036000211360ustar00rootroot00000000000000package xlist import ( "testing" "github.com/bradenaw/juniper/internal/fuzz" "github.com/bradenaw/juniper/internal/require2" "github.com/bradenaw/juniper/xslices" ) func FuzzList(f *testing.F) { f.Fuzz(func(t *testing.T, b []byte) { var l List[int] var oracle []int nodeAt := func(i int) *Node[int] { j := 0 curr := l.Front() for j < i { curr = curr.Next() j++ } return curr } fuzz.Operations( b, func() { // check t.Logf("%v", oracle) require2.Equal(t, len(oracle), l.Len()) if len(oracle) == 0 { require2.Nil(t, l.Front()) require2.Nil(t, l.Back()) return } node := l.Front() for i := range oracle { require2.NotNilf(t, node, "node nil at index %d, len(oracle)==%d", i, len(oracle)) require2.Equal(t, oracle[i], node.Value) if node.Next() != nil { require2.Equal(t, node, node.Next().Prev()) } node = node.Next() } require2.Nil(t, node) require2.NotNil(t, l.Back()) require2.Equal(t, oracle[len(oracle)-1], l.Back().Value) }, func(value int) { t.Logf("PushFront(%d)", value) l.PushFront(value) oracle = append([]int{value}, oracle...) 
}, func(value int) { t.Logf("PushBack(%d)", value) l.PushBack(value) oracle = append(oracle, value) }, func(value int, idx int) { if len(oracle) == 0 || idx < 0 { return } idx = idx % len(oracle) t.Logf("InsertBefore(%d, node @ %d)", value, idx) l.InsertBefore(value, nodeAt(idx)) oracle = xslices.Insert(oracle, idx, value) }, func(value int, idx int) { if len(oracle) == 0 || idx < 0 { return } idx = idx % len(oracle) t.Logf("InsertAfter(%d, node @ %d)", value, idx) l.InsertAfter(value, nodeAt(idx)) oracle = xslices.Insert(oracle, idx+1, value) }, func(idx int) { if len(oracle) == 0 || idx < 0 { return } idx = idx % len(oracle) t.Logf("Remove(node @ %d)", idx) l.Remove(nodeAt(idx)) oracle = xslices.Remove(oracle, idx, 1) }, func(src, dest int) { if len(oracle) == 0 || src < 0 || dest < 0 { return } src = src % len(oracle) dest = dest % len(oracle) t.Logf("MoveBefore(node @ %d, node @ %d)", src, dest) l.MoveBefore(nodeAt(src), nodeAt(dest)) item := oracle[src] oracle = xslices.Remove(oracle, src, 1) if dest > src { dest-- } oracle = xslices.Insert(oracle, dest, item) }, func(src, dest int) { if len(oracle) == 0 || src < 0 || dest < 0 { return } src = src % len(oracle) dest = dest % len(oracle) t.Logf("MoveAfter(node @ %d, node @ %d)", src, dest) l.MoveAfter(nodeAt(src), nodeAt(dest)) item := oracle[src] oracle = xslices.Remove(oracle, src, 1) if dest >= src { dest-- } oracle = xslices.Insert(oracle, dest+1, item) }, func(idx int) { if len(oracle) == 0 || idx < 0 { return } idx = idx % len(oracle) t.Logf("MoveToFront(node @ %d)", idx) l.MoveToFront(nodeAt(idx)) item := oracle[idx] oracle = xslices.Remove(oracle, idx, 1) oracle = append([]int{item}, oracle...) 
}, func(idx int) { if len(oracle) == 0 || idx < 0 { return } idx = idx % len(oracle) t.Logf("MoveToBack(node @ %d)", idx) l.MoveToBack(nodeAt(idx)) item := oracle[idx] oracle = xslices.Remove(oracle, idx, 1) oracle = append(oracle, item) }, ) }) } juniper-0.15.1/fuzz.sh000066400000000000000000000006041453027036000146040ustar00rootroot00000000000000#!/bin/bash set -euxo pipefail find . -name "*_test.go" \ | xargs grep "func Fuzz" \ | sed -E -e "s/^\.\/(([a-zA-Z0-9]+\/)+).+?(Fuzz[a-zA-Z0-9]+).+?$/\1 \3/g" \ | while read package_name fuzz_test_name; do echo "$package_name $fuzz_test_name" "$GOROOT/bin/go" test --fuzz "$fuzz_test_name" --fuzztime=15s "github.com/bradenaw/juniper/$package_name" done juniper-0.15.1/go.mod000066400000000000000000000002441453027036000143600ustar00rootroot00000000000000module github.com/bradenaw/juniper go 1.18 require ( golang.org/x/exp v0.0.0-20231006140011-7918f672742d golang.org/x/sync v0.0.0-20210220032951-036812b2e83c ) juniper-0.15.1/go.sum000066400000000000000000000006401453027036000144050ustar00rootroot00000000000000golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= juniper-0.15.1/internal/000077500000000000000000000000001453027036000150665ustar00rootroot00000000000000juniper-0.15.1/internal/fuzz/000077500000000000000000000000001453027036000160645ustar00rootroot00000000000000juniper-0.15.1/internal/fuzz/fuzz.go000066400000000000000000000031721453027036000174140ustar00rootroot00000000000000package fuzz import ( "encoding/binary" "fmt" "reflect" ) func Operations(b []byte, check func(), fns ...interface{}) { choose := func(n int) (int, bool) { if len(b) < 1 { return 0, false 
} if n > 255 { panic("") } choice := int(b[0]) if choice >= n { return 0, false } b = b[1:] return choice, true } takeByte := func() (byte, bool) { if len(b) < 1 { return 0, false } x := b[0] b = b[1:] return x, true } takeUint16 := func() (uint16, bool) { if len(b) < 2 { return 0, false } x := uint16(binary.BigEndian.Uint16(b[:2])) b = b[2:] return x, true } takeInt := func() (int, bool) { if len(b) < 8 { return 0, false } x := int(binary.BigEndian.Uint64(b[:8])) b = b[8:] return x, true } takeBool := func() (bool, bool) { b, ok := takeByte() return b != 0, ok } Loop: for { check() i, ok := choose(len(fns)) if !ok { break } fnV := reflect.ValueOf(fns[i]) args := make([]reflect.Value, fnV.Type().NumIn()) for j := range args { argType := fnV.Type().In(j) switch argType.Kind() { case reflect.Int: x, ok := takeInt() if !ok { break Loop } args[j] = reflect.ValueOf(x) case reflect.Uint8: x, ok := takeByte() if !ok { break Loop } args[j] = reflect.ValueOf(x) case reflect.Uint16: x, ok := takeUint16() if !ok { break Loop } args[j] = reflect.ValueOf(x) case reflect.Bool: x, ok := takeBool() if !ok { break Loop } args[j] = reflect.ValueOf(x) default: panic(fmt.Sprintf("arg type %s not supported", argType.Kind())) } } fnV.Call(args) } } juniper-0.15.1/internal/heap/000077500000000000000000000000001453027036000160035ustar00rootroot00000000000000juniper-0.15.1/internal/heap/heap.go000066400000000000000000000061531453027036000172540ustar00rootroot00000000000000package heap import ( "errors" "github.com/bradenaw/juniper/iterator" "github.com/bradenaw/juniper/xslices" ) var ErrHeapModified = errors.New("heap modified during iteration") // Duplicated from xsort to avoid dependency cycle. 
type Less[T any] func(a, b T) bool type Heap[T any] struct { lessFn Less[T] indexChanged func(x T, i int) a []T gen int } func New[T any](less Less[T], indexChanged func(x T, i int), initial []T) Heap[T] { h := Heap[T]{ lessFn: less, indexChanged: indexChanged, a: initial, } for i := len(initial)/2 - 1; i >= 0; i-- { h.percolateDown(i) } for i := range initial { h.notifyIndexChanged(i) } return h } func (h *Heap[T]) Len() int { return len(h.a) } func (h *Heap[T]) Grow(n int) { h.a = xslices.Grow(h.a, n) } func (h *Heap[T]) Shrink(n int) { h.a = xslices.Shrink(h.a, n) } func (h *Heap[T]) Push(item T) { h.a = append(h.a, item) h.notifyIndexChanged(len(h.a) - 1) h.percolateUp(len(h.a) - 1) h.gen++ } func (h *Heap[T]) Pop() T { var zero T item := h.a[0] (h.a)[0] = (h.a)[len(h.a)-1] // In case T is a pointer, clear this out to keep the ref from being live. (h.a)[len(h.a)-1] = zero h.a = (h.a)[:len(h.a)-1] if len(h.a) > 0 { h.notifyIndexChanged(0) } h.percolateDown(0) h.gen++ return item } func (h *Heap[T]) Peek() T { return h.a[0] } func (h *Heap[T]) RemoveAt(i int) { var zero T h.a[i] = h.a[len(h.a)-1] h.a[len(h.a)-1] = zero h.a = h.a[:len(h.a)-1] if i < len(h.a) { h.notifyIndexChanged(i) h.percolateUp(i) h.percolateDown(i) } h.gen++ } func (h *Heap[T]) Item(i int) T { return h.a[i] } func (h *Heap[T]) UpdateAt(i int, item T) { h.a[i] = item h.notifyIndexChanged(i) h.percolateUp(i) h.percolateDown(i) } func (h *Heap[T]) percolateUp(i int) { for i > 0 { p := parent(i) if h.less(i, p) { h.swap(i, p) } i = p } } func (h *Heap[T]) swap(i, j int) { (h.a)[i], (h.a)[j] = (h.a)[j], (h.a)[i] h.notifyIndexChanged(i) h.notifyIndexChanged(j) } func (h *Heap[T]) notifyIndexChanged(i int) { h.indexChanged(h.a[i], i) } func (h *Heap[T]) less(i, j int) bool { return h.lessFn((h.a)[i], (h.a)[j]) } func (h *Heap[T]) percolateDown(i int) { for { left, right := children(i) if left >= len(h.a) { // no children return } else if right >= len(h.a) { // only has a left child if h.less(left, i) 
{ h.swap(left, i) i = left } else { return } } else { // has both children least := left if h.less(right, left) { least = right } if h.less(least, i) { h.swap(least, i) i = least } else { return } } } } type heapIterator[T any] struct { h *Heap[T] inner iterator.Iterator[T] gen int } func (iter *heapIterator[T]) Next() (T, bool) { if iter.gen == -1 { iter.gen = iter.h.gen iter.inner = iterator.Slice(iter.h.a) } else if iter.gen != iter.h.gen { panic(ErrHeapModified) } return iter.inner.Next() } func (h *Heap[T]) Iterate() iterator.Iterator[T] { return &heapIterator[T]{h: h, gen: -1} } func parent(i int) int { return (i - 1) / 2 } func children(i int) (int, int) { return i*2 + 1, i*2 + 2 } juniper-0.15.1/internal/heap/heap_test.go000066400000000000000000000011061453027036000203040ustar00rootroot00000000000000package heap import ( "testing" "github.com/bradenaw/juniper/internal/require2" ) func TestParentChildren(t *testing.T) { require2.Equal(t, 0, parent(1)) require2.Equal(t, 0, parent(2)) left, right := children(0) require2.Equal(t, 1, left) require2.Equal(t, 2, right) require2.Equal(t, 1, parent(3)) require2.Equal(t, 1, parent(4)) left, right = children(1) require2.Equal(t, 3, left) require2.Equal(t, 4, right) require2.Equal(t, 2, parent(5)) require2.Equal(t, 2, parent(6)) left, right = children(2) require2.Equal(t, 5, left) require2.Equal(t, 6, right) } juniper-0.15.1/internal/orderedhashmap/000077500000000000000000000000001453027036000200545ustar00rootroot00000000000000juniper-0.15.1/internal/orderedhashmap/orderedhashmap.go000066400000000000000000000105361453027036000233760ustar00rootroot00000000000000// package orderedhashmap contains a simple and very inefficient ordered map using the map builtin // for comparing against other ordered containers in tests. 
package orderedhashmap import ( "github.com/bradenaw/juniper/iterator" "github.com/bradenaw/juniper/xsort" ) type KVPair[K any, V any] struct { K K V V } type Map[K comparable, V any] struct { less xsort.Less[K] m map[K]V } func NewMap[K comparable, V any](less xsort.Less[K]) Map[K, V] { return Map[K, V]{ less: less, m: make(map[K]V), } } func (m Map[K, V]) Len() int { return len(m.m) } func (m Map[K, V]) Put(k K, v V) { m.m[k] = v } func (m Map[K, V]) Delete(k K) { delete(m.m, k) } func (m Map[K, V]) Get(k K) V { return m.m[k] } func (m Map[K, V]) Contains(k K) bool { _, ok := m.m[k] return ok } func (m Map[K, V]) First() (K, V) { first := true var min K for k := range m.m { if first || m.less(k, min) { min = k first = false } } return min, m.m[min] } func (m Map[K, V]) Last() (K, V) { first := true var max K for k := range m.m { if first || m.less(max, k) { max = k first = false } } return max, m.m[max] } func (m Map[K, V]) Iterate() iterator.Iterator[KVPair[K, V]] { return m.Cursor().Forward() } func (m Map[K, V]) Cursor() *Cursor[K, V] { c := &Cursor[K, V]{ m: m, } c.SeekFirst() return c } func (m Map[K, V]) lastLess(k K) (K, bool) { first := true var out K for existingK := range m.m { if xsort.GreaterOrEqual(m.less, existingK, k) { continue } if first || m.less(out, existingK) { out = existingK first = false } } return out, !first } func (m Map[K, V]) firstGreater(k K) (K, bool) { first := true var out K for existingK := range m.m { if xsort.LessOrEqual(m.less, existingK, k) { continue } if first || m.less(existingK, out) { out = existingK first = false } } return out, !first } type Cursor[K comparable, V any] struct { m Map[K, V] offEdge bool k K } func (c *Cursor[K, V]) SeekFirst() { c.k, _ = c.m.First() c.offEdge = len(c.m.m) == 0 } func (c *Cursor[K, V]) SeekLast() { c.k, _ = c.m.Last() c.offEdge = len(c.m.m) == 0 } func (c *Cursor[K, V]) set(k K) { c.k = k c.offEdge = false } func (c *Cursor[K, V]) SeekLastLess(k K) { k, ok := c.m.lastLess(k) c.offEdge = 
!ok if ok { c.set(k) } } func (c *Cursor[K, V]) SeekLastLessOrEqual(k K) { if c.m.Contains(k) { c.set(k) return } k, ok := c.m.lastLess(k) c.offEdge = !ok if ok { c.set(k) } } func (c *Cursor[K, V]) SeekFirstGreaterOrEqual(k K) { if c.m.Contains(k) { c.set(k) return } k, ok := c.m.firstGreater(k) c.offEdge = !ok if ok { c.set(k) } } func (c *Cursor[K, V]) SeekFirstGreater(k K) { k, ok := c.m.firstGreater(k) c.offEdge = !ok if ok { c.set(k) } } func (c *Cursor[K, V]) Next() { if c.offEdge { return } k, ok := c.m.firstGreater(c.k) c.offEdge = !ok if ok { c.set(k) } } func (c *Cursor[K, V]) Prev() { if c.offEdge { return } k, ok := c.m.lastLess(c.k) c.offEdge = !ok if ok { c.set(k) } } func (c *Cursor[K, V]) deleted() bool { return !c.m.Contains(c.k) } func (c *Cursor[K, V]) Ok() bool { _, ok := c.m.m[c.k] return !c.offEdge && ok } func (c *Cursor[K, V]) Key() K { return c.k } func (c *Cursor[K, V]) Value() V { return c.m.m[c.k] } type forwardIterator[K comparable, V any] struct { c Cursor[K, V] } func (iter *forwardIterator[K, V]) Next() (KVPair[K, V], bool) { if !iter.c.offEdge && iter.c.deleted() { iter.c.SeekFirstGreaterOrEqual(iter.c.Key()) } if !iter.c.Ok() { var zero KVPair[K, V] return zero, false } k := iter.c.Key() v := iter.c.Value() iter.c.Next() return KVPair[K, V]{k, v}, true } func (c *Cursor[K, V]) Forward() iterator.Iterator[KVPair[K, V]] { c2 := *c if !c2.offEdge && c2.deleted() { c2.SeekFirstGreater(c2.k) } return &forwardIterator[K, V]{c: c2} } type backwardIterator[K comparable, V any] struct { c Cursor[K, V] } func (iter *backwardIterator[K, V]) Next() (KVPair[K, V], bool) { if !iter.c.offEdge && iter.c.deleted() { iter.c.SeekLastLessOrEqual(iter.c.Key()) } if !iter.c.Ok() { var zero KVPair[K, V] return zero, false } k := iter.c.Key() v := iter.c.Value() iter.c.Prev() return KVPair[K, V]{k, v}, true } func (c *Cursor[K, V]) Backward() iterator.Iterator[KVPair[K, V]] { c2 := *c if !c2.offEdge && c2.deleted() { c2.SeekLastLess(c.k) } return 
&backwardIterator[K, V]{c: c2} } juniper-0.15.1/internal/require2/000077500000000000000000000000001453027036000166245ustar00rootroot00000000000000juniper-0.15.1/internal/require2/require.go000066400000000000000000000074131453027036000206340ustar00rootroot00000000000000package require2 import ( "errors" "reflect" "runtime" "strconv" "strings" "testing" "golang.org/x/exp/constraints" ) func Equal[T comparable](t *testing.T, expected T, actual T) { if expected != actual { fatalf(t, "assertion failed: %#v == %#v", expected, actual) } } func DeepEqual[T any](t *testing.T, a T, b T) { if !reflect.DeepEqual(a, b) { fatalf(t, "assertion failed: reflect.DeepEqual(%#v, %#v)", a, b) } } func Equalf[T comparable](t *testing.T, expected T, actual T, s string, fmtArgs ...any) { if expected != actual { fatalf(t, "assertion failed: %#v == %#v\n"+s, append([]any{expected, actual}, fmtArgs...)...) } } func SlicesEqual[T comparable](t *testing.T, expected []T, actual []T) { n := len(expected) if len(actual) < n { n = len(actual) } for i := 0; i < n; i++ { if expected[i] != actual[i] { fatalf(t, "differ at index %d: %#v != %#v", i, expected[i], actual[i]) } } if len(expected) != len(actual) { fatalf(t, "lengths differ: %d != %d", len(expected), len(actual)) } } func Nil[T any](t *testing.T, a *T) { if a != nil { t.Fatal("expected nil") } } func NotNil[T any](t *testing.T, a *T) { if a == nil { t.Fatal("expected not nil") } } func NotNilf[T any](t *testing.T, a *T, f string, fmtArgs ...any) { if a == nil { fatalf(t, "expected not nil\n"+f, fmtArgs...) 
} } func NoError(t *testing.T, err error) { if err != nil { fatalf(t, "expected no error, got %#v", err) } } func Error(t *testing.T, err error) { if err == nil { fatalf(t, "expected %T (%s), got no error", err, err) } } func ErrorIs(t *testing.T, err error, match error) { if !errors.Is(err, match) { fatalf(t, "expected %T (%s), got %#v", match, match, err) } } func Greater[T constraints.Ordered](t *testing.T, a T, b T) { if !(a > b) { fatalf(t, "assertion failed: %#v > %#v", a, b) } } func GreaterOrEqual[T constraints.Ordered](t *testing.T, a T, b T) { if !(a >= b) { fatalf(t, "assertion failed: %#v >= %#v", a, b) } } func Less[T constraints.Ordered](t *testing.T, a T, b T) { if !(a < b) { fatalf(t, "assertion failed: %#v < %#v", a, b) } } func LessOrEqual[T constraints.Ordered](t *testing.T, a T, b T) { if !(a <= b) { fatalf(t, "assertion failed: %#v <= %#v", a, b) } } func InDelta[T ~float32 | ~float64](t *testing.T, actual T, expected T, delta T) { diff := actual - expected if diff < 0 { diff = -diff } if diff > delta { fatalf(t, "expected %#v to be within %#v of %#v, actually %#v", actual, delta, expected, diff) } } func True(t *testing.T, b bool) { if !b { fatalf(t, "expected true") } } func Truef(t *testing.T, b bool, s string, fmtArgs ...any) { if !b { fatalf(t, "expected true\n"+s, fmtArgs...) } } func ElementsMatch[T comparable](t *testing.T, a []T, b []T) { aSet := make(map[T]struct{}, len(a)) for _, ai := range a { aSet[ai] = struct{}{} } bSet := make(map[T]struct{}, len(b)) for _, bi := range b { bSet[bi] = struct{}{} } for ai := range aSet { _, ok := bSet[ai] if !ok { fatalf(t, "%#v appears in a but not in b", ai) } } for bi := range bSet { _, ok := aSet[bi] if !ok { fatalf(t, "%#v appears in b but not in a", bi) } } } func fatalf(t *testing.T, s string, fmtArgs ...any) { var buf [64]uintptr var ptrs []uintptr skip := 2 for { n := runtime.Callers(skip, buf[:]) ptrs = append(ptrs, buf[:n]...) 
if n < len(buf) { break } skip += n } var sb strings.Builder frames := runtime.CallersFrames(ptrs) for { frame, more := frames.Next() _, _ = sb.WriteString(frame.Function) _, _ = sb.WriteString("(...)\n ") _, _ = sb.WriteString(frame.File) _, _ = sb.WriteString(":") _, _ = sb.WriteString(strconv.Itoa(frame.Line)) _, _ = sb.WriteString("\n") if !more { break } } t.Fatalf(s+"\n\n%s", append(fmtArgs, sb.String())...) } juniper-0.15.1/internal/tseq/000077500000000000000000000000001453027036000160425ustar00rootroot00000000000000juniper-0.15.1/internal/tseq/tseq.go000066400000000000000000000033711453027036000173510ustar00rootroot00000000000000package tseq import ( "encoding/hex" "flag" "testing" ) var seed string func init() { flag.StringVar(&seed, "tseq.seed", seed, "") } func Run(t *testing.T, f func(tseq *TSeq)) { tseq := TSeq{} if seed == "" { defer func() { if t.Failed() { t.Logf("rerun with --tseq.seed=%s", scriptToString(tseq.script)) } }() for { f(&tseq) if !tseq.next() { break } } } else { var err error tseq.script, err = scriptFromString(seed) if err != nil { t.Fatalf("invalid --tseq.seed %s: %s", seed, err) } f(&tseq) } } type TSeq struct { script []bool i int } func scriptToString(script []bool) string { b := make([]byte, (len(script)+7)/8) for i := range script { if script[i] { idx := i / 8 off := i % 8 b[idx] |= 1 << off } } return "00" + hex.EncodeToString(b) } func scriptFromString(s string) ([]bool, error) { b, err := hex.DecodeString(s) if err != nil { return nil, err } b = b[1:] script := make([]bool, len(b)*8) for i := range script { idx := i / 8 off := i % 8 script[i] = ((b[idx] >> off) & 1) == 1 } return script, nil } func (tseq *TSeq) FlipCoin() bool { if tseq.i == len(tseq.script) { tseq.script = append(tseq.script, false) } outcome := tseq.script[tseq.i] tseq.i++ return outcome } func (tseq *TSeq) Choose(n int) int { if n == 0 { panic("can't choose between 0 choices") } i := 0 j := n - 1 for i < j { mid := (i + j) / 2 if tseq.FlipCoin() { i = mid 
+ 1 } else { j = mid } } return i } func (tseq *TSeq) next() bool { for len(tseq.script) > 0 && tseq.script[len(tseq.script)-1] { tseq.script = tseq.script[:len(tseq.script)-1] } if len(tseq.script) == 0 { return false } tseq.script[len(tseq.script)-1] = true tseq.i = 0 return true } juniper-0.15.1/internal/tseq/tseq_test.go000066400000000000000000000022061453027036000204040ustar00rootroot00000000000000package tseq import ( "fmt" "testing" "github.com/bradenaw/juniper/internal/require2" ) func TestTSeqBasic(t *testing.T) { runs := [][]bool{} Run(t, func(tseq *TSeq) { runs = append(runs, []bool{tseq.FlipCoin(), tseq.FlipCoin(), tseq.FlipCoin()}) }) require2.DeepEqual(t, runs, [][]bool{ {false, false, false}, {false, false, true}, {false, true, false}, {false, true, true}, {true, false, false}, {true, false, true}, {true, true, false}, {true, true, true}, }) } func TestTSeqDependent(t *testing.T) { runs := [][]bool{} Run(t, func(tseq *TSeq) { if tseq.FlipCoin() { runs = append(runs, []bool{true, tseq.FlipCoin()}) } else { runs = append(runs, []bool{false}) } }) require2.DeepEqual(t, runs, [][]bool{ {false}, {true, false}, {true, true}, }) } func TestTSeqChoose(t *testing.T) { for i := 1; i < 50; i++ { t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { expected := []int{} for j := 0; j < i; j++ { expected = append(expected, j) } runs := []int{} Run(t, func(tseq *TSeq) { runs = append(runs, tseq.Choose(i)) }) require2.DeepEqual(t, runs, expected) }) } } juniper-0.15.1/iterator/000077500000000000000000000000001453027036000151035ustar00rootroot00000000000000juniper-0.15.1/iterator/iterator.go000066400000000000000000000276361453027036000173010ustar00rootroot00000000000000// Package iterator allows iterating over sequences of values, for example the contents of a // container. package iterator // Iterator is used to iterate over a sequence of values. // // Iterators are lazy, meaning they do no work until a call to Next(). 
// // Iterators do not need to be fully consumed, callers may safely abandon an iterator before Next // returns false. type Iterator[T any] interface { // Next advances the iterator and returns the next item. Once the iterator is finished, the // first return is meaningless and the second return is false. Note that the final value of the // iterator has true in the second return, and it's the following call that returns false in the // second return. // // Once Next returns false in the second return, it is expected that it will always return false // afterwards. Next() (T, bool) } //////////////////////////////////////////////////////////////////////////////////////////////////// // Converters // // Functions that produce an Iterator from some other type. // //////////////////////////////////////////////////////////////////////////////////////////////////// // Chan returns an Iterator that yields the values received on c. func Chan[T any](c <-chan T) Iterator[T] { return &chanIterator[T]{c: c} } type chanIterator[T any] struct { c <-chan T } func (iter *chanIterator[T]) Next() (T, bool) { item, ok := <-iter.c return item, ok } // Counter returns an iterator that counts up from 0, yielding n items. // // The following are equivalent: // // for i := 0; i < n; i++ { // fmt.Println(n) // } // // // iter := iterator.Counter(n) // for { // item, ok := iter.Next() // if !ok { // break // } // fmt.Println(item) // } func Counter(n int) Iterator[int] { return &counterIterator{i: 0, n: n} } type counterIterator struct { i int n int } func (iter *counterIterator) Next() (int, bool) { if iter.i >= iter.n { return 0, false } item := iter.i iter.i++ return item, true } // Empty returns an iterator that yields no items. func Empty[T any]() Iterator[T] { return emptyIterator[T]{} } type emptyIterator[T any] struct{} func (iter emptyIterator[T]) Next() (T, bool) { var zero T return zero, false } // Repeat returns an iterator that yields item n times. 
func Repeat[T any](item T, n int) Iterator[T] { return &repeatIterator[T]{ item: item, x: n, } } type repeatIterator[T any] struct { item T x int } func (iter *repeatIterator[T]) Next() (T, bool) { if iter.x <= 0 { var zero T return zero, false } iter.x-- return iter.item, true } // Slice returns an iterator over the elements of s. func Slice[T any](s []T) Iterator[T] { return &sliceIterator[T]{ a: s, } } type sliceIterator[T any] struct { a []T } func (iter *sliceIterator[T]) Next() (T, bool) { if len(iter.a) == 0 { var zero T return zero, false } item := iter.a[0] iter.a = iter.a[1:] return item, true } // Peekable allows viewing the next item from an iterator without consuming it. type Peekable[T any] interface { Iterator[T] // Peek returns the next item of the iterator if there is one without consuming it. // // If Peek returns a value, the next call to Next will return the same value. Peek() (T, bool) } // WithPeek returns iter with a Peek() method attached. func WithPeek[T any](iter Iterator[T]) Peekable[T] { return &peekable[T]{inner: iter, has: false} } type peekable[T any] struct { inner Iterator[T] curr T has bool } func (iter *peekable[T]) Next() (T, bool) { if iter.has { item := iter.curr iter.has = false var zero T iter.curr = zero return item, true } return iter.inner.Next() } func (iter *peekable[T]) Peek() (T, bool) { if !iter.has { iter.curr, iter.has = iter.inner.Next() } return iter.curr, iter.has } //////////////////////////////////////////////////////////////////////////////////////////////////// // Reducers // // Functions that consume an iterator and produce some kind of final value. // //////////////////////////////////////////////////////////////////////////////////////////////////// // Collect advances iter to the end and returns all of the items seen as a slice. 
func Collect[T any](iter Iterator[T]) []T { return Reduce(iter, nil, func(out []T, item T) []T { return append(out, item) }) } // Equal returns true if the given iterators yield the same items in the same order. Consumes the // iterators. func Equal[T comparable](iters ...Iterator[T]) bool { if len(iters) == 0 { return true } for { item, ok := iters[0].Next() for i := 1; i < len(iters); i++ { iterIItem, iterIOk := iters[i].Next() if ok != iterIOk { return false } if ok && item != iterIItem { return false } } if !ok { return true } } } // Last consumes iter and returns the last n items. If iter yields fewer than n items, Last returns // all of them. func Last[T any](iter Iterator[T], n int) []T { buf := make([]T, n) i := 0 for { item, ok := iter.Next() if !ok { break } buf[i%n] = item i++ } if i < n { return buf[:i] } out := make([]T, n) idx := i % n copy(out, buf[idx:]) copy(out[n-idx:], buf[:idx]) return out } // One returns the only item yielded by iter. Returns false in the second return if iter yields zero // or more than one item. func One[T any](iter Iterator[T]) (T, bool) { var zero T x, ok := iter.Next() if !ok { return zero, false } _, ok = iter.Next() if ok { return zero, false } return x, true } // Reduce reduces iter to a single value using the reduction function f. func Reduce[T any, U any](iter Iterator[T], initial U, f func(U, T) U) U { acc := initial for { item, ok := iter.Next() if !ok { return acc } acc = f(acc, item) } } //////////////////////////////////////////////////////////////////////////////////////////////////// // Combinators // // Functions that take and return iterators, transforming the output somehow. // //////////////////////////////////////////////////////////////////////////////////////////////////// // Chunk returns an iterator over non-overlapping chunks of size chunkSize. The last chunk will be // smaller than chunkSize if the iterator does not contain an even multiple. 
func Chunk[T any](iter Iterator[T], chunkSize int) Iterator[[]T] { return &chunkIterator[T]{ inner: iter, chunkSize: chunkSize, } } type chunkIterator[T any] struct { inner Iterator[T] chunkSize int } func (iter *chunkIterator[T]) Next() ([]T, bool) { chunk := make([]T, 0, iter.chunkSize) for { item, ok := iter.inner.Next() if !ok { break } chunk = append(chunk, item) if len(chunk) == iter.chunkSize { return chunk, true } } if len(chunk) > 0 { return chunk, true } return nil, false } // Compact elides adjacent duplicates from iter. func Compact[T comparable](iter Iterator[T]) Iterator[T] { return CompactFunc(iter, func(a, b T) bool { return a == b }) } // CompactFunc elides adjacent duplicates from iter, using eq to determine duplicates. func CompactFunc[T any](iter Iterator[T], eq func(T, T) bool) Iterator[T] { return &compactIterator[T]{ inner: iter, first: true, eq: eq, } } type compactIterator[T any] struct { inner Iterator[T] prev T first bool eq func(T, T) bool } func (iter *compactIterator[T]) Next() (T, bool) { for { item, ok := iter.inner.Next() if !ok { return item, false } if iter.first { iter.first = false iter.prev = item return item, true } else if !iter.eq(iter.prev, item) { iter.prev = item return item, true } } } // Filter returns an iterator that yields only the items from iter for which keep returns true. func Filter[T any](iter Iterator[T], keep func(T) bool) Iterator[T] { return &filterIterator[T]{inner: iter, keep: keep} } type filterIterator[T any] struct { inner Iterator[T] keep func(T) bool } func (iter *filterIterator[T]) Next() (T, bool) { for { item, ok := iter.inner.Next() if !ok { break } if iter.keep(item) { return item, true } } var zero T return zero, false } // First returns an iterator that yields the first n items from iter. 
func First[T any](iter Iterator[T], n int) Iterator[T] { return &firstIterator[T]{inner: iter, x: n} } type firstIterator[T any] struct { inner Iterator[T] x int } func (iter *firstIterator[T]) Next() (T, bool) { if iter.x <= 0 { var zero T return zero, false } iter.x-- return iter.inner.Next() } // Flatten returns an iterator that yields all items from all iterators yielded by iter. func Flatten[T any](iter Iterator[Iterator[T]]) Iterator[T] { return &flattenIterator[T]{inner: iter} } type flattenIterator[T any] struct { inner Iterator[Iterator[T]] curr Iterator[T] } func (iter *flattenIterator[T]) Next() (T, bool) { for { if iter.curr == nil { var ok bool iter.curr, ok = iter.inner.Next() if !ok { var zero T return zero, false } } item, ok := iter.curr.Next() if !ok { iter.curr = nil continue } return item, true } } // Join returns an Iterator that returns all elements of iters[0], then all elements of iters[1], // and so on. func Join[T any](iters ...Iterator[T]) Iterator[T] { return &joinIterator[T]{ iters: iters, } } type joinIterator[T any] struct { iters []Iterator[T] } func (iter *joinIterator[T]) Next() (T, bool) { for len(iter.iters) > 0 { item, ok := iter.iters[0].Next() if ok { return item, true } iter.iters = iter.iters[1:] } var zero T return zero, false } // Map transforms the results of iter using the conversion f. func Map[T any, U any](iter Iterator[T], f func(t T) U) Iterator[U] { return &mapIterator[T, U]{ inner: iter, f: f, } } type mapIterator[T any, U any] struct { inner Iterator[T] f func(T) U } func (iter *mapIterator[T, U]) Next() (U, bool) { var zero U item, ok := iter.inner.Next() if !ok { return zero, false } return iter.f(item), true } // Runs returns an iterator of iterators. The inner iterators yield contiguous elements from iter // such that same(a, b) returns true for any a and b in the run. // // The inner iterator should be drained before calling Next on the outer iterator. // // same(a, a) must return true. 
If same(a, b) and same(b, c) both return true, then same(a, c) must // also. func Runs[T any](iter Iterator[T], same func(a, b T) bool) Iterator[Iterator[T]] { return &runsIterator[T]{ inner: WithPeek(iter), same: same, curr: nil, } } type runsIterator[T any] struct { inner Peekable[T] same func(a, b T) bool curr *runsInnerIterator[T] } func (iter *runsIterator[T]) Next() (Iterator[T], bool) { if iter.curr != nil { for { _, ok := iter.curr.Next() if !ok { break } } iter.curr = nil } item, ok := iter.inner.Peek() if !ok { return nil, false } iter.curr = &runsInnerIterator[T]{parent: iter, prev: item} return iter.curr, true } type runsInnerIterator[T any] struct { parent *runsIterator[T] prev T } func (iter *runsInnerIterator[T]) Next() (T, bool) { var zero T if iter.parent == nil { return zero, false } item, ok := iter.parent.inner.Peek() if !ok || !iter.parent.same(iter.prev, item) { iter.parent = nil return zero, false } return iter.parent.inner.Next() } // While returns an iterator that terminates before the first item from iter for which f returns // false. 
func While[T any](iter Iterator[T], f func(T) bool) Iterator[T] { return &whileIterator[T]{ inner: iter, f: f, done: false, } } type whileIterator[T any] struct { inner Iterator[T] f func(T) bool done bool } func (iter *whileIterator[T]) Next() (T, bool) { var zero T if iter.done { return zero, false } item, ok := iter.inner.Next() if !ok { return zero, false } if !iter.f(item) { iter.done = true return zero, false } return item, true } juniper-0.15.1/iterator/iterator_example_test.go000066400000000000000000000073341453027036000220440ustar00rootroot00000000000000package iterator_test import ( "fmt" "math" "github.com/bradenaw/juniper/iterator" "github.com/bradenaw/juniper/xmath" ) func ExampleIterator() { iter := iterator.Counter(5) for { item, ok := iter.Next() if !ok { break } fmt.Println(item) } // Output: // 0 // 1 // 2 // 3 // 4 } func ExampleChunk() { iter := iterator.Slice([]string{"a", "b", "c", "d", "e", "f", "g", "h"}) chunked := iterator.Chunk(iter, 3) item, _ := chunked.Next() fmt.Println(item) item, _ = chunked.Next() fmt.Println(item) item, _ = chunked.Next() fmt.Println(item) // Output: // [a b c] // [d e f] // [g h] } func ExampleCompact() { iter := iterator.Slice([]string{"a", "a", "b", "c", "c", "c", "a"}) compacted := iterator.Compact(iter) fmt.Println(iterator.Collect(compacted)) // Output: // [a b c a] } func ExampleCompactFunc() { iter := iterator.Slice([]string{ "bank", "beach", "ghost", "goat", "group", "yaw", "yew", }) compacted := iterator.CompactFunc(iter, func(a, b string) bool { return a[0] == b[0] }) fmt.Println(iterator.Collect(compacted)) // Output: // [bank ghost yaw] } func ExampleEqual() { fmt.Println( iterator.Equal( iterator.Slice([]string{"a", "b", "c"}), iterator.Slice([]string{"a", "b", "c"}), ), ) fmt.Println( iterator.Equal( iterator.Slice([]string{"a", "b", "c"}), iterator.Slice([]string{"a", "b", "c", "d"}), ), ) // Output: // true // false } func ExampleFilter() { iter := iterator.Slice([]int{1, 2, 3, 4, 5, 6}) evens := 
iterator.Filter(iter, func(x int) bool { return x%2 == 0 }) fmt.Println(iterator.Collect(evens)) // Output: // [2 4 6] } func ExampleFlatten() { iter := iterator.Slice([]iterator.Iterator[int]{ iterator.Slice([]int{0, 1, 2}), iterator.Slice([]int{3, 4, 5, 6}), iterator.Slice([]int{7}), }) all := iterator.Flatten(iter) fmt.Println(iterator.Collect(all)) // Output: // [0 1 2 3 4 5 6 7] } func ExampleFirst() { iter := iterator.Slice([]string{"a", "b", "c", "d", "e"}) first3 := iterator.First(iter, 3) fmt.Println(iterator.Collect(first3)) // Output: // [a b c] } func ExampleJoin() { iter := iterator.Join( iterator.Counter(3), iterator.Counter(5), iterator.Counter(2), ) fmt.Println(iterator.Collect(iter)) // Output: // [0 1 2 0 1 2 3 4 0 1] } func ExampleLast() { iter := iterator.Counter(10) last3 := iterator.Last(iter, 3) fmt.Println(last3) iter = iterator.Counter(2) last3 = iterator.Last(iter, 3) fmt.Println(last3) // Output: // [7 8 9] // [0 1] } func ExampleOne() { iter := iterator.Slice([]string{"a"}) item, ok := iterator.One(iter) fmt.Println(ok) fmt.Println(item) iter = iterator.Slice([]string{"a", "b"}) item, ok = iterator.One(iter) fmt.Println(ok) // Output: // true // a // false } func ExampleRuns() { iter := iterator.Slice([]int{2, 4, 0, 7, 1, 3, 9, 2, 8}) parityRuns := iterator.Runs(iter, func(a, b int) bool { return a%2 == b%2 }) fmt.Println(iterator.Collect(iterator.Map(parityRuns, iterator.Collect[int]))) // Output: // [[2 4 0] [7 1 3 9] [2 8]] } func ExampleReduce() { x := []int{3, 1, 2} iter := iterator.Slice(x) sum := iterator.Reduce(iter, 0, func(x, y int) int { return x + y }) fmt.Println(sum) iter = iterator.Slice(x) min := iterator.Reduce(iter, math.MaxInt, xmath.Min[int]) fmt.Println(min) // Output: // 6 // 1 } func ExampleRepeat() { iter := iterator.Repeat("a", 4) fmt.Println(iterator.Collect(iter)) // Output: // [a a a a] } func ExampleWhile() { iter := iterator.Slice([]string{ "aardvark", "badger", "cheetah", "dinosaur", "egret", }) beforeD := 
iterator.While(iter, func(s string) bool { return s < "d" }) fmt.Println(iterator.Collect(beforeD)) // Output: // [aardvark badger cheetah] } juniper-0.15.1/parallel/000077500000000000000000000000001453027036000150465ustar00rootroot00000000000000juniper-0.15.1/parallel/doc.go000066400000000000000000000001301453027036000161340ustar00rootroot00000000000000// Package parallel provides primitives for running tasks in parallel. package parallel juniper-0.15.1/parallel/parallel.go000066400000000000000000000200001453027036000171610ustar00rootroot00000000000000package parallel import ( "context" "runtime" "sync" "sync/atomic" "golang.org/x/sync/errgroup" "github.com/bradenaw/juniper/container/xheap" "github.com/bradenaw/juniper/iterator" "github.com/bradenaw/juniper/stream" ) // Do calls f from parallelism goroutines n times, providing each invocation a unique i in [0, n). // // If parallelism <= 0, uses GOMAXPROCS instead. func Do( parallelism int, n int, f func(i int), ) { if parallelism <= 0 { parallelism = runtime.GOMAXPROCS(-1) } if parallelism > n { parallelism = n } if parallelism == 1 { for i := 0; i < n; i++ { f(i) } return } x := int32(-1) var wg sync.WaitGroup wg.Add(parallelism) for j := 0; j < parallelism; j++ { go func() { defer wg.Done() for { i := int(atomic.AddInt32(&x, 1)) if i >= n { return } f(i) } }() } wg.Wait() return } // DoContext calls f from parallelism goroutines n times, providing each invocation a unique i in // [0, n). // // If any call to f returns an error the context passed to invocations of f is cancelled, no further // calls to f are made, and Do returns the first error encountered. // // If parallelism <= 0, uses GOMAXPROCS instead. 
func DoContext( ctx context.Context, parallelism int, n int, f func(ctx context.Context, i int) error, ) error { if parallelism <= 0 { parallelism = runtime.GOMAXPROCS(-1) } if parallelism > n { parallelism = n } if parallelism == 1 { for i := 0; i < n; i++ { err := f(ctx, i) if err != nil { return err } } return nil } x := int32(-1) eg, ctx := errgroup.WithContext(ctx) for j := 0; j < parallelism; j++ { eg.Go(func() error { for { i := int(atomic.AddInt32(&x, 1)) if i >= n { return nil } if ctx.Err() != nil { return ctx.Err() } err := f(ctx, i) if err != nil { return err } } }) } return eg.Wait() } // Map uses parallelism goroutines to call f once for each element of in. out[i] is the // result of f for in[i]. // // If parallelism <= 0, uses GOMAXPROCS instead. func Map[T any, U any]( parallelism int, in []T, f func(in T) U, ) []U { out := make([]U, len(in)) Do(parallelism, len(in), func(i int) { out[i] = f(in[i]) }) return out } // MapContext uses parallelism goroutines to call f once for each element of in. out[i] is the // result of f for in[i]. // // If any call to f returns an error the context passed to invocations of f is cancelled, no further // calls to f are made, and Map returns the first error encountered. // // If parallelism <= 0, uses GOMAXPROCS instead. func MapContext[T any, U any]( ctx context.Context, parallelism int, in []T, f func(ctx context.Context, in T) (U, error), ) ([]U, error) { out := make([]U, len(in)) err := DoContext(ctx, parallelism, len(in), func(ctx context.Context, i int) error { var err error out[i], err = f(ctx, in[i]) return err }) if err != nil { return nil, err } return out, nil } // MapIterator uses parallelism goroutines to call f once for each element yielded by iter. The // returned iterator returns these results in the same order that iter yielded them in. // // This iterator, in contrast with most, must be consumed completely or it will leak the goroutines. // // If parallelism <= 0, uses GOMAXPROCS instead. 
// // bufferSize is the size of the work buffer. A larger buffer uses more memory but gives better // throughput in the face of larger variance in the processing time for f. func MapIterator[T any, U any]( iter iterator.Iterator[T], parallelism int, bufferSize int, f func(T) U, ) iterator.Iterator[U] { if parallelism <= 0 { parallelism = runtime.GOMAXPROCS(-1) } if bufferSize < parallelism { bufferSize = parallelism } in := make(chan valueAndIndex[T]) mIter := &mapIterator[U]{ ch: make(chan valueAndIndex[U]), h: xheap.New(func(a, b valueAndIndex[U]) bool { return a.idx < b.idx }, nil), i: 0, bufferSize: bufferSize, inFlight: 0, } mIter.cond = sync.NewCond(&mIter.m) go func() { i := 0 for { item, ok := iter.Next() if !ok { break } mIter.m.Lock() for mIter.inFlight >= bufferSize { mIter.cond.Wait() } mIter.inFlight++ mIter.m.Unlock() in <- valueAndIndex[T]{ value: item, idx: i, } i++ } close(in) }() nDone := uint32(0) for i := 0; i < parallelism; i++ { go func() { for item := range in { u := f(item.value) mIter.ch <- valueAndIndex[U]{value: u, idx: item.idx} } if atomic.AddUint32(&nDone, 1) == uint32(parallelism) { close(mIter.ch) } }() } return mIter } type mapIterator[U any] struct { ch chan valueAndIndex[U] m sync.Mutex cond *sync.Cond bufferSize int inFlight int h xheap.Heap[valueAndIndex[U]] i int } func (iter *mapIterator[U]) Next() (U, bool) { for { if iter.h.Len() > 0 && iter.h.Peek().idx == iter.i { item := iter.h.Pop() iter.i++ iter.m.Lock() iter.inFlight-- if iter.inFlight == iter.bufferSize-1 { iter.cond.Signal() } iter.m.Unlock() return item.value, true } item, ok := <-iter.ch if !ok { var zero U return zero, false } iter.h.Push(item) } } type valueAndIndex[T any] struct { value T idx int } // MapStream uses parallelism goroutines to call f once for each element yielded by s. The returned // stream returns these results in the same order that s yielded them in. 
// // If any call to f returns an error the context passed to invocations of f is cancelled, no further // calls to f are made, and the returned stream's Next returns the first error encountered. // // If parallelism <= 0, uses GOMAXPROCS instead. // // bufferSize is the size of the work buffer. A larger buffer uses more memory but gives better // throughput in the face of larger variance in the processing time for f. func MapStream[T any, U any]( ctx context.Context, s stream.Stream[T], parallelism int, bufferSize int, f func(context.Context, T) (U, error), ) stream.Stream[U] { if parallelism <= 0 { parallelism = runtime.GOMAXPROCS(-1) } if bufferSize < parallelism { bufferSize = parallelism } in := make(chan valueAndIndex[T]) ready := make(chan struct{}, bufferSize) for i := 0; i < bufferSize; i++ { ready <- struct{}{} } ctx, cancel := context.WithCancel(ctx) eg, ctx := errgroup.WithContext(ctx) eg.Go(func() error { defer s.Close() defer close(in) i := 0 for { item, err := s.Next(ctx) if err == stream.End { break } else if err != nil { return err } select { case <-ctx.Done(): return ctx.Err() case <-ready: } select { case <-ctx.Done(): return ctx.Err() case in <- valueAndIndex[T]{ value: item, idx: i, }: } i++ } return nil }) c := make(chan valueAndIndex[U], bufferSize) nDone := uint32(0) for i := 0; i < parallelism; i++ { eg.Go(func() error { defer func() { if atomic.AddUint32(&nDone, 1) == uint32(parallelism) { close(c) } }() for item := range in { u, err := f(ctx, item.value) if err != nil { return err } select { case c <- valueAndIndex[U]{value: u, idx: item.idx}: case <-ctx.Done(): return ctx.Err() } } return nil }) } return &mapStream[U]{ cancel: cancel, eg: eg, c: c, ready: ready, h: xheap.New(func(a, b valueAndIndex[U]) bool { return a.idx < b.idx }, nil), i: 0, } } type mapStream[U any] struct { cancel context.CancelFunc eg *errgroup.Group c <-chan valueAndIndex[U] ready chan struct{} h xheap.Heap[valueAndIndex[U]] i int } func (s *mapStream[U]) Next(ctx 
context.Context) (U, error) { var zero U for { if s.h.Len() > 0 && s.h.Peek().idx == s.i { item := s.h.Pop() s.i++ s.ready <- struct{}{} return item.value, nil } select { case item, ok := <-s.c: if !ok { err := s.eg.Wait() if err != nil { return zero, err } return zero, stream.End } s.h.Push(item) case <-ctx.Done(): return zero, ctx.Err() } } } func (s *mapStream[U]) Close() { s.cancel() _ = s.eg.Wait() } juniper-0.15.1/parallel/parallel_test.go000066400000000000000000000045041453027036000202330ustar00rootroot00000000000000package parallel import ( "context" "errors" "fmt" "strconv" "testing" "github.com/bradenaw/juniper/internal/require2" "github.com/bradenaw/juniper/iterator" "github.com/bradenaw/juniper/stream" ) func TestMap(t *testing.T) { for _, parallelism := range []int{1, 2} { t.Run(fmt.Sprintf("parallelism=%d", parallelism), func(t *testing.T) { ints := []int{0, 1, 2, 3, 4} strs := Map( parallelism, ints, func(i int) string { return strconv.Itoa(i) }, ) require2.SlicesEqual(t, []string{"0", "1", "2", "3", "4"}, strs) }) } } func TestMapContext(t *testing.T) { for _, parallelism := range []int{1, 2} { t.Run(fmt.Sprintf("parallelism=%d", parallelism), func(t *testing.T) { ctx := context.Background() ints := []int{0, 1, 2, 3, 4} strs, err := MapContext( ctx, parallelism, ints, func(ctx context.Context, i int) (string, error) { return strconv.Itoa(i), nil }, ) require2.NoError(t, err) require2.SlicesEqual(t, []string{"0", "1", "2", "3", "4"}, strs) }) } } func TestMapIterator(t *testing.T) { strs := MapIterator( iterator.Counter(5), 2, // parallelism 0, // bufferSize func(i int) string { return strconv.Itoa(i) }, ) require2.SlicesEqual(t, []string{"0", "1", "2", "3", "4"}, iterator.Collect(strs)) } func TestMapStream(t *testing.T) { strsStream := MapStream( context.Background(), stream.FromIterator(iterator.Counter(5)), 2, // parallelism 0, // bufferSize func(ctx context.Context, i int) (string, error) { return strconv.Itoa(i), nil }, ) strs, err := 
stream.Collect(context.Background(), strsStream) require2.NoError(t, err) require2.SlicesEqual(t, []string{"0", "1", "2", "3", "4"}, strs) } func TestMapStreamError(t *testing.T) { sender, receiver := stream.Pipe[int](0) strsStream := MapStream( context.Background(), receiver, 2, // parallelism 0, // bufferSize func(ctx context.Context, i int) (string, error) { return strconv.Itoa(i), nil }, ) oopsError := errors.New("oops") err := sender.Send(context.Background(), 0) require2.NoError(t, err) err = sender.Send(context.Background(), 1) require2.NoError(t, err) sender.Close(oopsError) for { _, err := strsStream.Next(context.Background()) if err == nil { continue } if err != oopsError { t.Fatalf("%s", err) } break } } juniper-0.15.1/stream/000077500000000000000000000000001453027036000145455ustar00rootroot00000000000000juniper-0.15.1/stream/stream.go000066400000000000000000000601261453027036000163740ustar00rootroot00000000000000// Package stream allows iterating over sequences of values where iteration may fail, for example // when it involves I/O. package stream import ( "context" "errors" "sync" "sync/atomic" "time" "github.com/bradenaw/juniper/iterator" "github.com/bradenaw/juniper/xmath" ) var ( // End is returned from Stream.Next when iteration ends successfully. End = errors.New("end of stream") // ErrClosedPipe is returned from PipeSender.Send() when the associated stream has already been // closed. ErrClosedPipe = errors.New("closed pipe") // ErrMoreThanOne is returned from One when a Stream yielded more than one item. ErrMoreThanOne = errors.New("stream had more than one item") // ErrEmpty is returned from One when a Stream yielded no items. ErrEmpty = errors.New("stream empty") ) // Stream is used to iterate over a sequence of values. It is similar to Iterator, except intended // for use when iteration may fail for some reason, usually because the sequence requires I/O to // produce. 
// // Streams and the combinator functions are lazy, meaning they do no work until a call to Next(). // // Streams do not need to be fully consumed, but streams must be closed. Functions in this package // that are passed streams expect to be the sole user of that stream going forward, and so will // handle closing on your behalf so long as all streams they return are closed appropriately. type Stream[T any] interface { // Next advances the stream and returns the next item. If the stream is already over, Next // returns stream.End in the second return. Note that the final item of the stream has nil in // the second return, and it's the following call that returns stream.End. // // Once a Next call returns stream.End, it is expected that the Stream will return stream.End to // every Next call afterwards. Next(ctx context.Context) (T, error) // Close ends receiving from the stream. It is invalid to call Next after calling Close. Close() } //////////////////////////////////////////////////////////////////////////////////////////////////// // Converters + Constructors // // Functions that produce a Stream. // //////////////////////////////////////////////////////////////////////////////////////////////////// // Chan returns a Stream that receives values from c. func Chan[T any](c <-chan T) Stream[T] { return &chanStream[T]{c: c} } type chanStream[T any] struct { c <-chan T } func (s *chanStream[T]) Next(ctx context.Context) (T, error) { var zero T select { case item, ok := <-s.c: if !ok { return zero, End } return item, nil case <-ctx.Done(): return zero, ctx.Err() } } func (s *chanStream[T]) Close() {} // Empty returns a Stream that yields stream.End immediately. func Empty[T any]() Stream[T] { return emptyStream[T]{} } type emptyStream[T any] struct{} func (s emptyStream[T]) Next(ctx context.Context) (T, error) { var zero T return zero, End } func (s emptyStream[T]) Close() {} // Error returns a Stream that immediately produces err from Next. 
func Error[T any](err error) Stream[T] { return errorStream[T]{err} } type errorStream[T any] struct { err error } func (s errorStream[T]) Next(ctx context.Context) (T, error) { var zero T return zero, s.err } func (s errorStream[T]) Close() {} // FromIterator returns a Stream that yields the values from iter. This stream ignores the context // passed to Next during the call to iter.Next. func FromIterator[T any](iter iterator.Iterator[T]) Stream[T] { return &iteratorStream[T]{iter: iter} } type iteratorStream[T any] struct { iter iterator.Iterator[T] } func (s *iteratorStream[T]) Next(ctx context.Context) (T, error) { var zero T if ctx.Err() != nil { return zero, ctx.Err() } item, ok := s.iter.Next() if !ok { return zero, End } return item, nil } func (s *iteratorStream[T]) Close() {} // Pipe returns a linked sender and receiver pair. Values sent using sender.Send will be delivered // to the given Stream. The Stream will terminate when the sender is closed. // // bufferSize is the number of elements in the buffer between the sender and the receiver. 0 has the // same meaning as for the built-in make(chan). func Pipe[T any](bufferSize int) (*PipeSender[T], Stream[T]) { c := make(chan T, bufferSize) senderDone := make(chan struct{}) senderErr := new(error) streamDone := make(chan struct{}) sender := &PipeSender[T]{ c: c, senderErr: senderErr, senderDone: senderDone, streamDone: streamDone, } receiver := &pipeStream[T]{ c: c, senderErr: senderErr, senderDone: senderDone, streamDone: streamDone, } return sender, receiver } // PipeSender is the send half of a pipe returned by Pipe. type PipeSender[T any] struct { c chan<- T senderErr *error senderDone chan struct{} streamDone <-chan struct{} } // Send attempts to send x to the receiver. If the receiver closes before x can be sent, returns // ErrClosedPipe immediately. If ctx expires before x can be sent, returns ctx.Err(). 
// // A nil return does not necessarily mean that the receiver will see x, since the receiver may close // early. // // Send may be called concurrently with other Sends and with Close. func (s *PipeSender[T]) Send(ctx context.Context, x T) error { select { case <-ctx.Done(): return ctx.Err() case <-s.streamDone: return ErrClosedPipe case <-s.senderDone: return *s.senderErr case s.c <- x: return nil } } // TrySend attempts to send x to the receiver, but returns (false, nil) if the pipe's buffer is // already full instead of blocking. If the receiver is already closed, returns ErrClosedPipe. If // ctx expires before x can be sent, returns ctx.Err(). // // A (true, nil) return does not necessarily mean that the receiver will see x, since the receiver // may close early. // // TrySend may be called concurrently with other Sends and with Close. func (s *PipeSender[T]) TrySend(ctx context.Context, x T) (bool, error) { select { case <-ctx.Done(): return false, ctx.Err() case <-s.streamDone: return false, ErrClosedPipe case <-s.senderDone: return false, *s.senderErr default: } select { case s.c <- x: return true, nil default: return false, nil } } // Close closes the PipeSender, signalling to the receiver that no more values will be sent. If an // error is provided, it will surface to the receiver's Next and to any concurrent Sends. // // Close may only be called once. func (s *PipeSender[T]) Close(err error) { *s.senderErr = err close(s.senderDone) } type pipeStream[T any] struct { c <-chan T senderErr *error senderDone <-chan struct{} streamDone chan<- struct{} } func (s *pipeStream[T]) Next(ctx context.Context) (T, error) { var zero T select { case <-ctx.Done(): return zero, ctx.Err() case item := <-s.c: return item, nil case <-s.senderDone: err := *s.senderErr if err != nil { return zero, err } return zero, End } } func (s *pipeStream[T]) Close() { close(s.streamDone) } // Peekable allows viewing the next item from a stream without consuming it. 
type Peekable[T any] interface { Stream[T] // Peek returns the next item of the stream if there is one without consuming it. // // If Peek returns a value, the next call to Next will return the same value. Peek(ctx context.Context) (T, error) } // WithPeek returns iter with a Peek() method attached. func WithPeek[T any](s Stream[T]) Peekable[T] { return &peekable[T]{inner: s, has: false} } type peekable[T any] struct { inner Stream[T] curr T has bool } func (s *peekable[T]) Next(ctx context.Context) (T, error) { if s.has { item := s.curr s.has = false var zero T s.curr = zero return item, nil } return s.inner.Next(ctx) } func (s *peekable[T]) Peek(ctx context.Context) (T, error) { var zero T if !s.has { var err error s.curr, err = s.inner.Next(ctx) if err == End { s.has = false return zero, End } else if err != nil { return zero, err } s.has = true } return s.curr, nil } func (s *peekable[T]) Close() { s.inner.Close() } //////////////////////////////////////////////////////////////////////////////////////////////////// // Reducers // // Functions that consume a stream and produce some kind of final value. // //////////////////////////////////////////////////////////////////////////////////////////////////// // Collect advances s to the end and returns all of the items seen as a slice. func Collect[T any](ctx context.Context, s Stream[T]) ([]T, error) { defer s.Close() var out []T for { item, err := s.Next(ctx) if err == End { return out, nil } else if err != nil { return nil, err } out = append(out, item) } } // Last consumes s and returns the last n items. If s yields fewer than n items, Last returns // all of them. 
func Last[T any](ctx context.Context, s Stream[T], n int) ([]T, error) { defer s.Close() buf := make([]T, n) i := 0 for { item, err := s.Next(ctx) if err == End { break } else if err != nil { return nil, err } buf[i%n] = item i++ } if i < n { return buf[:i], nil } out := make([]T, n) idx := i % n copy(out, buf[idx:]) copy(out[n-idx:], buf[:idx]) return out, nil } // One returns the only item that s yields. Returns an error if encountered, or if s yields zero or // more than one item. func One[T any](ctx context.Context, s Stream[T]) (T, error) { var zero T x, err := s.Next(ctx) if err == End { return zero, ErrEmpty } else if err != nil { return zero, err } _, err = s.Next(ctx) if err == End { return x, nil } else if err != nil { return zero, err } return zero, ErrMoreThanOne } // Reduce reduces s to a single value using the reduction function f. func Reduce[T any, U any]( ctx context.Context, s Stream[T], initial U, f func(U, T) (U, error), ) (U, error) { defer s.Close() acc := initial for { item, err := s.Next(ctx) if err == End { return acc, nil } else if err != nil { return acc, err } acc, err = f(acc, item) if err != nil { return acc, err } } } //////////////////////////////////////////////////////////////////////////////////////////////////// // Combinators // // Functions that take and return iterators, transforming the output somehow. // //////////////////////////////////////////////////////////////////////////////////////////////////// // Batch returns a stream of non-overlapping batches from s of size batchSize. Batch is similar to // Chunk with the added feature that an underfilled batch will be delivered to the output stream if // any item has been in the batch for more than maxWait. 
func Batch[T any](s Stream[T], maxWait time.Duration, batchSize int) Stream[[]T] {
	// Delegate to BatchFunc with a simple size-based fullness predicate.
	return BatchFunc(s, maxWait, func(batch []T) bool {
		return len(batch) >= batchSize
	})
}

// BatchFunc returns a stream of non-overlapping batches from s, using full to determine when a
// batch is full.  BatchFunc is similar to Chunk with the added feature that an underfilled batch
// will be delivered to the output stream if any item has been in the batch for more than maxWait.
func BatchFunc[T any](
	s Stream[T],
	maxWait time.Duration,
	full func(batch []T) bool,
) Stream[[]T] {
	// bgCtx governs both background goroutines below; batchStream.Close cancels it and then
	// waits on out.wg for them to exit.
	bgCtx, bgCancel := context.WithCancel(context.Background())
	out := &batchStream[T]{
		batchC:   make(chan []T),
		waiting:  make(chan struct{}),
		bgCancel: bgCancel,
	}
	c := make(chan T)
	out.wg.Add(2)
	// Goroutine 1: pump items from s into c. Any stream error is recorded in out.err before c
	// closes, which is what makes it safe for Next to read out.err after batchC closes.
	go func() {
		defer out.wg.Done()
		defer s.Close()
		defer close(c)
		for {
			item, err := s.Next(bgCtx)
			if err == End {
				break
			} else if err == context.Canceled && bgCtx.Err() == context.Canceled {
				// The cancellation came from our own Close; treat it as a normal end
				// rather than an error.
				break
			} else if err != nil {
				out.err = err
				return
			}
			c <- item
		}
	}()
	// Build up batches and flush them when either:
	// A) The batch is full.
	// B) It's been at least maxWait since the first item arrived _and_ there is somebody waiting.
	//    No sense in underfilling a batch if nobody's actually asking for it yet.
	// C) There aren't any more items.
	go func() {
		defer out.wg.Done()
		var batch []T
		// batchStart is the arrival time of the oldest item in batch; only meaningful while
		// len(batch) > 0.
		var batchStart time.Time
		// Exponentially-weighted estimate of recent batch sizes, used to pre-size the next
		// batch's backing array.
		batchSizeEstimate := 0
		var timer *time.Timer
		// Starts off as nil so that the timerC select arm isn't chosen until populated. Also set
		// to nil when we've already stopped or received from timer to know when it needs to be
		// drained.
		var timerC <-chan time.Time
		// True when a receiver announced itself via out.waiting while the batch was empty, so
		// the maxWait timer should start as soon as the first item arrives.
		waitingAtEmpty := false
		defer func() {
			if timer != nil {
				timer.Stop()
			}
			close(out.batchC)
		}()

		// flush delivers the current batch to a receiver, or reports false if the stream was
		// closed first. Also resets batch with a capacity based on recent batch sizes.
		flush := func() bool {
			select {
			case <-bgCtx.Done():
				return false
			case out.batchC <- batch:
			}
			batchSizeEstimate = (batchSizeEstimate + len(batch)) / 2
			// 11/10 leaves a little headroom over the estimate to avoid regrowing.
			batch = make([]T, 0, xmath.Max(len(batch), batchSizeEstimate*11/10))
			waitingAtEmpty = false
			return true
		}

		// stopTimer stops the maxWait timer, draining timer.C if it already fired so that a
		// later Reset is safe (required by time.Timer's contract).
		stopTimer := func() {
			if timer == nil {
				return
			}
			stopped := timer.Stop()
			if !stopped && timerC != nil {
				<-timerC
			}
			timerC = nil
		}

		// startTimer (re)arms the timer to fire maxWait after the oldest item arrived,
		// lazily allocating it on first use.
		startTimer := func() {
			stopTimer()
			if timer == nil {
				timer = time.NewTimer(maxWait - time.Since(batchStart))
			} else {
				timer.Reset(maxWait - time.Since(batchStart))
			}
			timerC = timer.C
		}

		for {
			select {
			case item, ok := <-c:
				if !ok {
					// Case (C): we're done.
					// Flush what we have so far, if any.
					if len(batch) > 0 {
						_ = flush()
					}
					return
				}
				batch = append(batch, item)
				if full(batch) {
					// Case (A): the batch is full.
					stopTimer()
					if !flush() {
						return
					}
				}
				if len(batch) == 1 {
					// Bookkeeping for case (B).
					batchStart = time.Now()
					if waitingAtEmpty {
						startTimer()
					}
				}
			case <-timerC:
				// Case (B).
				timerC = nil
				// Being here already implies the conditions are true, since the timer is only
				// running while the batch is non-empty and there's somebody waiting.
				if !flush() {
					return
				}
			case <-out.waiting:
				// Bookkeeping for case (B).
				if len(batch) > 0 {
					// Time already elapsed, just deliver the batch now.
					if time.Since(batchStart) > maxWait {
						if !flush() {
							return
						}
					} else {
						startTimer()
					}
				} else {
					// Timer will start when the first item shows up.
					waitingAtEmpty = true
				}
			}
		}
	}()
	return out
}

// batchStream is the consumer-facing half of BatchFunc: Next receives batches over batchC and
// signals readiness over waiting; Close cancels the background goroutines and waits for them.
type batchStream[T any] struct {
	bgCancel context.CancelFunc
	wg       sync.WaitGroup
	batchC   chan []T
	// populated at most once and always before batchC closes
	err     error
	waiting chan struct{}
}

// Next returns the next batch, announcing to the producer (via waiting) that a receiver is ready
// so that underfilled batches can be flushed at the maxWait interval.
func (iter *batchStream[T]) Next(ctx context.Context) ([]T, error) {
	select {
	// There might be a batch already ready because it filled before we even asked.
	case batch, ok := <-iter.batchC:
		if !ok {
			// batchC closed: the stream ended, either normally or with iter.err.
			if iter.err != nil {
				return nil, iter.err
			}
			return nil, End
		}
		return batch, nil
	// Otherwise, we need to let the sender know we're waiting so that they can flush an underfilled
	// batch at interval.
	case iter.waiting <- struct{}{}:
		select {
		case batch, ok := <-iter.batchC:
			if !ok {
				if iter.err != nil {
					return nil, iter.err
				}
				return nil, End
			}
			return batch, nil
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

// Close stops both background goroutines and blocks until they have exited.
func (iter *batchStream[T]) Close() {
	iter.bgCancel()
	iter.wg.Wait()
}

// Chunk returns a stream of non-overlapping chunks from s of size chunkSize. The last chunk will be
// smaller than chunkSize if the stream does not contain an even multiple.
func Chunk[T any](s Stream[T], chunkSize int) Stream[[]T] {
	return &chunkStream[T]{
		inner:     s,
		chunkSize: chunkSize,
	}
}

// chunkStream accumulates items from inner into chunk until it reaches chunkSize.
type chunkStream[T any] struct {
	inner     Stream[T]
	chunkSize int
	// chunk is the partially-filled chunk being accumulated.
	chunk []T
}

func (s *chunkStream[T]) Next(ctx context.Context) ([]T, error) {
	for {
		item, err := s.inner.Next(ctx)
		if err == End {
			break
		} else if err != nil {
			return nil, err
		}
		s.chunk = append(s.chunk, item)
		if len(s.chunk) == s.chunkSize {
			// Hand off the full chunk and start a fresh backing array so the caller's
			// slice isn't clobbered by further appends.
			chunk := s.chunk
			s.chunk = make([]T, 0, s.chunkSize)
			return chunk, nil
		}
	}
	// Inner stream ended; deliver any partial chunk before reporting End.
	if len(s.chunk) > 0 {
		chunk := s.chunk
		s.chunk = make([]T, 0, s.chunkSize)
		return chunk, nil
	}
	return nil, End
}

func (s *chunkStream[T]) Close() {
	s.inner.Close()
}

// Compact elides adjacent duplicates from s.
func Compact[T comparable](s Stream[T]) Stream[T] {
	return CompactFunc(s, func(a, b T) bool { return a == b })
}

// CompactFunc elides adjacent duplicates from s, using eq to determine duplicates.
func CompactFunc[T any](s Stream[T], eq func(T, T) bool) Stream[T] { return &compactStream[T]{ inner: s, first: true, eq: eq, } } type compactStream[T any] struct { inner Stream[T] prev T first bool eq func(T, T) bool } func (s *compactStream[T]) Next(ctx context.Context) (T, error) { for { item, err := s.inner.Next(ctx) if err != nil { return item, err } if s.first { s.first = false s.prev = item return item, nil } else if !s.eq(s.prev, item) { s.prev = item return item, nil } } } func (s *compactStream[T]) Close() { s.inner.Close() } // Filter returns a Stream that yields only the items from s for which keep returns true. If keep // returns an error, terminates the stream early. func Filter[T any](s Stream[T], keep func(context.Context, T) (bool, error)) Stream[T] { return &filterStream[T]{inner: s, keep: keep} } type filterStream[T any] struct { inner Stream[T] keep func(context.Context, T) (bool, error) } func (s *filterStream[T]) Next(ctx context.Context) (T, error) { var zero T for { item, err := s.inner.Next(ctx) if err != nil { return zero, err } ok, err := s.keep(ctx, item) if err != nil { return zero, err } if ok { return item, nil } } } func (s *filterStream[T]) Close() { s.inner.Close() } // First returns a Stream that yields the first n items from s. func First[T any](s Stream[T], n int) Stream[T] { return &firstStream[T]{inner: s, x: n} } type firstStream[T any] struct { inner Stream[T] x int } func (s *firstStream[T]) Next(ctx context.Context) (T, error) { if s.x <= 0 { var zero T return zero, End } item, err := s.inner.Next(ctx) if err != nil { return item, err } s.x-- return item, nil } func (s *firstStream[T]) Close() { s.inner.Close() } // Flatten returns a stream that yields all items from all streams yielded by s. 
func Flatten[T any](s Stream[Stream[T]]) Stream[T] { return &flattenStream[T]{inner: s} } type flattenStream[T any] struct { inner Stream[Stream[T]] curr Stream[T] } func (s *flattenStream[T]) Next(ctx context.Context) (T, error) { for { if s.curr == nil { var err error s.curr, err = s.inner.Next(ctx) if err != nil { var zero T return zero, err } } item, err := s.curr.Next(ctx) if err == End { s.curr.Close() s.curr = nil continue } else if err != nil { return item, err } return item, nil } } func (s *flattenStream[T]) Close() { if s.curr != nil { s.curr.Close() } s.inner.Close() } func FlattenSlices[T any](s Stream[[]T]) Stream[T] { return &flattenSlicesStream[T]{ inner: s, } } type flattenSlicesStream[T any] struct { inner Stream[[]T] buffer []T } func (s *flattenSlicesStream[T]) Next(ctx context.Context) (T, error) { var zero T for { if len(s.buffer) > 0 { item := s.buffer[0] s.buffer[0] = zero s.buffer = s.buffer[1:] return item, nil } var err error s.buffer, err = s.inner.Next(ctx) if err != nil { return zero, err } } } func (s *flattenSlicesStream[T]) Close() { s.inner.Close() } // Join returns a Stream that yields all elements from streams[0], then all elements from // streams[1], and so on. func Join[T any](streams ...Stream[T]) Stream[T] { return &joinStream[T]{remaining: streams} } type joinStream[T any] struct { remaining []Stream[T] } func (s *joinStream[T]) Next(ctx context.Context) (T, error) { var zero T for len(s.remaining) > 0 { item, err := s.remaining[0].Next(ctx) if err == End { s.remaining[0].Close() s.remaining = s.remaining[1:] continue } else if err != nil { return zero, err } return item, nil } return zero, End } func (s *joinStream[T]) Close() { for i := range s.remaining { s.remaining[i].Close() } } // Map transforms the values of s using the conversion f. If f returns an error, terminates the // stream early. 
func Map[T any, U any](s Stream[T], f func(context.Context, T) (U, error)) Stream[U] { return &mapStream[T, U]{inner: s, f: f} } type mapStream[T any, U any] struct { inner Stream[T] f func(context.Context, T) (U, error) } func (s *mapStream[T, U]) Next(ctx context.Context) (U, error) { var zero U item, err := s.inner.Next(ctx) if err != nil { return zero, err } mapped, err := s.f(ctx, item) if err != nil { return zero, err } return mapped, nil } func (s *mapStream[T, U]) Close() { s.inner.Close() } // Merge merges the in streams, returning a stream that yields all elements from all of them as they // arrive. func Merge[T any](in ...Stream[T]) Stream[T] { sender, receiver := Pipe[T](0) nDone := uint32(0) closeOnce := uint32(0) ctx, cancel := context.WithCancel(context.Background()) for i := 0; i < len(in); i++ { i := i go func() { defer func() { if int(atomic.AddUint32(&nDone, 1)) == len(in) && atomic.LoadUint32(&closeOnce) == 0 { sender.Close(nil) } }() for { item, err := in[i].Next(ctx) if err == End { return } else if err != nil { if atomic.CompareAndSwapUint32(&closeOnce, 0, 1) { cancel() sender.Close(err) } return } err = sender.Send(ctx, item) if err != nil { // Implies ctx has expired or the receiver closed, either way we're done. return } } }() } return receiver } type mergeStream[T any] struct { inner Stream[T] cancel func() } func (s *mergeStream[T]) Next(ctx context.Context) (T, error) { return s.inner.Next(ctx) } func (s *mergeStream[T]) Close() { s.inner.Close() s.cancel() } // Runs returns a stream of streams. The inner streams yield contiguous elements from s such that // same(a, b) returns true for any a and b in the run. // // The inner stream should be drained before calling Next on the outer stream. // // same(a, a) must return true. If same(a, b) and same(b, c) both return true, then same(a, c) must // also. 
func Runs[T any](s Stream[T], same func(a, b T) bool) Stream[Stream[T]] {
	return &runsStream[T]{
		// Peek is needed so a run boundary can be detected without consuming the first
		// item of the following run.
		inner: WithPeek(s),
		same:  same,
		curr:  nil,
	}
}

type runsStream[T any] struct {
	inner Peekable[T]
	same  func(a, b T) bool
	// curr is the most recently handed-out inner stream; nil before the first call to Next.
	curr *runsInnerStream[T]
}

// Next finishes the previous run (draining it if the caller didn't) and returns a stream over the
// next run, or End when s is exhausted.
func (s *runsStream[T]) Next(ctx context.Context) (Stream[T], error) {
	if s.curr != nil {
		// Drain the previous run so we're positioned at the first item of the next one.
		for {
			_, err := s.curr.Next(ctx)
			if err == End {
				break
			} else if err != nil {
				return nil, err
			}
		}
		s.curr.Close()
		s.curr = nil
	}

	// Peek rather than Next: the run's first item must still be yielded by the inner stream
	// we hand out.
	item, err := s.inner.Peek(ctx)
	if err != nil {
		return nil, err
	}
	s.curr = &runsInnerStream[T]{parent: s, prev: item}
	return s.curr, nil
}

func (s *runsStream[T]) Close() {
	s.inner.Close()
}

type runsInnerStream[T any] struct {
	// parent is set to nil by Close to mark this run as finished.
	parent *runsStream[T]
	// prev is the first item of the run. Comparing every candidate against the first item is
	// equivalent to comparing against its predecessor because Runs requires same to be
	// transitive.
	prev T
}

// Next yields the next item of the run, or End at the run boundary (leaving the boundary item
// unconsumed for the parent stream) or once closed.
func (s *runsInnerStream[T]) Next(ctx context.Context) (T, error) {
	var zero T
	if s.parent == nil {
		// Already closed.
		return zero, End
	}
	item, err := s.parent.inner.Peek(ctx)
	if err == End {
		return zero, End
	} else if err != nil {
		return zero, err
	} else if !s.parent.same(s.prev, item) {
		// The peeked item starts a new run; don't consume it.
		return zero, End
	}
	return s.parent.inner.Next(ctx)
}

func (s *runsInnerStream[T]) Close() { s.parent = nil }

// While returns a Stream that terminates before the first item from s for which f returns false.
// If f returns an error, terminates the stream early.
func While[T any](s Stream[T], f func(context.Context, T) (bool, error)) Stream[T] { return &whileStream[T]{ inner: s, f: f, } } type whileStream[T any] struct { inner Stream[T] f func(context.Context, T) (bool, error) item T has bool done bool } func (s *whileStream[T]) Next(ctx context.Context) (T, error) { var zero T if s.done { return zero, End } if !s.has { var err error s.item, err = s.inner.Next(ctx) if err != nil { return zero, err } s.has = true } ok, err := s.f(ctx, s.item) if err != nil { return zero, err } if !ok { s.done = true return zero, End } s.has = false return s.item, nil } func (s *whileStream[T]) Close() { s.inner.Close() } juniper-0.15.1/stream/stream_example_test.go000066400000000000000000000205751453027036000211520ustar00rootroot00000000000000package stream_test import ( "context" "errors" "fmt" "time" "github.com/bradenaw/juniper/iterator" "github.com/bradenaw/juniper/stream" ) func ExampleBatch() { ctx := context.Background() sender, receiver := stream.Pipe[string](0) batchStream := stream.Batch(receiver, 50*time.Millisecond, 3) wait := make(chan struct{}, 3) go func() { _ = sender.Send(ctx, "a") _ = sender.Send(ctx, "b") // Wait here before sending any more to show that the first batch will flush early because // of maxTime=50*time.Millisecond. 
<-wait _ = sender.Send(ctx, "c") _ = sender.Send(ctx, "d") _ = sender.Send(ctx, "e") _ = sender.Send(ctx, "f") sender.Close(nil) }() defer batchStream.Close() var batches [][]string for { batch, err := batchStream.Next(ctx) if err == stream.End { break } else if err != nil { fmt.Printf("stream ended with error: %s\n", err) return } batches = append(batches, batch) wait <- struct{}{} } fmt.Println(batches) // Output: // [[a b] [c d e] [f]] } func ExampleChan() { ctx := context.Background() c := make(chan string, 3) c <- "a" c <- "b" c <- "c" close(c) s := stream.Chan(c) x, err := stream.Collect(ctx, s) fmt.Println(err) fmt.Println(x) // Output: // // [a b c] } func ExampleChunk() { ctx := context.Background() s := stream.FromIterator(iterator.Slice([]string{"a", "b", "c", "d", "e", "f", "g", "h"})) chunked := stream.Chunk(s, 3) item, _ := chunked.Next(ctx) fmt.Println(item) item, _ = chunked.Next(ctx) fmt.Println(item) item, _ = chunked.Next(ctx) fmt.Println(item) // Output: // [a b c] // [d e f] // [g h] } func ExampleCompact() { ctx := context.Background() s := stream.FromIterator(iterator.Slice([]string{"a", "a", "b", "c", "c", "c", "a"})) compactStream := stream.Compact(s) compacted, _ := stream.Collect(ctx, compactStream) fmt.Println(compacted) // Output: // [a b c a] } func ExampleCompactFunc() { ctx := context.Background() s := stream.FromIterator(iterator.Slice([]string{ "bank", "beach", "ghost", "goat", "group", "yaw", "yew", })) compactStream := stream.CompactFunc(s, func(a, b string) bool { return a[0] == b[0] }) compacted, _ := stream.Collect(ctx, compactStream) fmt.Println(compacted) // Output: // [bank ghost yaw] } func ExampleCollect() { ctx := context.Background() s := stream.FromIterator(iterator.Slice([]string{"a", "b", "c"})) x, err := stream.Collect(ctx, s) fmt.Println(err) fmt.Println(x) // Output: // // [a b c] } func ExampleError() { ctx := context.Background() s := stream.Error[int](errors.New("foo")) _, err := s.Next(ctx) fmt.Println(err) // 
Output: // foo } func ExampleFilter() { ctx := context.Background() s := stream.FromIterator(iterator.Slice([]int{1, 2, 3, 4, 5, 6})) evensStream := stream.Filter(s, func(ctx context.Context, x int) (bool, error) { return x%2 == 0, nil }) evens, _ := stream.Collect(ctx, evensStream) fmt.Println(evens) // Output: // [2 4 6] } func ExampleFirst() { ctx := context.Background() s := stream.FromIterator(iterator.Slice([]string{"a", "b", "c", "d", "e"})) first3Stream := stream.First(s, 3) first3, _ := stream.Collect(ctx, first3Stream) fmt.Println(first3) // Output: // [a b c] } func ExampleFlatten() { ctx := context.Background() s := stream.FromIterator(iterator.Slice([]stream.Stream[int]{ stream.FromIterator(iterator.Slice([]int{0, 1, 2})), stream.FromIterator(iterator.Slice([]int{3, 4, 5, 6})), stream.FromIterator(iterator.Slice([]int{7})), })) allStream := stream.Flatten(s) all, _ := stream.Collect(ctx, allStream) fmt.Println(all) // Output: // [0 1 2 3 4 5 6 7] } func ExampleJoin() { ctx := context.Background() s := stream.Join( stream.FromIterator(iterator.Counter(3)), stream.FromIterator(iterator.Counter(5)), stream.FromIterator(iterator.Counter(2)), ) all, _ := stream.Collect(ctx, s) fmt.Println(all) // Output: // [0 1 2 0 1 2 3 4 0 1] } func ExampleLast() { ctx := context.Background() s := stream.FromIterator(iterator.Counter(10)) last5, _ := stream.Last(ctx, s, 5) fmt.Println(last5) s = stream.FromIterator(iterator.Counter(3)) last5, _ = stream.Last(ctx, s, 5) fmt.Println(last5) // Output: // [5 6 7 8 9] // [0 1 2] } func ExampleMap() { ctx := context.Background() s := stream.FromIterator(iterator.Counter(5)) halfStream := stream.Map(s, func(ctx context.Context, x int) (float64, error) { return float64(x) / 2, nil }) all, _ := stream.Collect(ctx, halfStream) fmt.Println(all) // Output: // [0 0.5 1 1.5 2] } func ExampleMerge() { ctx := context.Background() a := stream.FromIterator(iterator.Slice([]string{"a", "b", "c"})) b := 
stream.FromIterator(iterator.Slice([]string{"x", "y", "z"})) c := stream.FromIterator(iterator.Slice([]string{"m", "n"})) s := stream.Merge(a, b, c) for { item, err := s.Next(ctx) if err == stream.End { break } else if err != nil { panic(err) } fmt.Println(item) } // Unordered output: // m // b // a // n // x // c // z // y } func ExampleOne() { ctx := context.Background() s := stream.FromIterator(iterator.Slice([]string{"a"})) item, err := stream.One(ctx, s) fmt.Println(err == nil) fmt.Println(item) s = stream.FromIterator(iterator.Slice([]string{"a", "b"})) _, err = stream.One(ctx, s) fmt.Println(err == stream.ErrMoreThanOne) s = stream.FromIterator(iterator.Slice([]string{})) _, err = stream.One(ctx, s) fmt.Println(err == stream.ErrEmpty) // Output: // true // a // true // true } func ExamplePeekable() { ctx := context.Background() s := stream.FromIterator(iterator.Slice([]int{1, 2, 3})) p := stream.WithPeek(s) x, _ := p.Peek(ctx) fmt.Println(x) x, _ = p.Next(ctx) fmt.Println(x) x, _ = p.Next(ctx) fmt.Println(x) x, _ = p.Peek(ctx) fmt.Println(x) // Output: // 1 // 1 // 2 // 3 } func ExamplePipe() { ctx := context.Background() sender, receiver := stream.Pipe[int](0) go func() { sender.Send(ctx, 1) sender.Send(ctx, 2) sender.Send(ctx, 3) sender.Close(nil) }() defer receiver.Close() for { item, err := receiver.Next(ctx) if err == stream.End { break } else if err != nil { fmt.Printf("stream ended with error: %s\n", err) return } fmt.Println(item) } // Output: // 1 // 2 // 3 } func ExamplePipe_error() { ctx := context.Background() sender, receiver := stream.Pipe[int](0) oopsError := errors.New("oops") go func() { sender.Send(ctx, 1) sender.Close(oopsError) }() defer receiver.Close() for { item, err := receiver.Next(ctx) if err == stream.End { fmt.Println("stream ended normally") break } else if err != nil { fmt.Printf("stream ended with error: %s\n", err) return } fmt.Println(item) } // Output: // 1 // stream ended with error: oops } func ExampleReduce() { ctx := 
context.Background() s := stream.FromIterator(iterator.Slice([]int{1, 2, 3, 4, 5})) sum, _ := stream.Reduce(ctx, s, 0, func(x, y int) (int, error) { return x + y, nil }) fmt.Println(sum) s = stream.FromIterator(iterator.Slice([]int{1, 3, 2, 3})) // Computes the exponentially-weighted moving average of the values of s. first := true ewma, _ := stream.Reduce(ctx, s, 0, func(running float64, item int) (float64, error) { if first { first = false return float64(item), nil } return running*0.5 + float64(item)*0.5, nil }) // Should end as 1/8 + 3/8 + 2/4 + 3/2 // = 1/8 + 3/8 + 4/8 + 12/8 // = 20/8 // = 2.5 fmt.Println(ewma) // Output: // 15 // 2.5 } func ExampleRuns() { ctx := context.Background() s := stream.FromIterator(iterator.Slice([]int{2, 4, 0, 7, 1, 3, 9, 2, 8})) // Contiguous runs of evens/odds. parityRuns := stream.Runs(s, func(a, b int) bool { return a%2 == b%2 }) one, _ := parityRuns.Next(ctx) allOne, _ := stream.Collect(ctx, one) fmt.Println(allOne) two, _ := parityRuns.Next(ctx) allTwo, _ := stream.Collect(ctx, two) fmt.Println(allTwo) three, _ := parityRuns.Next(ctx) allThree, _ := stream.Collect(ctx, three) fmt.Println(allThree) // Output: // [2 4 0] // [7 1 3 9] // [2 8] } func ExampleWhile() { ctx := context.Background() s := stream.FromIterator(iterator.Slice([]string{ "aardvark", "badger", "cheetah", "dinosaur", "egret", })) beforeD := stream.While(s, func(ctx context.Context, s string) (bool, error) { return s < "d", nil }) out, _ := stream.Collect(ctx, beforeD) fmt.Println(out) // Output: // [aardvark badger cheetah] } juniper-0.15.1/stream/stream_test.go000066400000000000000000000134211453027036000174270ustar00rootroot00000000000000package stream import ( "context" "errors" "fmt" "strconv" "sync" "testing" "time" "github.com/bradenaw/juniper/internal/fuzz" "github.com/bradenaw/juniper/internal/require2" "github.com/bradenaw/juniper/internal/tseq" "github.com/bradenaw/juniper/iterator" "github.com/bradenaw/juniper/xmath" 
"github.com/bradenaw/juniper/xslices" ) type intError int func (err intError) Error() string { return strconv.Itoa(int(err)) } func FuzzBatch(f *testing.F) { f.Fuzz(func(t *testing.T, bufferSize int, batchSize int, b []byte) { bufferSize = xmath.Clamp(bufferSize, 0, 1000) batchSize = xmath.Clamp(bufferSize, 0, bufferSize) t.Logf("bufferSize = %#v", bufferSize) t.Logf("batchSize = %#v", batchSize) sender, receiver := Pipe[int](bufferSize) s := Batch(receiver, 10*time.Millisecond, batchSize) var oracle []int sendClosed := false var sendClosedErr error recvClosed := false x := 0 fuzz.Operations( b, func() { // check t.Logf(" oracle = %#v", oracle) t.Logf(" sendClosed = %#v", sendClosed) t.Logf(" sendClosedErr = %#v", sendClosedErr) t.Logf(" recvClosed = %#v", recvClosed) }, func() { if sendClosed { // not allowed return } if len(oracle) == bufferSize { // might block return } t.Logf("sender.Send(ctx, %d)", x) err := sender.Send(context.Background(), x) if !recvClosed { require2.NoError(t, err) } else { require2.True(t, err == nil || errors.Is(err, ErrClosedPipe)) } oracle = append(oracle, x) x++ }, func(withErr bool) { if sendClosed { // not allowed return } if withErr { t.Logf("sender.Close(intError(%d))", x) sendClosedErr = intError(x) sender.Close(sendClosedErr) x++ } else { t.Log("sender.Close(nil)") sender.Close(nil) } sendClosed = true }, func() { if recvClosed { // not allowed return } if len(oracle) == 0 { if sendClosed { t.Log("s.Next(ctx) at end") _, err := s.Next(context.Background()) if sendClosedErr == nil { if err != End { t.Fatalf("%s", err) } } else { if err != sendClosedErr { t.Fatalf("%s", err) } } return } else { // would block return } } t.Log("s.Next(ctx)") batch, err := s.Next(context.Background()) // because of the select, this can produce either error or success for a little // while if sendClosed && err != nil { if sendClosedErr == nil { if err != End { t.Fatalf("%s", err) } } else { if err != sendClosedErr { t.Fatalf("%s", err) } } return } 
require2.NoError(t, err) // Unfortunately we can't actually tell if the receiver has received everything that // we sent with Send(). require2.Greater(t, len(batch), 0) require2.LessOrEqual(t, len(batch), batchSize) expectedBatch := oracle[:len(batch)] require2.SlicesEqual(t, expectedBatch, batch) t.Logf(" -> %#v", batch) oracle = oracle[len(expectedBatch):] }, func() { if recvClosed { return } t.Log("s.Close()") s.Close() recvClosed = true }, ) }) } func TestBatch(t *testing.T) { ctx := context.Background() sender, receiver := Pipe[int](1) sender.Send(ctx, 1) batches := Batch(receiver, 365*24*time.Hour, 1) _, err := batches.Next(ctx) require2.NoError(t, err) sender, receiver = Pipe[int](1) sender.Send(ctx, 1) batches = Batch(receiver, 0, 2) _, err = batches.Next(context.Background()) require2.NoError(t, err) } func TestPipeConcurrentSend(t *testing.T) { ctx := context.Background() sender, receiver := Pipe[int](0) var wg sync.WaitGroup errs := make([]error, 4) for i := 0; i < 4; i++ { i := i wg.Add(1) go func() { errs[i] = sender.Send(ctx, i) wg.Done() }() } time.Sleep(2 * time.Millisecond) results := make([]bool, 4) item, err := receiver.Next(ctx) require2.NoError(t, err) results[item] = true item, err = receiver.Next(ctx) require2.NoError(t, err) results[item] = true sender.Close(intError(5)) wg.Wait() for i := range results { require2.True(t, results[i] || errors.Is(errs[i], intError(5))) } } func TestChunk(t *testing.T) { for streamLen := 0; streamLen < 10; streamLen++ { for chunkSize := 1; chunkSize < streamLen; chunkSize++ { t.Run(fmt.Sprintf("streamLen=%d,chunkSize=%d", streamLen, chunkSize), func(t *testing.T) { tseq.Run(t, func(tseq *tseq.TSeq) { x := iterator.Collect(iterator.Counter(streamLen)) expected := xslices.Chunk(x, chunkSize) in := FromIterator(iterator.Slice(x)) s := &tseqStream[int]{in, tseq, false} chunked := collectWithRetries(Chunk[int](s, chunkSize)) require2.DeepEqual(t, expected, chunked) }) }) } } } func TestCollectWithRetries(t 
*testing.T) { for streamLen := 0; streamLen < 10; streamLen++ { t.Run(fmt.Sprintf("streamLen=%d", streamLen), func(t *testing.T) { tseq.Run(t, func(tseq *tseq.TSeq) { x := iterator.Collect(iterator.Counter(streamLen)) in := FromIterator(iterator.Slice(x)) s := &tseqStream[int]{in, tseq, false} out := collectWithRetries[int](s) require2.DeepEqual(t, x, out) }) }) } } type tseqStream[T any] struct { inner Stream[T] tseq *tseq.TSeq prevErr bool } func (s *tseqStream[T]) Next(ctx context.Context) (T, error) { if !s.prevErr && s.tseq.FlipCoin() { var zero T s.prevErr = true return zero, errors.New("") } s.prevErr = false return s.inner.Next(ctx) } func (s *tseqStream[T]) Close() { s.inner.Close() } func collectWithRetries[T any](s Stream[T]) []T { var out []T for { item, err := s.Next(context.Background()) if err == End { return out } else if err != nil { continue } out = append(out, item) } } juniper-0.15.1/stream/testdata/000077500000000000000000000000001453027036000163565ustar00rootroot00000000000000juniper-0.15.1/stream/testdata/fuzz/000077500000000000000000000000001453027036000173545ustar00rootroot00000000000000juniper-0.15.1/stream/testdata/fuzz/FuzzBatch/000077500000000000000000000000001453027036000212545ustar00rootroot00000000000000086dc23b3648d1b798461ec898a20079dc501a8b7dbad7cea2cd03b745802029000066400000000000000000000001201453027036000316240ustar00rootroot00000000000000juniper-0.15.1/stream/testdata/fuzz/FuzzBatchgo test fuzz v1 int(1) int(9) []byte("000000000000000000000000000100000000222") 10d95885964d1ae653be1405b9dce20f7e51b56e73efa0fb57c33685490152eb000066400000000000000000000001221453027036000316440ustar00rootroot00000000000000juniper-0.15.1/stream/testdata/fuzz/FuzzBatchgo test fuzz v1 int(68) int(56) []byte("000000000000000000000000000100000000222") 1bef05a7f3aa67e3f6ebf9a30abcdb1006c8b6f8213bda3c1d88aef20cb6d5a0000066400000000000000000000000671453027036000326220ustar00rootroot00000000000000juniper-0.15.1/stream/testdata/fuzz/FuzzBatchgo test fuzz v1 
int(166) int(156) []byte("0001000007") 23561f2fec377b58a23fc9ae112672cb799fb004a106a07891671b3f2d17b882000066400000000000000000000000551453027036000314760ustar00rootroot00000000000000juniper-0.15.1/stream/testdata/fuzz/FuzzBatchgo test fuzz v1 int(94) int(46) []byte("C0") 2b49f746d1be4692b93e5697b8098a8b9cf332c79288cea46a0d8277fa923d2e000066400000000000000000000000601453027036000317070ustar00rootroot00000000000000juniper-0.15.1/stream/testdata/fuzz/FuzzBatchgo test fuzz v1 int(-159) int(0) []byte("A7+0") 37aeba375901ac1e1a06110754e790ccb6fdb56d762369b632a9ac7568b02c18000066400000000000000000000001711453027036000316220ustar00rootroot00000000000000juniper-0.15.1/stream/testdata/fuzz/FuzzBatchgo test fuzz v1 int(65) int(100) []byte("00000000000000000072222000000000000000000000000000000000000000000000000000000") 3d6d329a0d771d9fcf822119bce41a6dfa09bf2c694c5f0770c8ebccbe667d6a000066400000000000000000000000541453027036000323670ustar00rootroot00000000000000juniper-0.15.1/stream/testdata/fuzz/FuzzBatchgo test fuzz v1 int(0) int(-13) []byte("7") 463b6bdff2fea2ffe7c295a165301ddd1a58a1f7e4c3c03cae3ba20e8476c394000066400000000000000000000001601453027036000323400ustar00rootroot00000000000000juniper-0.15.1/stream/testdata/fuzz/FuzzBatchgo test fuzz v1 int(65) int(100) []byte("00000xX80008000080'02080800X0X000000X0800008 08X0800x000808888008000") 4dd43c30ae77dd634b96350d60419c975143da6a399fad7e050a747d3140362d000066400000000000000000000000541453027036000314740ustar00rootroot00000000000000juniper-0.15.1/stream/testdata/fuzz/FuzzBatchgo test fuzz v1 int(0) int(-13) []byte("2") f94e99ff4311c69dd759058d3f3e987241a8614d329464fe36c04ce57c3860cc000066400000000000000000000000751453027036000315550ustar00rootroot00000000000000juniper-0.15.1/stream/testdata/fuzz/FuzzBatchgo test fuzz v1 int(154) int(0) []byte("000\"0000000000000") juniper-0.15.1/test_all_versions.sh000066400000000000000000000010541453027036000173450ustar00rootroot00000000000000#!/bin/bash set -euo pipefail go_versions=(1.18 
1.19 1.20 1.21) latest="${go_versions[-1]}" if ! go version | grep "go$latest"; then echo >2 "go version expected $latest, got $(go version)" exit 1 fi for go_version in ${go_versions[@]}; do if [[ ${go_version} == ${latest} ]]; then go version go test --race ./... else go install "golang.org/dl/go${go_version}@latest" go_bin="${HOME}/go/bin/go${go_version}" $go_bin download $go_bin version $go_bin test --race ./... fi done juniper-0.15.1/xerrors/000077500000000000000000000000001453027036000147565ustar00rootroot00000000000000juniper-0.15.1/xerrors/xerrors.go000066400000000000000000000024001453027036000170050ustar00rootroot00000000000000// Package xerrors contains extensions to the standard library package errors. package xerrors import ( "errors" "runtime" "strconv" "strings" ) type withStack struct { inner error pc []uintptr } func (err withStack) Error() string { var sb strings.Builder frames := runtime.CallersFrames(err.pc) _, _ = sb.WriteString(err.inner.Error()) _, _ = sb.WriteString("\n\n") for { frame, more := frames.Next() _, _ = sb.WriteString(frame.Function) _, _ = sb.WriteString("(...)\n ") _, _ = sb.WriteString(frame.File) _, _ = sb.WriteString(":") _, _ = sb.WriteString(strconv.Itoa(frame.Line)) _, _ = sb.WriteString("\n") if !more { break } } return sb.String() } func (err withStack) Unwrap() error { return err.inner } // WithStack returns an error that wraps err and adds the call stack of the call to WithStack to // Error(). If err is nil or already has a stack attached, returns err. func WithStack(err error) error { if err == nil { return nil } if errors.Is(err, withStack{}) { return err } var buf [64]uintptr var ptrs []uintptr skip := 2 for { n := runtime.Callers(skip, buf[:]) ptrs = append(ptrs, buf[:n]...) 
if n < len(buf) { break } skip += n } return withStack{ inner: err, pc: ptrs, } } juniper-0.15.1/xerrors/xerrors_test.go000066400000000000000000000004351453027036000200520ustar00rootroot00000000000000package xerrors import ( "errors" "fmt" "strings" ) func ExampleWithStack() { err := WithStack(errors.New("foo")) fmt.Println(strings.Join(strings.Split(err.Error(), "\n")[:3], "\n")) // Output: // // foo // // github.com/bradenaw/juniper/xerrors.ExampleWithStack(...) } juniper-0.15.1/xmaps/000077500000000000000000000000001453027036000144025ustar00rootroot00000000000000juniper-0.15.1/xmaps/xmaps.go000066400000000000000000000103541453027036000160640ustar00rootroot00000000000000// Package xmaps contains utilities for working with maps. package xmaps import ( "fmt" "github.com/bradenaw/juniper/xslices" "github.com/bradenaw/juniper/xsort" ) // Reverse returns a map from m's values to each of the keys that mapped to it in arbitrary order. func Reverse[M ~map[K]V, K comparable, V comparable](m M) map[V][]K { result := make(map[V][]K, len(m)) for k, v := range m { result[v] = append(result[v], k) } return result } // ReverseSingle returns a map of m's values to m's keys. If there are any duplicate values, the // resulting map has an arbitrary choice of the associated keys and the second return is false. func ReverseSingle[M ~map[K]V, K comparable, V comparable](m M) (map[V]K, bool) { result := make(map[V]K, len(m)) allOk := true for k, v := range m { if _, ok := result[v]; ok { allOk = false } result[v] = k } return result, allOk } // ToIndex returns a map from keys[i] to i. func ToIndex[K comparable](keys []K) map[K]int { m := make(map[K]int, len(keys)) for i := range keys { m[keys[i]] = i } return m } // FromKeysAndValues returns a map from keys[i] to values[i]. If there are any duplicate keys, the // resulting map has an arbitrary choice of the associated values and the second return is false. It // panics if len(keys)!=len(values). 
func FromKeysAndValues[K comparable, V any](keys []K, values []V) (map[K]V, bool) { if len(keys) != len(values) { panic(fmt.Sprintf("len(keys)=%d, len(values)=%d", len(keys), len(values))) } m := make(map[K]V, len(keys)) allOk := true for i := range keys { if _, ok := m[keys[i]]; ok { allOk = false } m[keys[i]] = values[i] } return m, allOk } // Set[T] is shorthand for map[T]struct{} with convenience methods. type Set[T comparable] map[T]struct{} // Add adds item to the set. func (s Set[T]) Add(item T) { s[item] = struct{}{} } // Remove removes item from the set. func (s Set[T]) Remove(item T) { delete(s, item) } // Contains returns true if item is in the set. func (s Set[T]) Contains(item T) bool { _, ok := s[item]; return ok } // SetFromSlice returns a Set whose elements are items. func SetFromSlice[T comparable](items []T) Set[T] { result := make(Set[T], len(items)) for _, k := range items { result[k] = struct{}{} } return result } // Union returns a set containing all elements of all input sets. func Union[S ~map[T]struct{}, T comparable](sets ...S) S { // Size estimate: the smallest possible result is the largest input set, if it's a superset of // all of the others. size := 0 for _, set := range sets { if len(set) > size { size = len(set) } } out := make(S, size) for _, set := range sets { for k := range set { out[k] = struct{}{} } } return out } // Intersection returns a set of the items that all input sets have in common. func Intersection[S ~map[T]struct{}, T comparable](sets ...S) S { // The smallest intersection is 0, so don't guess about capacity. out := make(S) if len(sets) == 0 { return out } sets = xslices.Clone(sets) xsort.Slice(sets, func(a, b S) bool { return len(a) < len(b) }) for k := range sets[0] { include := true for j := 1; j < len(sets); j++ { if _, ok := sets[j][k]; !ok { include = false break } } if include { out[k] = struct{}{} } } return out } // Intersects returns true if the input sets have any element in common. 
func Intersects[S ~map[T]struct{}, T comparable](sets ...S) bool { if len(sets) == 0 { return false } // Ideally we check from most-selective to least-selective so we can do the fewest iterations // of each of the below loops. Use set size as an approximation. sets = xslices.Clone(sets) xsort.Slice(sets, func(a, b S) bool { return len(a) < len(b) }) for k := range sets[0] { include := true for j := 1; j < len(sets); j++ { if _, ok := sets[j][k]; !ok { include = false break } } if include { return true } } return false } // Difference returns all items of a that do not appear in b. func Difference[S ~map[T]struct{}, T comparable](a, b S) S { // Size estimate: the smallest possible result is if all items of b are in a. size := len(a) - len(b) if size < 0 { size = 0 } result := make(S, size) for k := range a { if _, ok := b[k]; !ok { result[k] = struct{}{} } } return result } juniper-0.15.1/xmaps/xmaps_test.go000066400000000000000000000033671453027036000171310ustar00rootroot00000000000000package xmaps_test import ( "fmt" "github.com/bradenaw/juniper/xmaps" ) func ExampleReverse() { a := map[string]int{ "foo": 2, "bar": 1, "baz": 2, } reversed := xmaps.Reverse(a) fmt.Println(1, reversed[1][0]) fmt.Println(2, reversed[2][0]) fmt.Println(2, reversed[2][1]) // Unordered output: // 1 bar // 2 foo // 2 baz } func ExampleReverseSingle() { a := map[string]int{ "foo": 1, "bar": 2, "baz": 3, } reversed, ok := xmaps.ReverseSingle(a) fmt.Println(ok) fmt.Println(reversed) // Output: // true // map[1:foo 2:bar 3:baz] } func ExampleToIndex() { m := []string{"foo", "bar", "baz"} fmt.Println(xmaps.ToIndex(m)) // Output: // map[bar:1 baz:2 foo:0] } func ExampleUnion() { a := xmaps.Set[int]{ 1: {}, 4: {}, } b := xmaps.Set[int]{ 3: {}, 4: {}, } c := xmaps.Set[int]{ 1: {}, 5: {}, } union := xmaps.Union(a, b, c) fmt.Println(union) // Output: // map[1:{} 3:{} 4:{} 5:{}] } func ExampleIntersection() { a := xmaps.Set[int]{ 1: {}, 2: {}, 4: {}, } b := xmaps.Set[int]{ 1: {}, 3: {}, 4: {}, } c 
:= map[int]struct{}{ 1: {}, 4: {}, 5: {}, } intersection := xmaps.Intersection(a, b, c) fmt.Println(intersection) // Output: // map[1:{} 4:{}] } func ExampleIntersects() { a := xmaps.Set[int]{ 1: {}, 2: {}, } b := xmaps.Set[int]{ 1: {}, 3: {}, } c := xmaps.Set[int]{ 3: {}, 4: {}, } fmt.Println(xmaps.Intersects(a, b)) fmt.Println(xmaps.Intersects(b, c)) fmt.Println(xmaps.Intersects(a, c)) // Output: // true // true // false } func ExampleDifference() { a := xmaps.Set[int]{ 1: {}, 4: {}, 5: {}, } b := xmaps.Set[int]{ 3: {}, 4: {}, } difference := xmaps.Difference(a, b) fmt.Println(difference) // Output: // map[1:{} 5:{}] } juniper-0.15.1/xmath/000077500000000000000000000000001453027036000143735ustar00rootroot00000000000000juniper-0.15.1/xmath/xmath.go000066400000000000000000000007471453027036000160530ustar00rootroot00000000000000// Package xmath contains extensions to the standard library package math. package xmath // Abs returns the absolute value of x. It panics if this value is not representable, for example // because -math.MinInt32 requires more than 32 bits to represent and so does not fit in an int32. func Abs[T ~int | ~int8 | ~int16 | ~int32 | ~int64](x T) T { if x < 0 { if -x == x { panic("can't xmath.Abs minimum value: positive equivalent not representable") } return -x } return x } juniper-0.15.1/xmath/xmath_go1.21.go000066400000000000000000000010571453027036000170350ustar00rootroot00000000000000//go:build go1.21 package xmath import "cmp" // Min returns the minimum of a and b based on the < operator. // // Deprecated: min is a builtin as of Go 1.21. func Min[T cmp.Ordered](a, b T) T { return min(a, b) } // Max returns the maximum of a and b based on the > operator. // // Deprecated: max is a builtin as of Go 1.21. func Max[T cmp.Ordered](a, b T) T { return max(a, b) } // Clamp clamps the value of x to within min and max. 
func Clamp[T cmp.Ordered](x, min, max T) T { if x < min { return min } if x > max { return max } return x } juniper-0.15.1/xmath/xmath_old.go000066400000000000000000000010411453027036000166750ustar00rootroot00000000000000//go:build !go1.21 package xmath import "golang.org/x/exp/constraints" // Min returns the minimum of a and b based on the < operator. func Min[T constraints.Ordered](a, b T) T { if a < b { return a } return b } // Max returns the maximum of a and b based on the > operator. func Max[T constraints.Ordered](a, b T) T { if a > b { return a } return b } // Clamp clamps the value of x to within min and max. func Clamp[T constraints.Ordered](x, min, max T) T { if x < min { return min } if x > max { return max } return x } juniper-0.15.1/xmath/xrand/000077500000000000000000000000001453027036000155075ustar00rootroot00000000000000juniper-0.15.1/xmath/xrand/testdata/000077500000000000000000000000001453027036000173205ustar00rootroot00000000000000juniper-0.15.1/xmath/xrand/testdata/fuzz/000077500000000000000000000000001453027036000203165ustar00rootroot00000000000000juniper-0.15.1/xmath/xrand/testdata/fuzz/FuzzSampleInner/000077500000000000000000000000001453027036000234125ustar00rootroot00000000000000026df4d84a92eefda60cf8f15aef08a4002b1faad98e27586df1de381b7922b5000066400000000000000000000000441453027036000344360ustar00rootroot00000000000000juniper-0.15.1/xmath/xrand/testdata/fuzz/FuzzSampleInnergo test fuzz v1 []byte("0") int(63) 03cc0292c8d26ab239059a35ced1322911297f07dd25db1237d0005fe0e19841000066400000000000000000000001131453027036000335150ustar00rootroot00000000000000juniper-0.15.1/xmath/xrand/testdata/fuzz/FuzzSampleInnergo test fuzz v1 []byte("0000000000000000000000000000000000000000") int(31) 05156fd053a2d13b3c9cbe36ad5755fab5f31ab3242550a49692aa79ee1e4045000066400000000000000000000002631453027036000340310ustar00rootroot00000000000000juniper-0.15.1/xmath/xrand/testdata/fuzz/FuzzSampleInnergo test fuzz v1 
[]byte("89111B0901A100990A20C201107212718X22c817272C8807B080281X222C0702A1070X9170811272810A2179270277002012097120011812 10Ya1C8112120A111012C11198A100B") int(27) 086d599a93f0c1c236c6ff45cacfc93e9141c3ccf07a59535bdb572967798436000066400000000000000000000001031453027036000340310ustar00rootroot00000000000000juniper-0.15.1/xmath/xrand/testdata/fuzz/FuzzSampleInnergo test fuzz v1 []byte("00000000000000000000000000000000") int(77) 0a0eb5ba2014c951af24c8b8f5d3fa9d19272442bee86d80d79384e9fe18abf2000066400000000000000000000000531453027036000343010ustar00rootroot00000000000000juniper-0.15.1/xmath/xrand/testdata/fuzz/FuzzSampleInnergo test fuzz v1 []byte("00000000") int(20) 21a46963f603d3cbe3bafc40ae07dfdbfcb66058f8d3466f4ec3c5b3d3b3d618000066400000000000000000000001001453027036000344760ustar00rootroot00000000000000juniper-0.15.1/xmath/xrand/testdata/fuzz/FuzzSampleInnergo test fuzz v1 []byte("\xff\xff\xff\xff\xff\xff\xff0") int(24) 2fbcb46cbae7d9a3d2dc693a06510cfcd404fba718dce27e17b821f76ecfedb1000066400000000000000000000000431453027036000347770ustar00rootroot00000000000000juniper-0.15.1/xmath/xrand/testdata/fuzz/FuzzSampleInnergo test fuzz v1 []byte("") int(20) 32ebf3925cbf5e62bbf03b28b11b28a721d1f2e024b99ca5f6d93fc90aaf97df000066400000000000000000000000441453027036000345060ustar00rootroot00000000000000juniper-0.15.1/xmath/xrand/testdata/fuzz/FuzzSampleInnergo test fuzz v1 []byte("") int(-61) juniper-0.15.1/xmath/xrand/xrand.go000066400000000000000000000140451453027036000171560ustar00rootroot00000000000000// Package xrand contains extensions to the standard library package math/rand. 
package xrand import ( "context" "math" "math/rand" "github.com/bradenaw/juniper/iterator" "github.com/bradenaw/juniper/stream" ) type randRand interface { Float64() float64 Intn(int) int Shuffle(int, func(int, int)) } type defaultRand struct{} func (defaultRand) Float64() float64 { return rand.Float64() } func (defaultRand) Intn(n int) int { return rand.Intn(n) } func (defaultRand) Shuffle(n int, swap func(int, int)) { rand.Shuffle(n, swap) } // Shuffle pseudo-randomizes the order of a. func Shuffle[T any](a []T) { rShuffle(defaultRand{}, a) } // RShuffle pseudo-randomizes the order of a. func RShuffle[T any](r *rand.Rand, a []T) { rShuffle(r, a) } func rShuffle[T any, R randRand](r R, a []T) { r.Shuffle(len(a), func(i, j int) { a[i], a[j] = a[j], a[i] }) } // Sample pseudo-randomly picks k ints uniformly without replacement from [0, n). // // If n < k, returns all ints in [0, n). // // Requires O(k) time and space. func Sample(n int, k int) []int { return rSample(defaultRand{}, n, k) } // RSample pseudo-randomly picks k ints uniformly without replacement from [0, n). // // If n < k, returns all ints in [0, n). // // Requires O(k) time and space. func RSample(r *rand.Rand, n int, k int) []int { return rSample(r, n, k) } func rSample[R randRand](r R, n int, k int) []int { out := make([]int, k) samp := newSampler(r, k) for { next, replace := samp.Next() if next >= n { break } out[replace] = next } if n < k { out = out[:n] } rShuffle(r, out) return out } // SampleIterator pseudo-randomly picks k items uniformly without replacement from iter. // // If iter yields fewer than k items, returns all of them. // // Uses a reservoir sample (https://en.wikipedia.org/wiki/Reservoir_sampling), which uses time // linear in the length of iter but only O(k) extra space. func SampleIterator[T any](iter iterator.Iterator[T], k int) []T { return rSampleIterator(defaultRand{}, iter, k) } // RSampleIterator pseudo-randomly picks k items uniformly without replacement from iter. 
// // If iter yields fewer than k items, returns all of them. // // Uses a reservoir sample (https://en.wikipedia.org/wiki/Reservoir_sampling), which uses time // linear in the length of iter but only O(k) extra space. func RSampleIterator[T any](r *rand.Rand, iter iterator.Iterator[T], k int) []T { return rSampleIterator(r, iter, k) } func rSampleIterator[T any, R randRand](r R, iter iterator.Iterator[T], k int) []T { out := make([]T, k) i := 0 samp := newSampler(r, k) Outer: for { next, replace := samp.Next() for { item, ok := iter.Next() if !ok { break Outer } if i == next { out[replace] = item i++ break } i++ } } if i < k { out = out[:i] } rShuffle(r, out) return out } // SampleStream pseudo-randomly picks k items uniformly without replacement from s. // // If s yields fewer than k items, returns all of them. // // Uses a reservoir sample (https://en.wikipedia.org/wiki/Reservoir_sampling), which uses time // linear in the length of s but only O(k) extra space. func SampleStream[T any](ctx context.Context, s stream.Stream[T], k int) ([]T, error) { return rSampleStream(ctx, defaultRand{}, s, k) } // RSampleStream pseudo-randomly picks k items uniformly without replacement from s. // // If s yields fewer than k items, returns all of them. // // Uses a reservoir sample (https://en.wikipedia.org/wiki/Reservoir_sampling), which uses time // linear in the length of s but only O(k) extra space. 
func RSampleStream[T any]( ctx context.Context, r *rand.Rand, s stream.Stream[T], k int, ) ([]T, error) { return rSampleStream(ctx, r, s, k) } func rSampleStream[T any, R randRand]( ctx context.Context, r R, s stream.Stream[T], k int, ) ([]T, error) { defer s.Close() out := make([]T, k) i := 0 samp := newSampler(r, k) Outer: for { next, replace := samp.Next() for { item, err := s.Next(ctx) if err == stream.End { break Outer } else if err != nil { return nil, err } if i == next { out[replace] = item i++ break } i++ } } if i < k { out = out[:i] } rShuffle(r, out) return out, nil } // SampleSlice pseudo-randomly picks k items uniformly without replacement from a. // // If len(a) < k, returns all items in a. // // Uses a reservoir sample (https://en.wikipedia.org/wiki/Reservoir_sampling), which uses O(k) time // and space. func SampleSlice[T any](a []T, k int) []T { return rSampleSlice(defaultRand{}, a, k) } // RSampleSlice pseudo-randomly picks k items uniformly without replacement from a. // // If len(a) < k, returns all items in a. // // Uses a reservoir sample (https://en.wikipedia.org/wiki/Reservoir_sampling), which uses O(k) time // and space. func RSampleSlice[T any](r *rand.Rand, a []T, k int) []T { return rSampleSlice(r, a, k) } func rSampleSlice[T any, R randRand](r R, a []T, k int) []T { out := make([]T, k) samp := newSampler(r, k) for { next, replace := samp.Next() if next >= len(a) { break } out[replace] = a[next] } if len(a) < k { out = out[:len(a)] } rShuffle(r, out) return out } type sampler[R randRand] struct { i int first bool w float64 k int r R } func newSampler[R randRand](r R, k int) sampler[R] { return sampler[R]{ i: 0, first: true, w: math.Exp(math.Log(r.Float64()) / float64(k)), k: k, r: r, } } // Returns (next, replace) such that next is always increasing, and that the input item at index // next (if there is one) should replace the reservoir item at index replace. 
// // As such, for the first k iterations, always returns (i, i) to build the reservoir. func (s *sampler[R]) Next() (int, int) { if s.i < s.k { j := s.i s.i++ return j, j } if s.first && s.i == s.k { s.i-- s.first = false } skip := math.Floor(math.Log(s.r.Float64()) / math.Log(1-s.w)) if math.IsInf(skip, 0) || math.IsNaN(skip) { return math.MaxInt, 0 } s.i += int(skip) + 1 s.w *= math.Exp(math.Log(s.r.Float64()) / float64(s.k)) return s.i, s.r.Intn(s.k) } juniper-0.15.1/xmath/xrand/xrand_example_test.go000066400000000000000000000004531453027036000217260ustar00rootroot00000000000000package xrand_test import ( "fmt" "math/rand" "github.com/bradenaw/juniper/xmath/xrand" ) func ExampleSample() { r := rand.New(rand.NewSource(0)) sample := xrand.RSample(r, 100, 5) for _, x := range sample { fmt.Println(x) } // Unordered output: // 45 // 71 // 88 // 93 // 60 } juniper-0.15.1/xmath/xrand/xrand_test.go000066400000000000000000000057161453027036000202220ustar00rootroot00000000000000package xrand import ( "context" "encoding/binary" "math" "math/rand" "testing" "github.com/bradenaw/juniper/internal/require2" "github.com/bradenaw/juniper/iterator" "github.com/bradenaw/juniper/stream" ) type fuzzRand struct { t *testing.T b []byte } func (r *fuzzRand) Intn(n int) int { if len(r.b) < 4 { return 0 } x := binary.BigEndian.Uint32(r.b[:4]) r.b = r.b[4:] return int(x) % n } func (r *fuzzRand) Float64() float64 { if len(r.b) < 8 { return 0 } x := binary.BigEndian.Uint64(r.b[:8]) r.b = r.b[8:] out := float64(x) / math.MaxUint64 if out == 1 { out = math.Nextafter(out, 0) } require2.GreaterOrEqual(r.t, out, float64(0)) require2.Less(r.t, out, float64(1)) r.t.Logf("%f", out) return out } func (r *fuzzRand) Shuffle(int, func(int, int)) { panic("unimplemented") } func FuzzSampleInner(f *testing.F) { f.Fuzz(func(t *testing.T, b []byte, k int) { if k <= 0 { return } t.Logf("k %d", k) r := &fuzzRand{t, b} samp := newSampler(r, k) prev := 0 for i := 0; i < 100; i++ { next, replace := 
samp.Next() t.Logf("%d: next %d replace %d", i, next, replace) if next == math.MaxInt { break } if i < k { require2.Equal(t, next, i) require2.Equal(t, replace, i) } else { require2.Greater(t, next, prev) require2.GreaterOrEqual(t, replace, 0) require2.Less(t, replace, k) } prev = next } }) } func stddev(a []int) float64 { m := mean(a) sumSquaredDeviation := float64(0) for i := range a { deviation := m - float64(a[i]) sumSquaredDeviation += (deviation * deviation) } return math.Sqrt(sumSquaredDeviation / float64(len(a))) } func mean(a []int) float64 { sum := 0 for i := range a { sum += a[i] } return float64(sum) / float64(len(a)) } // f must return the same as Sample(r, 20, 5). func testSample(t *testing.T, f func(r *rand.Rand) []int) { r := rand.New(rand.NewSource(0)) counts := make([]int, 20) for i := 0; i < 10000; i++ { sample := f(r) for _, item := range sample { counts[item]++ } } m := mean(counts) t.Logf("counts %#v", counts) t.Logf("stddev %#v", stddev(counts)) t.Logf("stddev / mean %#v", stddev(counts)/m) // There's certainly a better statistical test than this, but I haven't bothered to break out // the stats book yet. 
require2.InDelta(t, 0.02, stddev(counts)/m, 0.01) } func TestSample(t *testing.T) { testSample(t, func(r *rand.Rand) []int { return RSample(r, 20, 5) }) } func TestSampleSlice(t *testing.T) { a := iterator.Collect(iterator.Counter(20)) testSample(t, func(r *rand.Rand) []int { return RSampleSlice(r, a, 5) }) } func TestSampleIterator(t *testing.T) { testSample(t, func(r *rand.Rand) []int { return RSampleIterator(r, iterator.Counter(20), 5) }) } func TestSampleStream(t *testing.T) { testSample(t, func(r *rand.Rand) []int { out, err := RSampleStream( context.Background(), r, stream.FromIterator(iterator.Counter(20)), 5, ) require2.NoError(t, err) return out }) } juniper-0.15.1/xslices/000077500000000000000000000000001453027036000147245ustar00rootroot00000000000000juniper-0.15.1/xslices/testdata/000077500000000000000000000000001453027036000165355ustar00rootroot00000000000000juniper-0.15.1/xslices/testdata/fuzz/000077500000000000000000000000001453027036000175335ustar00rootroot00000000000000juniper-0.15.1/xslices/testdata/fuzz/FuzzRemoveUnordered/000077500000000000000000000000001453027036000235175ustar00rootroot00000000000000cfcad4015f83ee9d954573d1a6f85b097098aa91d6191ef5ca7ca8f83de48020000066400000000000000000000000501453027036000343430ustar00rootroot00000000000000juniper-0.15.1/xslices/testdata/fuzz/FuzzRemoveUnorderedgo test fuzz v1 int(82) int(42) int(65) f7bbe0c14562f70cc3d89e6d9fe8862df888ab393d37a2168390c749c5d87579000066400000000000000000000000471453027036000341730ustar00rootroot00000000000000juniper-0.15.1/xslices/testdata/fuzz/FuzzRemoveUnorderedgo test fuzz v1 int(73) int(0) int(-7) juniper-0.15.1/xslices/xslices.go000066400000000000000000000137271453027036000167370ustar00rootroot00000000000000// Package xslices contains utilities for working with slices of arbitrary types. package xslices // All returns true if f(s[i]) returns true for all i. Trivially, returns true if s is empty. 
func All[T any](s []T, f func(T) bool) bool { for i := range s { if !f(s[i]) { return false } } return true } // Chunk returns non-overlapping chunks of s. The last chunk will be smaller than chunkSize if // len(s) is not a multiple of chunkSize. // // Returns an empty slice if len(s)==0. Panics if chunkSize <= 0. func Chunk[T any](s []T, chunkSize int) [][]T { out := make([][]T, (len(s)+chunkSize-1)/chunkSize) for i := range out { start := i * chunkSize end := (i + 1) * chunkSize if end > len(s) { end = len(s) } out[i] = s[start:end] } return out } // Clear fills s with the zero value of T. // // Deprecated: clear is a builtin as of Go 1.21. func Clear[T any](s []T) { var zero T Fill(s, zero) } // Count returns the number of times x appears in s. func Count[T comparable](s []T, x T) int { return CountFunc(s, func(s T) bool { return x == s }) } // Count returns the number of items in s for which f returns true. func CountFunc[T any](s []T, f func(T) bool) int { n := 0 for _, s := range s { if f(s) { n++ } } return n } // Fill fills s with copies of x. func Fill[T any](s []T, x T) { for i := range s { s[i] = x } } // Group returns a map from u to all items of s for which f(s[i]) returned u. func Group[T any, U comparable](s []T, f func(T) U) map[U][]T { m := make(map[U][]T) for i := range s { g := f(s[i]) m[g] = append(m[g], s[i]) } return m } // Join joins together the contents of each in. func Join[T any](in ...[]T) []T { n := 0 for i := range in { n += len(in[i]) } out := make([]T, 0, n) for i := range in { out = append(out, in[i]...) } return out } // LastIndex returns the last index of x in s, or -1 if x is not in s. func LastIndex[T comparable](s []T, x T) int { for i := len(s) - 1; i >= 0; i-- { if s[i] == x { return i } } return -1 } // LastIndexFunc returns the last index in s for which f(s[i]) returns true, or -1 if there are no // such items. 
func LastIndexFunc[T any](s []T, f func(T) bool) int { for i := len(s) - 1; i >= 0; i-- { if f(s[i]) { return i } } return -1 } // Map creates a new slice by applying f to each element of s. func Map[T any, U any](s []T, f func(T) U) []U { out := make([]U, len(s)) for i := range s { out[i] = f(s[i]) } return out } // Partition moves elements of s such that all elements for which f returns false are at the // beginning and all elements for which f returns true are at the end. It makes no other guarantees // about the final order of elements. Returns the index of the first element for which f returned // true, or len(s) if there wasn't one. func Partition[T any](s []T, f func(t T) bool) int { i := 0 j := len(s) - 1 for { for i < j { if !f(s[i]) { i++ } else { break } } for j > i { if f(s[j]) { j-- } else { break } } if i >= j { break } s[i], s[j] = s[j], s[i] i++ j-- } if i < len(s) && !f(s[i]) { i++ } return i } // Reduce reduces s to a single value using the reduction function f. func Reduce[T any, U any](s []T, initial U, f func(U, T) U) U { out := initial for i := range s { out = f(out, s[i]) } return out } // RemoveUnordered removes n elements from s starting at index idx and returns the modified slice. // This is done by moving up to n elements from the end of the slice into the gap left by removal, // which is linear in n (rather than len(s)-idx as Remove() is), but does not preserve order of the // remaining elements. func RemoveUnordered[T any](s []T, idx int, n int) []T { keepStart := len(s) - n removeEnd := idx + n if removeEnd > keepStart { keepStart = removeEnd } copy(s[idx:], s[keepStart:]) Clear(s[len(s)-n:]) return s[:len(s)-n] } // Repeat returns a slice with length n where every item is s. func Repeat[T any](s T, n int) []T { out := make([]T, n) for i := range out { out[i] = s } return out } // Reverse reverses the elements of s in place. 
func Reverse[T any](s []T) { for i := 0; i < len(s)/2; i++ { s[i], s[len(s)-i-1] = s[len(s)-i-1], s[i] } } // Runs returns a slice of slices. The inner slices are contiguous runs of elements from s such that // same(a, b) returns true for any a and b in the run. // // same(a, a) must return true. If same(a, b) and same(b, c) both return true, then same(a, c) must // also. // // The returned slices use the same underlying array as s. func Runs[T any](s []T, same func(a, b T) bool) [][]T { var runs [][]T start := 0 end := 0 for i := 1; i < len(s); i++ { if same(s[i-1], s[i]) { end = i + 1 } else { runs = append(runs, s[start:end]) start = i end = i + 1 } } if end > 0 { runs = append(runs, s[start:]) } return runs } // Shrink shrinks s's capacity by reallocating, if necessary, so that cap(s) <= len(s) + n. func Shrink[T any](s []T, n int) []T { if cap(s) > len(s)+n { x2 := make([]T, len(s)+n) copy(x2, s) return x2[:len(s)] } return s } // Unique returns a slice that contains only the first instance of each unique item in s, preserving // order. // // Compact is more efficient if duplicates are already adjacent in s, for example if s is in sorted // order. func Unique[T comparable](s []T) []T { return uniqueInto([]T{}, s) } // UniqueInPlace returns a slice that contains only the first instance of each unique item in s, // preserving order. This is done in-place and so modifies the contents of s. The modified slice is // returned. // // Compact is more efficient if duplicates are already adjacent in s, for example if s is in sorted // order. 
func UniqueInPlace[T comparable](s []T) []T { filtered := uniqueInto(s[:0], s) Clear(s[len(filtered):]) return filtered } func uniqueInto[T comparable](into []T, s []T) []T { m := make(map[T]struct{}, len(s)) for i := range s { _, ok := m[s[i]] if !ok { into = append(into, s[i]) m[s[i]] = struct{}{} } } return into } juniper-0.15.1/xslices/xslices_example_test.go000066400000000000000000000157501453027036000215070ustar00rootroot00000000000000package xslices_test import ( "bytes" "fmt" "math" "strings" "github.com/bradenaw/juniper/xmath" "github.com/bradenaw/juniper/xslices" ) func ExampleAll() { isOdd := func(x int) bool { return x%2 != 0 } allOdd := xslices.All([]int{1, 3, 5}, isOdd) fmt.Println(allOdd) allOdd = xslices.All([]int{1, 3, 6}, isOdd) fmt.Println(allOdd) // Output: // true // false } func ExampleAny() { isOdd := func(x int) bool { return x%2 != 0 } anyOdd := xslices.Any([]int{2, 3, 4}, isOdd) fmt.Println(anyOdd) anyOdd = xslices.Any([]int{2, 4, 6}, isOdd) fmt.Println(anyOdd) // Output: // true // false } func ExampleClear() { s := []int{1, 2, 3} xslices.Clear(s) fmt.Println(s) // Output: // [0 0 0] } func ExampleChunk() { s := []string{"a", "b", "c", "d", "e", "f", "g", "h"} chunks := xslices.Chunk(s, 3) fmt.Println(chunks) // Output: // [[a b c] [d e f] [g h]] } func ExampleClone() { s := []int{1, 2, 3} cloned := xslices.Clone(s) fmt.Println(cloned) // Output: // [1 2 3] } func ExampleCompact() { s := []string{"a", "a", "b", "c", "c", "c", "a"} compacted := xslices.Compact(s) fmt.Println(compacted) // Output: // [a b c a] } func ExampleCompactFunc() { s := []string{ "bank", "beach", "ghost", "goat", "group", "yaw", "yew", } compacted := xslices.CompactFunc(s, func(a, b string) bool { return a[0] == b[0] }) fmt.Println(compacted) // Output: // [bank ghost yaw] } func ExampleCompactInPlace() { s := []string{"a", "a", "b", "c", "c", "c", "a"} compacted := xslices.CompactInPlace(s) fmt.Println(compacted) // Output: // [a b c a] } func 
ExampleCompactInPlaceFunc() { s := []string{ "bank", "beach", "ghost", "goat", "group", "yaw", "yew", } compacted := xslices.CompactInPlaceFunc(s, func(a, b string) bool { return a[0] == b[0] }) fmt.Println(compacted) // Output: // [bank ghost yaw] } func ExampleCount() { s := []string{"a", "b", "a", "a", "b"} fmt.Println(xslices.Count(s, "a")) // Output: // 3 } func ExampleEqual() { x := []string{"a", "b", "c"} y := []string{"a", "b", "c"} z := []string{"a", "b", "d"} fmt.Println(xslices.Equal(x, y)) fmt.Println(xslices.Equal(x[:2], y)) fmt.Println(xslices.Equal(z, y)) // Output: // true // false // false } func ExampleEqualFunc() { x := [][]byte{[]byte("a"), []byte("b"), []byte("c")} y := [][]byte{[]byte("a"), []byte("b"), []byte("c")} z := [][]byte{[]byte("a"), []byte("b"), []byte("d")} fmt.Println(xslices.EqualFunc(x, y, bytes.Equal)) fmt.Println(xslices.EqualFunc(x[:2], y, bytes.Equal)) fmt.Println(xslices.EqualFunc(z, y, bytes.Equal)) // Output: // true // false // false } func ExampleFill() { s := []int{1, 2, 3} xslices.Fill(s, 5) fmt.Println(s) // Output: // [5 5 5] } func ExampleFilter() { s := []int{5, -9, -2, 1, -4, 8, 3} s = xslices.Filter(s, func(value int) bool { return value > 0 }) fmt.Println(s) // Output: // [5 1 8 3] } func ExampleFilterInPlace() { s := []int{5, -9, -2, 1, -4, 8, 3} s = xslices.FilterInPlace(s, func(value int) bool { return value > 0 }) fmt.Println(s) // Output: // [5 1 8 3] } func ExampleGrow() { s := make([]int, 0, 1) s = xslices.Grow(s, 4) fmt.Println(len(s)) fmt.Println(cap(s)) s = append(s, 1) addr := &s[0] s = append(s, 2) fmt.Println(addr == &s[0]) s = append(s, 3) fmt.Println(addr == &s[0]) s = append(s, 4) fmt.Println(addr == &s[0]) // Output: // 0 // 4 // true // true // true } func ExampleGroup() { words := []string{ "bank", "beach", "ghost", "goat", "group", "yaw", "yew", } groups := xslices.Group(words, func(s string) rune { return ([]rune(s))[0] }) for firstChar, group := range groups { fmt.Printf("%c: %v\n", 
firstChar, group) } // Unordered output: // b: [bank beach] // g: [ghost goat group] // y: [yaw yew] } func ExampleIndex() { s := []string{"a", "b", "a", "a", "b"} fmt.Println(xslices.Index(s, "b")) fmt.Println(xslices.Index(s, "c")) // Output: // 1 // -1 } func ExampleIndexFunc() { s := []string{ "blue", "green", "yellow", "gold", "red", } fmt.Println(xslices.IndexFunc(s, func(s string) bool { return strings.HasPrefix(s, "g") })) fmt.Println(xslices.IndexFunc(s, func(s string) bool { return strings.HasPrefix(s, "p") })) // Output: // 1 // -1 } func ExampleInsert() { s := []string{"a", "b", "c", "d", "e"} s = xslices.Insert(s, 3, "f", "g") fmt.Println(s) // Output: // [a b c f g d e] } func ExampleJoin() { joined := xslices.Join( []string{"a", "b", "c"}, []string{"x", "y"}, []string{"l", "m", "n", "o"}, ) fmt.Println(joined) // Output: // [a b c x y l m n o] } func ExampleLastIndex() { s := []string{"a", "b", "a", "a", "b"} fmt.Println(xslices.LastIndex(s, "a")) fmt.Println(xslices.LastIndex(s, "c")) // Output: // 3 // -1 } func ExampleLastIndexFunc() { s := []string{ "blue", "green", "yellow", "gold", "red", } fmt.Println(xslices.LastIndexFunc(s, func(s string) bool { return strings.HasPrefix(s, "g") })) fmt.Println(xslices.LastIndexFunc(s, func(s string) bool { return strings.HasPrefix(s, "p") })) // Output: // 3 // -1 } func ExampleMap() { toHalfFloat := func(x int) float32 { return float32(x) / 2 } s := []int{1, 2, 3} floats := xslices.Map(s, toHalfFloat) fmt.Println(floats) // Output: // [0.5 1 1.5] } func ExamplePartition() { s := []int{11, 3, 4, 2, 7, 8, 0, 1, 14} xslices.Partition(s, func(x int) bool { return x%2 == 0 }) fmt.Println(s) // Output: // [11 3 1 7 2 8 0 4 14] } func ExampleReduce() { s := []int{3, 1, 2} sum := xslices.Reduce(s, 0, func(x, y int) int { return x + y }) fmt.Println(sum) min := xslices.Reduce(s, math.MaxInt, xmath.Min[int]) fmt.Println(min) // Output: // 6 // 1 } func ExampleRemove() { s := []int{1, 2, 3, 4, 5} s = xslices.Remove(s, 
1, 2) fmt.Println(s) // Output: // [1 4 5] } func ExampleRemoveUnordered() { s := []int{1, 2, 3, 4, 5} s = xslices.RemoveUnordered(s, 1, 1) fmt.Println(s) s = xslices.RemoveUnordered(s, 1, 2) fmt.Println(s) // Output: // [1 5 3 4] // [1 4] } func ExampleRepeat() { s := xslices.Repeat("a", 4) fmt.Println(s) // Output: // [a a a a] } func ExampleReverse() { s := []string{"a", "b", "c", "d", "e"} xslices.Reverse(s) fmt.Println(s) // Output: // [e d c b a] } func ExampleRuns() { s := []int{2, 4, 0, 7, 1, 3, 9, 2, 8} parityRuns := xslices.Runs(s, func(a, b int) bool { return a%2 == b%2 }) fmt.Println(parityRuns) // Output: // [[2 4 0] [7 1 3 9] [2 8]] } func ExampleShrink() { s := make([]int, 3, 15) s[0] = 0 s[1] = 1 s[2] = 2 fmt.Println(s) fmt.Println(cap(s)) s = xslices.Shrink(s, 0) fmt.Println(s) fmt.Println(cap(s)) // Output: // [0 1 2] // 15 // [0 1 2] // 3 } func ExampleUnique() { s := []string{"a", "b", "b", "c", "a", "b", "b", "c"} unique := xslices.Unique(s) fmt.Println(unique) // Output: // [a b c] } func ExampleUniqueInPlace() { s := []string{"a", "b", "b", "c", "a", "b", "b", "c"} unique := xslices.UniqueInPlace(s) fmt.Println(unique) // Output: // [a b c] } juniper-0.15.1/xslices/xslices_go1.21.go000066400000000000000000000126431453027036000177220ustar00rootroot00000000000000//go:build go1.21 package xslices import ( "slices" ) // Any returns true if f(s[i]) returns true for any i. Trivially, returns false if s is empty. // // Deprecated: slices.ContainsFunc is in the standard library as of Go 1.21. func Any[T any](s []T, f func(T) bool) bool { return slices.ContainsFunc(s, f) } // Clone creates a new slice and copies the elements of s into it. // // Deprecated: slices.Clone is in the standard library as of Go 1.21. func Clone[T any](s []T) []T { return slices.Clone(s) } // Compact returns a slice containing only the first item from each contiguous run of the same item. 
// // For example, this can be used to remove duplicates more cheaply than Unique when the slice is // already in sorted order. // // Deprecated: slices.Compact(slices.Clone(s)) is in the standard library as of Go 1.21. func Compact[T comparable](s []T) []T { return slices.Compact(slices.Clone(s)) } // CompactInPlace returns a slice containing only the first item from each contiguous run of the // same item. This is done in-place and so modifies the contents of s. The modified slice is // returned. // // For example, this can be used to remove duplicates more cheaply than Unique when the slice is // already in sorted order. // // Deprecated: slices.Compact is in the standard library as of Go 1.21. func CompactInPlace[T comparable](s []T) []T { return slices.Compact(s) } // CompactFunc returns a slice containing only the first item from each contiguous run of items for // which eq returns true. // // Deprecated: slices.CompactFunc(slices.Clone(s)) is in the standard library as of Go 1.21. func CompactFunc[T any](s []T, eq func(T, T) bool) []T { return slices.CompactFunc(slices.Clone(s), eq) } // CompactInPlaceFunc returns a slice containing only the first item from each contiguous run of // items for which eq returns true. This is done in-place and so modifies the contents of s. The // modified slice is returned. // // Deprecated: slices.CompactFunc is in the standard library as of Go 1.21. func CompactInPlaceFunc[T any](s []T, eq func(T, T) bool) []T { return slices.CompactFunc(s, eq) } // Equal returns true if a and b contain the same items in the same order. // // Deprecated: slices.Equal is in the standard library as of Go 1.21. func Equal[T comparable](a, b []T) bool { return slices.Equal(a, b) } // EqualFunc returns true if a and b contain the same items in the same order according to eq. // // Deprecated: slices.EqualFunc is in the standard library as of Go 1.21. 
func EqualFunc[T any](a, b []T, eq func(T, T) bool) bool { return slices.EqualFunc(a, b, eq) } // Filter returns a slice containing only the elements of s for which keep() returns true in the // same order that they appeared in s. // // Deprecated: slices.DeleteFunc(slices.Clone(s), f) is in the standard library as of Go 1.21, // though the polarity of the passed function is opposite: return true to remove, rather than to // retain. func Filter[T any](s []T, keep func(t T) bool) []T { return slices.DeleteFunc(slices.Clone(s), func(t T) bool { return !keep(t) }) } // FilterInPlace returns a slice containing only the elements of s for which keep() returns true in // the same order that they appeared in s. This is done in-place and so modifies the contents of s. // The modified slice is returned. // // Deprecated: slices.DeleteFunc is in the standard library as of Go 1.21, though the polarity of // the passed function is opposite: return true to remove, rather than to retain. func FilterInPlace[T any](s []T, keep func(t T) bool) []T { return slices.DeleteFunc(s, func(t T) bool { return !keep(t) }) } // Grow grows s's capacity by reallocating, if necessary, to fit n more elements and returns the // modified slice. This does not change the length of s. After Grow(s, n), the following n // append()s to s will not need to reallocate. // // Deprecated: slices.Grow is in the standard library as of Go 1.21. func Grow[T any](s []T, n int) []T { return slices.Grow(s, n) } // Index returns the first index of x in s, or -1 if x is not in s. // // Deprecated: slices.Index is in the standard library as of Go 1.21. func Index[T comparable](s []T, x T) int { return slices.Index(s, x) } // Index returns the first index in s for which f(s[i]) returns true, or -1 if there are no such // items. // // Deprecated: slices.IndexFunc is in the standard library as of Go 1.21. 
func IndexFunc[T any](s []T, f func(T) bool) int { return slices.IndexFunc(s, f) } // Insert inserts the given values starting at index idx, shifting elements after idx to the right // and growing the slice to make room. Insert will expand the length of the slice up to its capacity // if it can, if this isn't desired then s should be resliced to have capacity equal to its length: // // s[:len(s):len(s)] // // The time cost is O(n+m) where n is len(values) and m is len(s[idx:]). // // Deprecated: slices.Insert is in the standard library as of Go 1.21. func Insert[T any](s []T, idx int, values ...T) []T { return slices.Insert(s, idx, values...) } // Remove removes n elements from s starting at index idx and returns the modified slice. This // requires shifting the elements after the removed elements over, and so its cost is linear in the // number of elements shifted. // // Deprecated: slices.Delete is in the standard library as of Go 1.21, though slices.Delete takes // two indexes rather than an index and a length. func Remove[T any](s []T, idx int, n int) []T { return slices.Delete(s, idx, idx+n) } juniper-0.15.1/xslices/xslices_old.go000066400000000000000000000142201453027036000175620ustar00rootroot00000000000000//go:build !go1.21 package xslices // Any returns true if f(s[i]) returns true for any i. Trivially, returns false if s is empty. func Any[T any](s []T, f func(T) bool) bool { for i := range s { if f(s[i]) { return true } } return false } // Clone creates a new slice and copies the elements of s into it. func Clone[T any](s []T) []T { return append([]T{}, s...) } // Compact returns a slice containing only the first item from each contiguous run of the same item. // // For example, this can be used to remove duplicates more cheaply than Unique when the slice is // already in sorted order. // // Deprecated: slices.Compact(slices.Clone(s)) is in the standard library as of Go 1.21. 
func Compact[T comparable](s []T) []T { return compactFuncInto([]T{}, s, func(a, b T) bool { return a == b }) } // CompactInPlace returns a slice containing only the first item from each contiguous run of the // same item. This is done in-place and so modifies the contents of s. The modified slice is // returned. // // For example, this can be used to remove duplicates more cheaply than Unique when the slice is // already in sorted order. func CompactInPlace[T comparable](s []T) []T { compacted := compactFuncInto(s[:0], s, func(a, b T) bool { return a == b }) Clear(s[len(compacted):]) return compacted } // CompactFunc returns a slice containing only the first item from each contiguous run of items for // which eq returns true. // // Deprecated: slices.CompactFunc(slices.Clone(s)) is in the standard library as of Go 1.21. func CompactFunc[T any](s []T, eq func(T, T) bool) []T { return compactFuncInto([]T{}, s, eq) } // CompactInPlaceFunc returns a slice containing only the first item from each contiguous run of // items for which eq returns true. This is done in-place and so modifies the contents of s. The // modified slice is returned. func CompactInPlaceFunc[T any](s []T, eq func(T, T) bool) []T { compacted := compactFuncInto(s[:0], s, eq) Clear(s[len(compacted):]) return compacted } func compactFuncInto[T any](into []T, s []T, eq func(T, T) bool) []T { for i := range s { if i == 0 || !eq(s[i-1], s[i]) { into = append(into, s[i]) } } return into } // Equal returns true if a and b contain the same items in the same order. // // Deprecated: slices.Equal is in the standard library as of Go 1.21. func Equal[T comparable](a, b []T) bool { if len(a) != len(b) { return false } for i := range a { if a[i] != b[i] { return false } } return true } // EqualFunc returns true if a and b contain the same items in the same order according to eq. // // Deprecated: slices.EqualFunc is in the standard library as of Go 1.21. 
func EqualFunc[T any](a, b []T, eq func(T, T) bool) bool { if len(a) != len(b) { return false } for i := range a { if !eq(a[i], b[i]) { return false } } return true } // Filter returns a slice containing only the elements of s for which keep() returns true in the // same order that they appeared in s. // // Deprecated: slices.DeleteFunc(slices.Clone(s), f) is in the standard library as of Go 1.21, // though the polarity of the passed function is opposite: return true to remove, rather than to // retain. func Filter[T any](s []T, keep func(t T) bool) []T { return filterInto([]T{}, s, keep) } // FilterInPlace returns a slice containing only the elements of s for which keep() returns true in // the same order that they appeared in s. This is done in-place and so modifies the contents of s. // The modified slice is returned. // // Deprecated: slices.DeleteFunc is in the standard library as of Go 1.21, though the polarity of // the passed function is opposite: return true to remove, rather than to retain. func FilterInPlace[T any](s []T, keep func(t T) bool) []T { filtered := filterInto(s[:0], s, keep) // Zero out the rest in case they contain pointers, so that filtered doesn't retain references. Clear(s[len(filtered):]) return filtered } func filterInto[T any](into []T, s []T, keep func(t T) bool) []T { for i := range s { if keep(s[i]) { into = append(into, s[i]) } } return into } // Grow grows s's capacity by reallocating, if necessary, to fit n more elements and returns the // modified slice. This does not change the length of s. After Grow(s, n), the following n // append()s to s will not need to reallocate. // // Deprecated: slices.Grow is in the standard library as of Go 1.21. func Grow[T any](s []T, n int) []T { if cap(s)-len(s) < n { x2 := make([]T, len(s)+n) copy(x2, s) return x2[:len(s)] } return s } // Index returns the first index of x in s, or -1 if x is not in s. // // Deprecated: slices.Index is in the standard library as of Go 1.21. 
func Index[T comparable](s []T, x T) int { for i := range s { if s[i] == x { return i } } return -1 } // Index returns the first index in s for which f(s[i]) returns true, or -1 if there are no such // items. // // Deprecated: slices.IndexFunc is in the standard library as of Go 1.21. func IndexFunc[T any](s []T, f func(T) bool) int { for i := range s { if f(s[i]) { return i } } return -1 } // Insert inserts the given values starting at index idx, shifting elements after idx to the right // and growing the slice to make room. Insert will expand the length of the slice up to its capacity // if it can, if this isn't desired then s should be resliced to have capacity equal to its length: // // s[:len(s):len(s)] // // The time cost is O(n+m) where n is len(values) and m is len(s[idx:]). // // Deprecated: slices.Insert is in the standard library as of Go 1.21. func Insert[T any](s []T, idx int, values ...T) []T { s = Grow(s, len(values)) s = s[: len(s)+len(values) : len(s)+len(values)] copy(s[idx+len(values):], s[idx:]) copy(s[idx:], values) return s } // Remove removes n elements from s starting at index idx and returns the modified slice. This // requires shifting the elements after the removed elements over, and so its cost is linear in the // number of elements shifted. // // Deprecated: slices.Delete is in the standard library as of Go 1.21, though slices.Delete takes // two indexes rather than an index and a length. 
func Remove[T any](s []T, idx int, n int) []T { copy(s[idx:], s[idx+n:]) Clear(s[len(s)-n:]) return s[:len(s)-n] } juniper-0.15.1/xslices/xslices_test.go000066400000000000000000000020421453027036000177620ustar00rootroot00000000000000package xslices import ( "testing" "github.com/bradenaw/juniper/internal/require2" ) func FuzzPartition(f *testing.F) { f.Fuzz(func(t *testing.T, b []byte) { test := func(x byte) bool { return x%2 == 0 } t.Logf("in: %#v", b) t.Logf("in test: %#v", Map(b, test)) idx := Partition(b, test) t.Logf("out: %#v", b) t.Logf("out test: %#v", Map(b, test)) t.Logf("out idx: %d", idx) for i := 0; i < idx; i++ { require2.True(t, !test(b[i])) } for i := idx; i < len(b); i++ { require2.True(t, test(b[i])) } }) } func FuzzRemoveUnordered(f *testing.F) { f.Fuzz(func(t *testing.T, l int, idx int, n int) { if l < 0 || l > 255 || idx < 0 || idx > l-1 || n < 0 || n > l-idx { return } t.Logf("l = %d", l) t.Logf("idx = %d", idx) t.Logf("n = %d", n) x := make([]int, l) expected := make([]int, 0, l) for i := range x { x[i] = i if !(i >= idx && i < idx+n) { expected = append(expected, i) } } actual := RemoveUnordered(Clone(x), idx, n) require2.ElementsMatch(t, expected, actual) }) } juniper-0.15.1/xsort/000077500000000000000000000000001453027036000144315ustar00rootroot00000000000000juniper-0.15.1/xsort/testdata/000077500000000000000000000000001453027036000162425ustar00rootroot00000000000000juniper-0.15.1/xsort/testdata/fuzz/000077500000000000000000000000001453027036000172405ustar00rootroot00000000000000juniper-0.15.1/xsort/testdata/fuzz/FuzzMerge/000077500000000000000000000000001453027036000211565ustar00rootroot0000000000000013809475f3d15cec40695eb42e92cedb1340834bde29443e88744b84494638af000066400000000000000000000000561453027036000313530ustar00rootroot00000000000000juniper-0.15.1/xsort/testdata/fuzz/FuzzMergego test fuzz v1 []byte("0") int(0) int64(-44) 
3a13beea93b6a963f9a530b1d6a25a89013281d73a1692d4a7e82f7f7a59c263000066400000000000000000000000551453027036000315420ustar00rootroot00000000000000juniper-0.15.1/xsort/testdata/fuzz/FuzzMergego test fuzz v1 []byte("") int(0) int64(-44) f4a395632ec0839487b2172c9916470d80756b1893f000796d0414c7aca99cdd000066400000000000000000000000601453027036000312020ustar00rootroot00000000000000juniper-0.15.1/xsort/testdata/fuzz/FuzzMergego test fuzz v1 []byte("00") int(-37) int64(65) juniper-0.15.1/xsort/xsort.go000066400000000000000000000114701453027036000161420ustar00rootroot00000000000000// Package xsort contains extensions to the standard library package sort. package xsort import ( "sort" "github.com/bradenaw/juniper/internal/heap" "github.com/bradenaw/juniper/iterator" "github.com/bradenaw/juniper/xslices" ) // Returns true if a is less than b. Must follow the same rules as sort.Interface.Less. type Less[T any] func(a, b T) bool // Compile-time assert the types match. var _ Less[int] = OrderedLess[int] // Greater returns true if a > b according to less. func Greater[T any](less Less[T], a T, b T) bool { return less(b, a) } // LessOrEqual returns true if a <= b according to less. func LessOrEqual[T any](less Less[T], a T, b T) bool { // a <= b // !(a > b) // !(b < a) return !less(b, a) } // LessOrEqual returns true if a >= b according to less. func GreaterOrEqual[T any](less Less[T], a T, b T) bool { // a >= b // !(a < b) return !less(a, b) } // Equal returns true if a == b according to less. func Equal[T any](less Less[T], a T, b T) bool { return !less(a, b) && !less(b, a) } // Reverse returns a Less that orders elements in the opposite order of the provided less. func Reverse[T any](less Less[T]) Less[T] { return func(a, b T) bool { return less(b, a) } } // Slice sorts x in-place using the given less function to compare items. // // Follows the same rules as sort.Slice. // // Deprecated: slices.SortFunc is in the standard library as of Go 1.21. 
func Slice[T any](x []T, less Less[T]) { sort.Slice(x, func(i, j int) bool { return less(x[i], x[j]) }) } // SliceStable stably sorts x in-place using the given less function to compare items. // // Follows the same rules as sort.SliceStable. // // Deprecated: slices.SortStableFunc is in the standard library as of Go 1.21. func SliceStable[T any](x []T, less Less[T]) { sort.SliceStable(x, func(i, j int) bool { return less(x[i], x[j]) }) } // SliceIsSorted returns true if x is in sorted order according to the given less function. // // Follows the same rules as sort.SliceIsSorted. // // Deprecated: slices.IsSortedFunc is in the standard library as of Go 1.21. func SliceIsSorted[T any](x []T, less Less[T]) bool { return sort.SliceIsSorted(x, func(i, j int) bool { return less(x[i], x[j]) }) } // Search searches for item in x, assumed sorted according to less, and returns the index. The // return value is the index to insert item at if it is not present (it could be len(a)). // // Deprecated: slices.BinarySearchFunc is in the standard library as of Go 1.21. func Search[T any](x []T, less Less[T], item T) int { return sort.Search(len(x), func(i int) bool { return less(item, x[i]) || !less(x[i], item) }) } type valueAndSource[T any] struct { value T source int } type mergeIterator[T any] struct { in []iterator.Iterator[T] h heap.Heap[valueAndSource[T]] } func (iter *mergeIterator[T]) Next() (T, bool) { if iter.h.Len() == 0 { var zero T return zero, false } item := iter.h.Pop() nextItem, ok := iter.in[item.source].Next() if ok { iter.h.Push(valueAndSource[T]{nextItem, item.source}) } return item.value, true } // Merge returns an iterator that yields all items from in in sorted order. // // The results are undefined if the in iterators do not yield items in sorted order according to // less. // // The time complexity of Next() is O(log(k)) where k is len(in). 
func Merge[T any](less Less[T], in ...iterator.Iterator[T]) iterator.Iterator[T] { initial := make([]valueAndSource[T], 0, len(in)) for i := range in { item, ok := in[i].Next() if !ok { continue } initial = append(initial, valueAndSource[T]{item, i}) } h := heap.New( func(a, b valueAndSource[T]) bool { return less(a.value, b.value) }, func(a valueAndSource[T], i int) {}, initial, ) return &mergeIterator[T]{ in: in, h: h, } } // MergeSlices merges the already-sorted slices of in. Optionally, a pre-allocated out slice can be // provided to store the result into. // // The results are undefined if the in slices are not already sorted. // // The time complexity is O(n * log(k)) where n is the total number of items and k is len(in). func MergeSlices[T any](less Less[T], out []T, in ...[]T) []T { n := 0 for i := range in { n += len(in[i]) } out = xslices.Grow(out[:0], n) iter := Merge(less, xslices.Map(in, iterator.Slice[T])...) for { item, ok := iter.Next() if !ok { break } out = append(out, item) } return out } // MinK returns the k minimum items according to less from iter in sorted order. If iter yields // fewer than k items, MinK returns all of them. func MinK[T any](less Less[T], iter iterator.Iterator[T], k int) []T { h := heap.New[T](heap.Less[T](Reverse(less)), func(a T, i int) {}, nil) for { item, ok := iter.Next() if !ok { break } h.Push(item) if h.Len() > k { h.Pop() } } out := make([]T, h.Len()) for i := len(out) - 1; i >= 0; i-- { out[i] = h.Pop() } return out } juniper-0.15.1/xsort/xsort_go1.21.go000066400000000000000000000004351453027036000171300ustar00rootroot00000000000000//go:build go1.21 package xsort import ( "cmp" ) // OrderedLess is an implementation of Less for cmp.Ordered types by using the < operator. // // Deprecated: cmp.Less is in the standard library as of Go 1.21. 
func OrderedLess[T cmp.Ordered](a, b T) bool { return cmp.Less(a, b) } juniper-0.15.1/xsort/xsort_old.go000066400000000000000000000003611453027036000167750ustar00rootroot00000000000000//go:build !go1.21 package xsort import ( "golang.org/x/exp/constraints" ) // OrderedLess is an implementation of Less for cmp.Ordered types by using the < operator. func OrderedLess[T constraints.Ordered](a, b T) bool { return a < b } juniper-0.15.1/xsort/xsort_test.go000066400000000000000000000052771453027036000172110ustar00rootroot00000000000000package xsort_test import ( "fmt" "math/rand" "testing" "github.com/bradenaw/juniper/internal/require2" "github.com/bradenaw/juniper/iterator" "github.com/bradenaw/juniper/xsort" ) func TestMergeSlices(t *testing.T) { check := func(in ...[]int) { var all []int for i := range in { require2.True(t, xsort.SliceIsSorted(in[i], xsort.OrderedLess[int])) all = append(all, in[i]...) } merged := xsort.MergeSlices( xsort.OrderedLess[int], nil, in..., ) require2.True(t, xsort.SliceIsSorted(merged, xsort.OrderedLess[int])) require2.ElementsMatch(t, all, merged) } check([]int{1, 2, 3}) check( []int{1, 2, 3}, []int{4, 5, 6}, ) check( []int{1, 3, 5}, []int{2, 4, 6}, ) check( []int{1, 12, 19, 27}, []int{2, 7, 13}, []int{}, []int{5}, ) } func FuzzMerge(f *testing.F) { f.Fuzz(func(t *testing.T, b []byte, n int, seed int64) { if len(b) == 0 { return } if n <= 0 { return } r := rand.New(rand.NewSource(seed)) bs := make([][]byte, (n%len(b))+1) for i := range b { j := r.Intn(len(bs)) bs[j] = append(bs[j], b[i]) } for i := range bs { xsort.Slice(bs[i], xsort.OrderedLess[byte]) } expected := append([]byte{}, b...) 
xsort.Slice(expected, xsort.OrderedLess[byte]) merged := xsort.Merge( xsort.OrderedLess[byte], iterator.Collect( iterator.Map( iterator.Slice(bs), func(b []byte) iterator.Iterator[byte] { return iterator.Slice(b) }, ), )..., ) require2.SlicesEqual(t, expected, iterator.Collect(merged)) }) } func ExampleSearch() { x := []string{"a", "f", "h", "i", "p", "z"} fmt.Println(xsort.Search(x, xsort.OrderedLess[string], "h")) fmt.Println(xsort.Search(x, xsort.OrderedLess[string], "k")) // Output: // 2 // 4 } func ExampleMerge() { listOne := []string{"a", "f", "p", "x"} listTwo := []string{"b", "e", "o", "v"} listThree := []string{"s", "z"} merged := xsort.Merge( xsort.OrderedLess[string], iterator.Slice(listOne), iterator.Slice(listTwo), iterator.Slice(listThree), ) fmt.Println(iterator.Collect(merged)) // Output: // [a b e f o p s v x z] } func ExampleMergeSlices() { listOne := []string{"a", "f", "p", "x"} listTwo := []string{"b", "e", "o", "v"} listThree := []string{"s", "z"} merged := xsort.MergeSlices( xsort.OrderedLess[string], nil, listOne, listTwo, listThree, ) fmt.Println(merged) // Output: // [a b e f o p s v x z] } func ExampleMinK() { a := []int{7, 4, 3, 8, 2, 1, 6, 9, 0, 5} iter := iterator.Slice(a) min3 := xsort.MinK(xsort.OrderedLess[int], iter, 3) fmt.Println(min3) iter = iterator.Slice(a) max3 := xsort.MinK(xsort.Reverse(xsort.OrderedLess[int]), iter, 3) fmt.Println(max3) // Output: // [0 1 2] // [9 8 7] } juniper-0.15.1/xsync/000077500000000000000000000000001453027036000144165ustar00rootroot00000000000000juniper-0.15.1/xsync/xsync.go000066400000000000000000000163541453027036000161220ustar00rootroot00000000000000// Package xsync contains extensions to the standard library package sync. package xsync import ( "context" "math/rand" "sync" "time" ) // ContextCond is equivalent to sync.Cond, except its Wait function accepts a context.Context. // // ContextConds should not be copied after first use. 
type ContextCond struct { m sync.RWMutex ch chan struct{} L sync.Locker } // NewContextCond returns a new ContextCond with l as its Locker. func NewContextCond(l sync.Locker) *ContextCond { return &ContextCond{ L: l, ch: make(chan struct{}), } } // Broadcast wakes all goroutines blocked in Wait(), if there are any. // // It is allowed but not required for the caller to hold c.L during the call. func (c *ContextCond) Broadcast() { c.m.Lock() close(c.ch) c.ch = make(chan struct{}) c.m.Unlock() } // Signal wakes one goroutine blocked in Wait(), if there is any. No guarantee is made as to which // goroutine will wake. // // It is allowed but not required for the caller to hold c.L during the call. func (c *ContextCond) Signal() { c.m.RLock() select { case c.ch <- struct{}{}: default: } c.m.RUnlock() } // Wait is equivalent to sync.Cond.Wait, except it accepts a context.Context. If the context expires // before this goroutine is woken by Broadcast or Signal, it returns ctx.Err() immediately. If an // error is returned, does not reaquire c.L before returning. func (c *ContextCond) Wait(ctx context.Context) error { c.m.RLock() ch := c.ch c.m.RUnlock() c.L.Unlock() select { case <-ctx.Done(): return ctx.Err() case <-ch: c.L.Lock() } return nil } // Group manages a group of goroutines. type Group struct { ctx context.Context cancel context.CancelFunc // held in R when spawning to check if ctx is already cancelled and in W when cancelling ctx to // make sure we never cause wg to go 0->1 while inside Wait() m sync.RWMutex wg sync.WaitGroup } // NewGroup returns a Group ready for use. The context passed to any of the f functions will be a // descendant of ctx. func NewGroup(ctx context.Context) *Group { bgCtx, cancel := context.WithCancel(ctx) return &Group{ ctx: bgCtx, cancel: cancel, } } // helper even though it's exactly g.Do so that the goroutine stack for a spawned function doesn't // confusingly show all of them as created by Do. 
func (g *Group) spawn(f func()) { g.m.RLock() if g.ctx.Err() != nil { g.m.RUnlock() return } g.wg.Add(1) g.m.RUnlock() go func() { f() g.wg.Done() }() } // Do calls f once from another goroutine. func (g *Group) Do(f func(ctx context.Context)) { g.spawn(func() { f(g.ctx) }) } // returns a random duration in [d - jitter, d + jitter] func jitterDuration(d time.Duration, jitter time.Duration) time.Duration { return d + time.Duration(float64(jitter)*((rand.Float64()*2)-1)) } // Periodic spawns a goroutine that calls f once per interval +/- jitter. func (g *Group) Periodic( interval time.Duration, jitter time.Duration, f func(ctx context.Context), ) { g.spawn(func() { t := time.NewTimer(jitterDuration(interval, jitter)) defer t.Stop() for { if g.ctx.Err() != nil { return } select { case <-g.ctx.Done(): return case <-t.C: } t.Reset(jitterDuration(interval, jitter)) f(g.ctx) } }) } // Trigger spawns a goroutine which calls f whenever the returned function is called. If f is // already running when triggered, f will run again immediately when it finishes. func (g *Group) Trigger(f func(ctx context.Context)) func() { c := make(chan struct{}, 1) g.spawn(func() { for { if g.ctx.Err() != nil { return } select { case <-g.ctx.Done(): return case <-c: } f(g.ctx) } }) return func() { select { case c <- struct{}{}: default: } } } // PeriodicOrTrigger spawns a goroutine which calls f whenever the returned function is called. If // f is already running when triggered, f will run again immediately when it finishes. Also calls f // when it has been interval+/-jitter since the last trigger. 
func (g *Group) PeriodicOrTrigger( interval time.Duration, jitter time.Duration, f func(ctx context.Context), ) func() { c := make(chan struct{}, 1) g.spawn(func() { t := time.NewTimer(jitterDuration(interval, jitter)) defer t.Stop() for { if g.ctx.Err() != nil { return } select { case <-g.ctx.Done(): return case <-t.C: t.Reset(jitterDuration(interval, jitter)) case <-c: if !t.Stop() { <-t.C } t.Reset(jitterDuration(interval, jitter)) } f(g.ctx) } }) return func() { select { case c <- struct{}{}: default: } } } // Stop cancels the context passed to spawned goroutines. After the group is stopped, no more // goroutines will be spawned. func (g *Group) Stop() { g.m.Lock() g.cancel() g.m.Unlock() } // StopAndWait cancels the context passed to any of the spawned goroutines and waits for all spawned // goroutines to exit. After the group is stopped, no more goroutines will be spawned. func (g *Group) StopAndWait() { g.Stop() g.wg.Wait() } // Map is a typesafe wrapper over sync.Map. type Map[K comparable, V any] struct { m sync.Map } func (m *Map[K, V]) Delete(key K) { m.m.Delete(key) } func (m *Map[K, V]) Load(key K) (value V, ok bool) { value_, ok := m.m.Load(key) if !ok { var zero V return zero, false } return value_.(V), ok } func (m *Map[K, V]) LoadAndDelete(key K) (value V, loaded bool) { value_, ok := m.m.LoadAndDelete(key) if !ok { var zero V return zero, false } return value_.(V), ok } func (m *Map[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) { actual_, loaded := m.m.LoadOrStore(key, value) return actual_.(V), loaded } func (m *Map[K, V]) Range(f func(key K, value V) bool) { m.m.Range(func(key, value interface{}) bool { return f(key.(K), value.(V)) }) } func (m *Map[K, V]) Store(key K, value V) { m.m.Store(key, value) } // Pool is a typesafe wrapper over sync.Pool. 
type Pool[T any] struct { p sync.Pool } func NewPool[T any](new_ func() T) Pool[T] { return Pool[T]{ p: sync.Pool{ New: func() interface{} { return new_() }, }, } } func (p *Pool[T]) Get() T { return p.p.Get().(T) } func (p *Pool[T]) Put(x T) { p.p.Put(x) } // Future can be filled with a value exactly once. Many goroutines can concurrently wait for it to // be filled. After filling, Wait() immediately returns the value it was filled with. // // Futures must be created by NewFuture and should not be copied after first use. type Future[T any] struct { c chan struct{} x T } // NewFuture returns a ready-to-use Future. func NewFuture[T any]() *Future[T] { return &Future[T]{ c: make(chan struct{}), } } // Fill fills f with value x. All active calls to Wait return x, and all future calls to Wait return // x immediately. // // Panics if f has already been filled. func (f *Future[T]) Fill(x T) { f.x = x close(f.c) } // Wait waits for f to be filled with a value and returns it. Returns immediately if f is already // filled. func (f *Future[T]) Wait() T { <-f.c return f.x } // Wait waits for f to be filled with a value and returns it, or returns ctx.Err() if ctx expires // before this happens. Returns immediately if f is already filled. func (f *Future[T]) WaitContext(ctx context.Context) (T, error) { select { case <-ctx.Done(): var zero T return zero, ctx.Err() case <-f.c: } return f.x, nil } juniper-0.15.1/xsync/xsync_go1.19.go000066400000000000000000000035251453027036000171140ustar00rootroot00000000000000//go:build go1.19 package xsync import ( "sync/atomic" ) // Watchable contains a value. It is similar to an atomic.Pointer[T] but allows notifying callers // that a new value has been set. type Watchable[T any] struct { p atomic.Pointer[watchableInner[T]] } type watchableInner[T any] struct { t T c chan struct{} } // Set sets the value in w and notifies callers of Value() that there is a new value. 
func (w *Watchable[T]) Set(t T) { newInner := &watchableInner[T]{ t: t, c: make(chan struct{}), } oldInner := w.p.Swap(newInner) if oldInner != nil { close(oldInner.c) } } // Value returns the current value inside w and a channel that will be closed when w is Set() to a // newer value than the returned one. // // If called before the first Set(), returns the zero value of T. // // Normal usage has an observer waiting for new values in a loop: // // for { // v, changed := w.Value() // // // do something with v // // <-changed // } // // Note that the value in w may have changed multiple times between successive calls to Value(), // Value() only ever returns the last-set value. This is by design so that slow observers cannot // block Set(), unlike sending values on a channel. func (w *Watchable[T]) Value() (T, chan struct{}) { inner := w.p.Load() if inner == nil { // There's no inner, meaning w has not been Set() yet. Try filling it with an empty inner, // so that we have a channel to listen on. c := make(chan struct{}) emptyInner := &watchableInner[T]{ c: c, } // CompareAndSwap so we don't accidentally smash a real value that got put between our Load // and here. if w.p.CompareAndSwap(nil, emptyInner) { var zero T return zero, c } // If we fell through to here somebody Set() while we were trying to do this, so there's // definitely an inner now. 
inner = w.p.Load() } return inner.t, inner.c } juniper-0.15.1/xsync/xsync_go1.19_test.go000066400000000000000000000011331453027036000201440ustar00rootroot00000000000000//go:build go1.19 package xsync import ( "fmt" "time" ) func ExampleWatchable() { start := time.Now() var w Watchable[int] w.Set(0) go func() { for i := 1; i < 20; i++ { w.Set(i) fmt.Printf("set %d at %s\n", i, time.Since(start).Round(time.Millisecond)) time.Sleep(5 * time.Millisecond) } }() for { v, changed := w.Value() if v == 19 { return } fmt.Printf("observed %d at %s\n", v, time.Since(start).Round(time.Millisecond)) // Sleep for longer between iterations to show that we don't slow down the setter. time.Sleep(17 * time.Millisecond) <-changed } } juniper-0.15.1/xsync/xsync_go1.21.go000066400000000000000000000013141453027036000170770ustar00rootroot00000000000000//go:build go1.21 package xsync import ( "sync" ) // Lazy makes a lazily-initialized value. On first access, it uses f to create the value. Later // accesses all receive the same value. // // Deprecated: sync.OnceValue is in the standard library as of Go 1.21. func Lazy[T any](f func() T) func() T { return sync.OnceValue(f) } func (m *Map[K, V]) CompareAndDelete(key K, old V) (deleted bool) { return m.m.CompareAndDelete(key, old) } func (m *Map[K, V]) CompareAndSwap(key K, old V, new V) (deleted bool) { return m.m.CompareAndSwap(key, old, new) } func (m *Map[K, V]) Swap(key K, value V) (previous V, loaded bool) { previousUntyped, loaded := m.m.Swap(key, value) return previousUntyped.(V), loaded } juniper-0.15.1/xsync/xsync_old.go000066400000000000000000000005201453027036000167440ustar00rootroot00000000000000//go:build !go1.21 package xsync import ( "sync" ) // Lazy makes a lazily-initialized value. On first access, it uses f to create the value. Later // accesses all receive the same value. 
func Lazy[T any](f func() T) func() T { var once sync.Once var val T return func() T { once.Do(func() { val = f() }) return val } } juniper-0.15.1/xsync/xsync_test.go000066400000000000000000000032611453027036000171520ustar00rootroot00000000000000package xsync import ( "context" "fmt" "testing" "time" "github.com/bradenaw/juniper/xtime" ) func ExampleLazy() { var ( expensive = Lazy(func() string { fmt.Println("doing expensive init") return "foo" }) ) fmt.Println(expensive()) fmt.Println(expensive()) // Output: // doing expensive init // foo // foo } func TestGroup(t *testing.T) { g := NewGroup(context.Background()) dos := make(chan struct{}, 100) g.Do(func(ctx context.Context) { for { err := xtime.SleepContext(ctx, 50*time.Millisecond) if err != nil { return } select { case dos <- struct{}{}: default: } } }) periodics := make(chan struct{}, 100) g.Periodic(35*time.Millisecond, 0 /*jitter*/, func(ctx context.Context) { select { case periodics <- struct{}{}: default: } }) periodicOrTriggers := make(chan struct{}, 100) periodicOrTrigger := g.PeriodicOrTrigger(75*time.Millisecond, 0 /*jitter*/, func(ctx context.Context) { select { case periodicOrTriggers <- struct{}{}: default: } }) triggers := make(chan struct{}, 100) trigger := g.Trigger(func(ctx context.Context) { select { case triggers <- struct{}{}: default: } }) trigger() periodicOrTrigger() time.Sleep(200 * time.Millisecond) trigger() <-dos <-dos <-dos <-dos <-periodics <-periodics <-periodics <-periodics <-periodics <-periodicOrTriggers <-periodicOrTriggers <-periodicOrTriggers <-triggers <-triggers g.StopAndWait() g.Do(func(ctx context.Context) { panic("this will never spawn because StopAndWait was already called") }) // Jank, but just in case we'd be safe from the above panic just because the test is over. 
time.Sleep(200 * time.Millisecond) } juniper-0.15.1/xtime/000077500000000000000000000000001453027036000144005ustar00rootroot00000000000000juniper-0.15.1/xtime/xtime.go000066400000000000000000000063611453027036000160630ustar00rootroot00000000000000// Package xtime contains extensions to the standard library package time. package xtime import ( "context" "fmt" "math/rand" "sync" "time" ) type DeadlineTooSoonError struct { remaining time.Duration d time.Duration } func (err DeadlineTooSoonError) Error() string { return fmt.Sprintf( "not enough time remaining in context: %s remaining for %s sleep", err.remaining, err.d, ) } // SleepContext pauses the current goroutine for at least the duration d and returns nil, unless ctx // expires in the mean time in which case it returns ctx.Err(). // // A negative or zero duration causes SleepContext to return nil immediately. // // If there is less than d left until ctx's deadline, returns DeadlineTooSoonError immediately. func SleepContext(ctx context.Context, d time.Duration) error { if d <= 0 { return nil } deadline, ok := ctx.Deadline() if ok { remaining := time.Until(deadline) if remaining > d { return DeadlineTooSoonError{remaining: remaining, d: d} } } t := time.NewTimer(d) select { case <-ctx.Done(): t.Stop() return ctx.Err() case <-t.C: return nil } } // A JitterTicker holds a channel that delivers "ticks" of a clock at intervals. type JitterTicker struct { C <-chan time.Time c chan time.Time m sync.Mutex d time.Duration gen int jitter time.Duration timer *time.Timer } // NewJitterTicker is similar to time.NewTicker, but jitters the ticks by the given amount. That is, // each tick will be d+/-jitter apart. // // The duration d must be greater than zero and jitter must be less than d; if not, NewJitterTicker // will panic. 
func NewJitterTicker(d time.Duration, jitter time.Duration) *JitterTicker { if d <= 0 { panic("non-positive interval for NewJitterTicker") } if jitter >= d { panic("jitter greater than d") } c := make(chan time.Time, 1) t := &JitterTicker{ C: c, c: c, d: d, jitter: jitter, } t.m.Lock() t.schedule() t.m.Unlock() return t } func (t *JitterTicker) schedule() { if t.timer != nil { t.timer.Stop() } next := t.d + time.Duration(rand.Int63n(int64(t.jitter*2))) - (t.jitter) // To prevent a latent goroutine already spawned but not yet running the below function from // delivering a tick after Stop/Reset. t.gen++ gen := t.gen t.timer = time.AfterFunc(next, func() { t.m.Lock() if t.gen == gen { select { case t.c <- time.Now(): default: } t.schedule() } t.m.Unlock() }) } // Reset stops the ticker and resets its period to be the specified duration and jitter. The next // tick will arrive after the new period elapses. // // The duration d must be greater than zero and jitter must be less than d; if not, Reset will // panic. func (t *JitterTicker) Reset(d time.Duration, jitter time.Duration) { if d <= 0 { panic("non-positive interval for NewJitterTicker") } if jitter >= d { panic("jitter greater than d") } t.m.Lock() t.d = d t.jitter = jitter t.schedule() t.m.Unlock() } // Stop turns off the JitterTicker. After it returns, no more ticks will be sent. Stop does not // close the channel, to prevent a concurrent goroutine reading from the channel from seeing an // erroneous "tick". 
func (t *JitterTicker) Stop() { t.m.Lock() t.timer.Stop() t.gen++ t.timer = nil t.m.Unlock() } juniper-0.15.1/xtime/xtime_test.go000066400000000000000000000014531453027036000171170ustar00rootroot00000000000000package xtime import ( "testing" "time" ) func TestJitterTicker(t *testing.T) { d := 5 * time.Millisecond jitter := 2 * time.Millisecond ticker := NewJitterTicker(d, jitter) last := time.Now() check := func() { now := time.Now() elapsed := now.Sub(last) minTick := d - jitter // Add a little extra slack because of scheduling. maxTick := d + jitter + 3*time.Millisecond if elapsed < minTick { t.Fatalf("tick was %s, expected in [%s, %s]", elapsed, minTick, maxTick) } if elapsed > maxTick { t.Fatalf("tick was %s, expected in [%s, %s]", elapsed, minTick, maxTick) } last = now } for i := 0; i < 50; i++ { <-ticker.C check() } d = 10 * time.Millisecond jitter = 8 * time.Millisecond ticker.Reset(d, jitter) for i := 0; i < 20; i++ { <-ticker.C check() } }