pax_global_header00006660000000000000000000000064143763042670014526gustar00rootroot0000000000000052 comment=7b8c8f2875cb861bb61844c9bcaa1aed070adbd4 conc-0.3.0/000077500000000000000000000000001437630426700124505ustar00rootroot00000000000000conc-0.3.0/.github/000077500000000000000000000000001437630426700140105ustar00rootroot00000000000000conc-0.3.0/.github/workflows/000077500000000000000000000000001437630426700160455ustar00rootroot00000000000000conc-0.3.0/.github/workflows/go.yml000066400000000000000000000016101437630426700171730ustar00rootroot00000000000000# This workflow will build a golang project # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go name: Go on: push: branches: [ "main" ] pull_request: branches: [ "main" ] jobs: build: strategy: matrix: go-version: ['1.19', 'stable'] runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: Set up Go ${{ matrix.go-version }} uses: actions/setup-go@v3 with: go-version: ${{ matrix.go-version }} - name: Build run: go build -v ./... - name: Lint uses: golangci/golangci-lint-action@v3.3.1 with: version: latest args: --timeout 5m - name: Test run: go test -race -v ./... -coverprofile ./coverage.txt - name: Codecov uses: codecov/codecov-action@v3.1.1 with: files: ./coverage.txt conc-0.3.0/.golangci.yml000066400000000000000000000002361437630426700150350ustar00rootroot00000000000000linters: disable-all: true enable: - errcheck - godot - gosimple - govet - ineffassign - staticcheck - typecheck - unused conc-0.3.0/LICENSE000066400000000000000000000020541437630426700134560ustar00rootroot00000000000000MIT License Copyright (c) 2023 Sourcegraph Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
conc-0.3.0/README.md000066400000000000000000000331531437630426700137340ustar00rootroot00000000000000![conch](https://user-images.githubusercontent.com/12631702/210295964-785cc63d-d697-420c-99ff-f492eb81dec9.svg) # `conc`: better structured concurrency for go [![Go Reference](https://pkg.go.dev/badge/github.com/sourcegraph/conc.svg)](https://pkg.go.dev/github.com/sourcegraph/conc) [![Sourcegraph](https://img.shields.io/badge/view%20on-sourcegraph-A112FE?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADIAAAAyCAYAAAAeP4ixAAAEZklEQVRoQ+2aXWgUZxSG3292sxtNN43BhBakFPyhxSujRSxiU1pr7SaGXqgUxOIEW0IFkeYighYUxAuLUlq0lrq2iCDpjWtmFVtoG6QVNOCFVShVLyxIk0DVjZLMxt3xTGTccd2ZOd/8JBHci0CY9zvnPPN+/7sCIXwKavOwAcy2QgngQiIztDSE0OwQlDPYR1ebiaH6J5kZChyfW12gRG4QVgGTBfMchMbFP9Sn5nlZL2D0JjLD6710lc+z0NfqSGTXQRQ4bX07Mq423yoBL3OSyHSvUxirMuaEvgbJWrdcvkHMoJwxYuq4INUhyuWvQa1jvdMGxAvCxJlyEC9XOBCWL04wwRzpbDoDQ7wfZJzIQLi5Eggk6DiRhZgWIAbE3NrM4A3LPT8Q7UgqAqLqTmLSHLGPkyzG/qXEczhd0q6RH+zaSBfaUoc4iQx19pIClIscrTkNZzG6gd7qMY6eC2Hqyo705ZfTf+eqJmhMzcSbYtQpOXc92ZsZjLVAL4YNUQbJ5Ttg4CQrQdGYj44Xr9m1XJCzmZusFDJOWNpHjmh5x624a2ZFtOKDVL+uNo2TuXE3bZQQZUf8gtgqP31uI94Z/rMqix+IGiRfWw3xN9dCgVx+L3WrHm4Dju6PXz/EkjuXJ6R+IGgyOE1TbZqTq9y1eo0EZo7oMo1ktPu3xjHvuiLT5AFNszUyDULtWpzE2/fEsey8O5TbWuGWwxrs5rS7nFNMWJrNh2No74s9Ec4vRNmRRzPXMP19fBMSVsGcOJ98G8N3Wl2gXcbTjbX7vUBxLaeASDQCm5Cu/0E2tvtb0Ea+BowtskFD0wvlc6Rf2M+Jx7dTu7ubFr2dnKDRaMQe2v/tcIrNB7FH0O50AcrBaApmRDVwFO31ql3pD8QW4dP0feNwl/Q+kFEtRyIGyaWXnpy1OO0qNJWHo1y6iCmAGkBb/Ru+HenDWIF2mo4r8G+tRRzoniSn2uqFLxANhe9LKHVyTbz6egk9+x5w5fK6ulSNNMhZ/Feno+GebLZV6isTTa6k5qNl5RnZ5u56Ib6SBvFzaWBBVFZzvnERWlt/Cg4l27XChLCqFyLekjhy6xJyoytgjPf7opIB8QPx7sYFiMXHPGt76m741MhCKMZfng0nBOIjmoJPsLqWHwgFpe6V6qtfcopxveR2Oy+J0ntIN/zCWkf8QNAJ7y6d8Bq4lxLc2/qJl5K7t432XwcqX5CrI34gzATWuYILQtdQPyePDK3iuOekCR3Efjhig1B1Uq5UoXEEoZX7d1q535J5S9VOeFyYyEBku5XTMXXKQTToX5Rg7OI44nbW5oKYeYK4EniMeF0YFNSmb+grhc84LyRCEP1/OurOcipCQbKxDeK2V5FcVyIDMQvsgz5gwFhcWWwKyRlvQ3gv29RwWoDYAbIofNyBxI9eDlQ+n3YgsgCWnr4MStGXQXmv9pF2La/k3OccV54JEBM4yp9EsXa/3LfO0dGPcYq0Y7DfZB8nJzZw2rppHgKgVHs8L5wvRwAAAABJRU5ErkJggg==)](https://sourcegraph.com/github.com/sourcegraph/conc) [![Go Report Card](https://goreportcard.com/badge/github.com/sourcegraph/conc)](https://goreportcard.com/report/github.com/sourcegraph/conc) [![codecov](https://codecov.io/gh/sourcegraph/conc/branch/main/graph/badge.svg?token=MQZTEA1QWT)](https://codecov.io/gh/sourcegraph/conc) [![Discord](https://img.shields.io/badge/discord-chat-%235765F2)](https://discord.gg/bvXQXmtRjN) `conc` is your toolbelt for structured concurrency in go, making common tasks easier and safer. 
```sh
go get github.com/sourcegraph/conc
```

# At a glance

- Use [`conc.WaitGroup`](https://pkg.go.dev/github.com/sourcegraph/conc#WaitGroup) if you just want a safer version of `sync.WaitGroup`
- Use [`pool.Pool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool) if you want a concurrency-limited task runner
- Use [`pool.ResultPool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ResultPool) if you want a concurrent task runner that collects task results
- Use [`pool.(Result)?ErrorPool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ErrorPool) if your tasks are fallible
- Use [`pool.(Result)?ContextPool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ContextPool) if your tasks should be canceled on failure
- Use [`stream.Stream`](https://pkg.go.dev/github.com/sourcegraph/conc/stream#Stream) if you want to process an ordered stream of tasks in parallel with serial callbacks
- Use [`iter.Map`](https://pkg.go.dev/github.com/sourcegraph/conc/iter#Map) if you want to concurrently map a slice
- Use [`iter.ForEach`](https://pkg.go.dev/github.com/sourcegraph/conc/iter#ForEach) if you want to concurrently iterate over a slice
- Use [`panics.Catcher`](https://pkg.go.dev/github.com/sourcegraph/conc/panics#Catcher) if you want to catch panics in your own goroutines

All pools are created with [`pool.New()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#New) or [`pool.NewWithResults[T]()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#NewWithResults), then configured with methods:

- [`p.WithMaxGoroutines()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool.MaxGoroutines) configures the maximum number of goroutines in the pool
- [`p.WithErrors()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool.WithErrors) configures the pool to run tasks that return errors
- [`p.WithContext(ctx)`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool.WithContext) configures the pool to run tasks that should be canceled on first error
- [`p.WithFirstError()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ErrorPool.WithFirstError) configures error pools to only keep the first returned error rather than an aggregated error
- [`p.WithCollectErrored()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ResultContextPool.WithCollectErrored) configures result pools to collect results even when the task errored

# Goals

The main goals of the package are:

1) Make it harder to leak goroutines
2) Handle panics gracefully
3) Make concurrent code easier to read

## Goal #1: Make it harder to leak goroutines

A common pain point when working with goroutines is cleaning them up. It's really easy to fire off a `go` statement and fail to properly wait for it to complete.

`conc` takes the opinionated stance that all concurrency should be scoped. That is, goroutines should have an owner, and that owner should always ensure that its owned goroutines exit properly.

In `conc`, the owner of a goroutine is always a `conc.WaitGroup`. Goroutines are spawned in a `WaitGroup` with `(*WaitGroup).Go()`, and `(*WaitGroup).Wait()` should always be called before the `WaitGroup` goes out of scope.

In some cases, you might want a spawned goroutine to outlast the scope of the caller. In that case, you could pass a `WaitGroup` into the spawning function.

```go
func main() {
	var wg conc.WaitGroup
	defer wg.Wait()
	startTheThing(&wg)
}

func startTheThing(wg *conc.WaitGroup) {
	wg.Go(func() { ... })
}
```

For some more discussion on why scoped concurrency is nice, check out [this blog post](https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/).

## Goal #2: Handle panics gracefully

A frequent problem with goroutines in long-running applications is handling panics. A goroutine spawned without a panic handler will crash the whole process on panic. This is usually undesirable.

However, if you do add a panic handler to a goroutine, what do you do with the panic once you catch it? Some options:

1) Ignore it
2) Log it
3) Turn it into an error and return that to the goroutine spawner
4) Propagate the panic to the goroutine spawner

Ignoring panics is a bad idea since panics usually mean there is actually something wrong and someone should fix it.

Just logging panics isn't great either, because then there is no indication to the spawner that something bad happened, and it might just continue on as normal even though your program is in a really bad state.

Both (3) and (4) are reasonable options, but both require the goroutine to have an owner that can actually receive the message that something went wrong. This is generally not true with a goroutine spawned with `go`, but in the `conc` package, all goroutines have an owner that must collect the spawned goroutine. In the conc package, any call to `Wait()` will panic if any of the spawned goroutines panicked. Additionally, it decorates the panic value with a stacktrace from the child goroutine so that you don't lose information about what caused the panic.

Doing this all correctly every time you spawn something with `go` is not trivial, and it requires a lot of boilerplate that makes the important parts of the code more difficult to read, so `conc` does this for you.
**stdlib**

```go
type caughtPanicError struct {
	val   any
	stack []byte
}

func (e *caughtPanicError) Error() string {
	return fmt.Sprintf(
		"panic: %q\n%s",
		e.val,
		string(e.stack),
	)
}

func main() {
	done := make(chan error)
	go func() {
		defer func() {
			if v := recover(); v != nil {
				done <- &caughtPanicError{
					val:   v,
					stack: debug.Stack(),
				}
			} else {
				done <- nil
			}
		}()
		doSomethingThatMightPanic()
	}()
	err := <-done
	if err != nil {
		panic(err)
	}
}
```

**conc**

```go
func main() {
	var wg conc.WaitGroup
	wg.Go(doSomethingThatMightPanic)
	// panics with a nice stacktrace
	wg.Wait()
}
```
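If you would rather handle a panic as a value than re-panic, the `panics` subpackage (its source appears in `panics/try.go` further down this archive) supports that directly. A minimal sketch, where `mightPanic` is a hypothetical stand-in for your own code:

```go
package main

import (
	"fmt"

	"github.com/sourcegraph/conc/panics"
)

func mightPanic() {
	panic("oh no")
}

func main() {
	// panics.Try executes the function, recovers any panic, and returns
	// a *panics.Recovered describing it (or nil if nothing panicked).
	if recovered := panics.Try(mightPanic); recovered != nil {
		// AsError wraps the recovered value and stacktrace in an error.
		fmt.Println("caught:", recovered.AsError())
	}
}
```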
## Goal #3: Make concurrent code easier to read

Doing concurrency correctly is difficult. Doing it in a way that doesn't obfuscate what the code is actually doing is more difficult. The `conc` package attempts to make common operations easier by abstracting as much boilerplate complexity as possible.

Want to run a set of concurrent tasks with a bounded set of goroutines? Use `pool.New()`. Want to process an ordered stream of results concurrently, but still maintain order? Try `stream.New()`. What about a concurrent map over a slice? Take a peek at `iter.Map()`.

Browse some examples below for some comparisons with doing these by hand.

# Examples

Each of these examples forgoes propagating panics for simplicity. To see what kind of complexity that would add, check out the "Goal #2" header above.

Spawn a set of goroutines and wait for them to finish:
**stdlib**

```go
func main() {
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// crashes on panic!
			doSomething()
		}()
	}
	wg.Wait()
}
```

**conc**

```go
func main() {
	var wg conc.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Go(doSomething)
	}
	wg.Wait()
}
```
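When each task needs per-iteration data, pass it through a local copy. A small sketch; note the `i := i` copy, which is required on Go versions before 1.22 because the closure would otherwise capture the shared loop variable:

```go
func main() {
	var wg conc.WaitGroup
	for i := 0; i < 10; i++ {
		i := i // copy the loop variable so each closure gets its own value
		wg.Go(func() {
			fmt.Println("task", i)
		})
	}
	wg.Wait()
}
```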
Process each element of a stream in a static pool of goroutines:
**stdlib**

```go
func process(stream chan int) {
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for elem := range stream {
				handle(elem)
			}
		}()
	}
	wg.Wait()
}
```

**conc**

```go
func process(stream chan int) {
	p := pool.New().WithMaxGoroutines(10)
	for elem := range stream {
		elem := elem
		p.Go(func() {
			handle(elem)
		})
	}
	p.Wait()
}
```
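If `handle` can fail, the same shape works with an error-collecting pool (see `pool/error_pool.go` later in this archive). A sketch, assuming a fallible `handle(int) error`:

```go
func process(stream chan int) error {
	p := pool.New().WithErrors().WithMaxGoroutines(10)
	for elem := range stream {
		elem := elem
		p.Go(func() error {
			return handle(elem)
		})
	}
	// Wait returns the errors from all failed tasks, combined.
	return p.Wait()
}
```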
Process each element of a slice in a static pool of goroutines:
**stdlib**

```go
func process(values []int) {
	feeder := make(chan int, 8)

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for elem := range feeder {
				handle(elem)
			}
		}()
	}

	for _, value := range values {
		feeder <- value
	}
	close(feeder)
	wg.Wait()
}
```

**conc**

```go
func process(values []int) {
	iter.ForEach(values, handle)
}
```
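If the callback needs the element's index, `iter.ForEachIdx` (defined in `iter/iter.go` below) provides it, and the pointer argument makes in-place mutation safe. A minimal sketch:

```go
func process(values []int) {
	iter.ForEachIdx(values, func(i int, v *int) {
		// Mutating through the pointer updates the slice in place.
		*v += i
	})
}
```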
Concurrently map a slice:
**stdlib**

```go
func concMap(
	input []int,
	f func(int) int,
) []int {
	res := make([]int, len(input))
	var idx atomic.Int64

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()

			for {
				i := int(idx.Add(1) - 1)
				if i >= len(input) {
					return
				}

				res[i] = f(input[i])
			}
		}()
	}
	wg.Wait()
	return res
}
```

**conc**

```go
func concMap(
	input []int,
	f func(*int) int,
) []int {
	return iter.Map(input, f)
}
```
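For fallible mappings there is `iter.MapErr` (see `iter/map.go` below), which returns the mapped results together with a combined error. A sketch using the standard library's `strconv.Atoi`:

```go
func parseAll(raw []string) ([]int, error) {
	// Every element is parsed concurrently; all parse errors are
	// joined into the single returned error.
	return iter.MapErr(raw, func(s *string) (int, error) {
		return strconv.Atoi(*s)
	})
}
```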
Process an ordered stream concurrently:
**stdlib**

```go
func mapStream(
	in chan int,
	out chan int,
	f func(int) int,
) {
	tasks := make(chan func())
	taskResults := make(chan chan int)

	// Worker goroutines
	var workerWg sync.WaitGroup
	for i := 0; i < 10; i++ {
		workerWg.Add(1)
		go func() {
			defer workerWg.Done()
			for task := range tasks {
				task()
			}
		}()
	}

	// Ordered reader goroutines
	var readerWg sync.WaitGroup
	readerWg.Add(1)
	go func() {
		defer readerWg.Done()
		for result := range taskResults {
			item := <-result
			out <- item
		}
	}()

	// Feed the workers with tasks
	for elem := range in {
		elem := elem // copy the loop variable for the task closure (pre-Go 1.22)
		resultCh := make(chan int, 1)
		taskResults <- resultCh
		tasks <- func() {
			resultCh <- f(elem)
		}
	}

	// We've exhausted input.
	// Wait for everything to finish
	close(tasks)
	workerWg.Wait()
	close(taskResults)
	readerWg.Wait()
}
```

**conc**

```go
func mapStream(
	in chan int,
	out chan int,
	f func(int) int,
) {
	s := stream.New().WithMaxGoroutines(10)
	for elem := range in {
		elem := elem
		s.Go(func() stream.Callback {
			res := f(elem)
			return func() { out <- res }
		})
	}
	s.Wait()
}
```
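Because the callbacks run serially and in task-submission order, `stream` fits any job where concurrent work must produce ordered output. A sketch that writes processed lines to a writer in their original order; `processLine` is a hypothetical helper:

```go
func processLines(lines []string, w io.Writer) {
	s := stream.New().WithMaxGoroutines(8)
	for _, line := range lines {
		line := line
		s.Go(func() stream.Callback {
			res := processLine(line) // runs concurrently
			return func() {
				fmt.Fprintln(w, res) // runs serially, in input order
			}
		})
	}
	s.Wait()
}
```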
# Status This package is currently pre-1.0. There are likely to be minor breaking changes before a 1.0 release as we stabilize the APIs and tweak defaults. Please open an issue if you have questions, concerns, or requests that you'd like addressed before the 1.0 release. Currently, a 1.0 is targeted for March 2023. conc-0.3.0/go.mod000066400000000000000000000007231437630426700135600ustar00rootroot00000000000000module github.com/sourcegraph/conc go 1.19 require ( github.com/stretchr/testify v1.8.1 go.uber.org/multierr v1.9.0 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.9.0 // indirect go.uber.org/atomic v1.7.0 // indirect gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) conc-0.3.0/go.sum000066400000000000000000000060771437630426700136150ustar00rootroot00000000000000github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= conc-0.3.0/internal/000077500000000000000000000000001437630426700142645ustar00rootroot00000000000000conc-0.3.0/internal/multierror/000077500000000000000000000000001437630426700164705ustar00rootroot00000000000000conc-0.3.0/internal/multierror/multierror_go119.go000066400000000000000000000001721437630426700221430ustar00rootroot00000000000000//go:build !go1.20 // +build !go1.20 package multierror import "go.uber.org/multierr" var ( Join = multierr.Combine ) conc-0.3.0/internal/multierror/multierror_go120.go000066400000000000000000000001451437630426700221330ustar00rootroot00000000000000//go:build go1.20 // +build go1.20 package multierror import "errors" var ( Join = errors.Join ) conc-0.3.0/iter/000077500000000000000000000000001437630426700134135ustar00rootroot00000000000000conc-0.3.0/iter/iter.go000066400000000000000000000050241437630426700147060ustar00rootroot00000000000000package iter import ( "runtime" "sync/atomic" "github.com/sourcegraph/conc" ) // defaultMaxGoroutines returns the default maximum number of // goroutines to use within this package. func defaultMaxGoroutines() int { return runtime.GOMAXPROCS(0) } // Iterator can be used to configure the behaviour of ForEach // and ForEachIdx. The zero value is safe to use with reasonable // defaults. // // Iterator is also safe for reuse and concurrent use. type Iterator[T any] struct { // MaxGoroutines controls the maximum number of goroutines // to use on this Iterator's methods. // // If unset, MaxGoroutines defaults to runtime.GOMAXPROCS(0). MaxGoroutines int } // ForEach executes f in parallel over each element in input. // // It is safe to mutate the input parameter, which makes it // possible to map in place. // // ForEach always uses at most runtime.GOMAXPROCS goroutines. // It takes roughly 2µs to start up the goroutines and adds // an overhead of roughly 50ns per element of input. For // a configurable goroutine limit, use a custom Iterator. func ForEach[T any](input []T, f func(*T)) { Iterator[T]{}.ForEach(input, f) } // ForEach executes f in parallel over each element in input, // using up to the Iterator's configured maximum number of // goroutines. // // It is safe to mutate the input parameter, which makes it // possible to map in place. // // It takes roughly 2µs to start up the goroutines and adds // an overhead of roughly 50ns per element of input. func (iter Iterator[T]) ForEach(input []T, f func(*T)) { iter.ForEachIdx(input, func(_ int, t *T) { f(t) }) } // ForEachIdx is the same as ForEach except it also provides the // index of the element to the callback. func ForEachIdx[T any](input []T, f func(int, *T)) { Iterator[T]{}.ForEachIdx(input, f) } // ForEachIdx is the same as ForEach except it also provides the // index of the element to the callback. 
func (iter Iterator[T]) ForEachIdx(input []T, f func(int, *T)) { if iter.MaxGoroutines == 0 { // iter is a value receiver and is hence safe to mutate iter.MaxGoroutines = defaultMaxGoroutines() } numInput := len(input) if iter.MaxGoroutines > numInput { // No more concurrent tasks than the number of input items. iter.MaxGoroutines = numInput } var idx atomic.Int64 // Create the task outside the loop to avoid extra closure allocations. task := func() { i := int(idx.Add(1) - 1) for ; i < numInput; i = int(idx.Add(1) - 1) { f(i, &input[i]) } } var wg conc.WaitGroup for i := 0; i < iter.MaxGoroutines; i++ { wg.Go(task) } wg.Wait() } conc-0.3.0/iter/iter_test.go000066400000000000000000000072331437630426700157510ustar00rootroot00000000000000package iter import ( "fmt" "strconv" "sync/atomic" "testing" "github.com/stretchr/testify/require" ) func ExampleIterator() { input := []int{1, 2, 3, 4} iterator := Iterator[int]{ MaxGoroutines: len(input) / 2, } iterator.ForEach(input, func(v *int) { if *v%2 != 0 { *v = -1 } }) fmt.Println(input) // Output: // [-1 2 -1 4] } func TestIterator(t *testing.T) { t.Parallel() t.Run("safe for reuse", func(t *testing.T) { t.Parallel() iterator := Iterator[int]{MaxGoroutines: 999} // iter.Concurrency > numInput case that updates iter.Concurrency iterator.ForEachIdx([]int{1, 2, 3}, func(i int, t *int) {}) require.Equal(t, iterator.MaxGoroutines, 999) }) t.Run("allows more than defaultMaxGoroutines() concurrent tasks", func(t *testing.T) { t.Parallel() wantConcurrency := 2 * defaultMaxGoroutines() maxConcurrencyHit := make(chan struct{}) tasks := make([]int, wantConcurrency) iterator := Iterator[int]{MaxGoroutines: wantConcurrency} var concurrentTasks atomic.Int64 iterator.ForEach(tasks, func(t *int) { n := concurrentTasks.Add(1) defer concurrentTasks.Add(-1) if int(n) == wantConcurrency { // All our tasks are running concurrently. // Signal to the rest of the tasks to stop. close(maxConcurrencyHit) } else { // Wait until we hit max concurrency before exiting. // This ensures that all tasks have been started // in parallel, despite being a larger input set than // defaultMaxGoroutines(). 
<-maxConcurrencyHit } }) }) } func TestForEachIdx(t *testing.T) { t.Parallel() t.Run("empty", func(t *testing.T) { t.Parallel() f := func() { ints := []int{} ForEachIdx(ints, func(i int, val *int) { panic("this should never be called") }) } require.NotPanics(t, f) }) t.Run("panic is propagated", func(t *testing.T) { t.Parallel() f := func() { ints := []int{1} ForEachIdx(ints, func(i int, val *int) { panic("super bad thing happened") }) } require.Panics(t, f) }) t.Run("mutating inputs is fine", func(t *testing.T) { t.Parallel() ints := []int{1, 2, 3, 4, 5} ForEachIdx(ints, func(i int, val *int) { *val += 1 }) require.Equal(t, []int{2, 3, 4, 5, 6}, ints) }) t.Run("huge inputs", func(t *testing.T) { t.Parallel() ints := make([]int, 10000) ForEachIdx(ints, func(i int, val *int) { *val = i }) expected := make([]int, 10000) for i := 0; i < 10000; i++ { expected[i] = i } require.Equal(t, expected, ints) }) } func TestForEach(t *testing.T) { t.Parallel() t.Run("empty", func(t *testing.T) { t.Parallel() f := func() { ints := []int{} ForEach(ints, func(val *int) { panic("this should never be called") }) } require.NotPanics(t, f) }) t.Run("panic is propagated", func(t *testing.T) { t.Parallel() f := func() { ints := []int{1} ForEach(ints, func(val *int) { panic("super bad thing happened") }) } require.Panics(t, f) }) t.Run("mutating inputs is fine", func(t *testing.T) { t.Parallel() ints := []int{1, 2, 3, 4, 5} ForEach(ints, func(val *int) { *val += 1 }) require.Equal(t, []int{2, 3, 4, 5, 6}, ints) }) t.Run("huge inputs", func(t *testing.T) { t.Parallel() ints := make([]int, 10000) ForEach(ints, func(val *int) { *val = 1 }) expected := make([]int, 10000) for i := 0; i < 10000; i++ { expected[i] = 1 } require.Equal(t, expected, ints) }) } func BenchmarkForEach(b *testing.B) { for _, count := range []int{0, 1, 8, 100, 1000, 10000, 100000} { b.Run(strconv.Itoa(count), func(b *testing.B) { ints := make([]int, count) for i := 0; i < b.N; i++ { ForEach(ints, func(i *int) { *i = 0 }) } }) } } conc-0.3.0/iter/map.go000066400000000000000000000036301437630426700145210ustar00rootroot00000000000000package iter import ( "sync" "github.com/sourcegraph/conc/internal/multierror" ) // Mapper is an Iterator with a result type R. It can be used to configure // the behaviour of Map and MapErr. The zero value is safe to use with // reasonable defaults. // // Mapper is also safe for reuse and concurrent use. type Mapper[T, R any] Iterator[T] // Map applies f to each element of input, returning the mapped result. // // Map always uses at most runtime.GOMAXPROCS goroutines. For a configurable // goroutine limit, use a custom Mapper. func Map[T, R any](input []T, f func(*T) R) []R { return Mapper[T, R]{}.Map(input, f) } // Map applies f to each element of input, returning the mapped result. // // Map uses up to the configured Mapper's maximum number of goroutines. func (m Mapper[T, R]) Map(input []T, f func(*T) R) []R { res := make([]R, len(input)) Iterator[T](m).ForEachIdx(input, func(i int, t *T) { res[i] = f(t) }) return res } // MapErr applies f to each element of the input, returning the mapped result // and a combined error of all returned errors. // // Map always uses at most runtime.GOMAXPROCS goroutines. For a configurable // goroutine limit, use a custom Mapper. func MapErr[T, R any](input []T, f func(*T) (R, error)) ([]R, error) { return Mapper[T, R]{}.MapErr(input, f) } // MapErr applies f to each element of the input, returning the mapped result // and a combined error of all returned errors. 
// // Map uses up to the configured Mapper's maximum number of goroutines. func (m Mapper[T, R]) MapErr(input []T, f func(*T) (R, error)) ([]R, error) { var ( res = make([]R, len(input)) errMux sync.Mutex errs error ) Iterator[T](m).ForEachIdx(input, func(i int, t *T) { var err error res[i], err = f(t) if err != nil { errMux.Lock() // TODO: use stdlib errors once multierrors land in go 1.20 errs = multierror.Join(errs, err) errMux.Unlock() } }) return res, errs } conc-0.3.0/iter/map_test.go000066400000000000000000000075031437630426700155630ustar00rootroot00000000000000package iter import ( "errors" "fmt" "testing" "github.com/stretchr/testify/require" ) func ExampleMapper() { input := []int{1, 2, 3, 4} mapper := Mapper[int, bool]{ MaxGoroutines: len(input) / 2, } results := mapper.Map(input, func(v *int) bool { return *v%2 == 0 }) fmt.Println(results) // Output: // [false true false true] } func TestMap(t *testing.T) { t.Parallel() t.Run("empty", func(t *testing.T) { t.Parallel() f := func() { ints := []int{} Map(ints, func(val *int) int { panic("this should never be called") }) } require.NotPanics(t, f) }) t.Run("panic is propagated", func(t *testing.T) { t.Parallel() f := func() { ints := []int{1} Map(ints, func(val *int) int { panic("super bad thing happened") }) } require.Panics(t, f) }) t.Run("mutating inputs is fine, though not recommended", func(t *testing.T) { t.Parallel() ints := []int{1, 2, 3, 4, 5} Map(ints, func(val *int) int { *val += 1 return 0 }) require.Equal(t, []int{2, 3, 4, 5, 6}, ints) }) t.Run("basic increment", func(t *testing.T) { t.Parallel() ints := []int{1, 2, 3, 4, 5} res := Map(ints, func(val *int) int { return *val + 1 }) require.Equal(t, []int{2, 3, 4, 5, 6}, res) require.Equal(t, []int{1, 2, 3, 4, 5}, ints) }) t.Run("huge inputs", func(t *testing.T) { t.Parallel() ints := make([]int, 10000) res := Map(ints, func(val *int) int { return 1 }) expected := make([]int, 10000) for i := 0; i < 10000; i++ { expected[i] = 1 } require.Equal(t, expected, res) }) } func TestMapErr(t *testing.T) { t.Parallel() t.Run("empty", func(t *testing.T) { t.Parallel() f := func() { ints := []int{} res, err := MapErr(ints, func(val *int) (int, error) { panic("this should never be called") }) require.NoError(t, err) require.Equal(t, ints, res) } require.NotPanics(t, f) }) t.Run("panic is propagated", func(t *testing.T) { t.Parallel() f := func() { ints := []int{1} _, _ = MapErr(ints, func(val *int) (int, error) { panic("super bad thing happened") }) } require.Panics(t, f) }) t.Run("mutating inputs is fine, though not recommended", func(t *testing.T) { t.Parallel() ints := []int{1, 2, 3, 4, 5} res, err := MapErr(ints, func(val *int) (int, error) { *val += 1 return 0, nil }) require.NoError(t, err) require.Equal(t, []int{2, 3, 4, 5, 6}, ints) require.Equal(t, []int{0, 0, 0, 0, 0}, res) }) t.Run("basic increment", func(t *testing.T) { t.Parallel() ints := []int{1, 2, 3, 4, 5} res, err := MapErr(ints, func(val *int) (int, error) { return *val + 1, nil }) require.NoError(t, err) require.Equal(t, []int{2, 3, 4, 5, 6}, res) require.Equal(t, []int{1, 2, 3, 4, 5}, ints) }) err1 := errors.New("error1") err2 := errors.New("error1") t.Run("error is propagated", func(t *testing.T) { t.Parallel() ints := []int{1, 2, 3, 4, 5} res, err := MapErr(ints, func(val *int) (int, error) { if *val == 3 { return 0, err1 } return *val + 1, nil }) require.ErrorIs(t, err, err1) require.Equal(t, []int{2, 3, 0, 5, 6}, res) require.Equal(t, []int{1, 2, 3, 4, 5}, ints) }) t.Run("multiple errors are propagated", 
func(t *testing.T) { t.Parallel() ints := []int{1, 2, 3, 4, 5} res, err := MapErr(ints, func(val *int) (int, error) { if *val == 3 { return 0, err1 } if *val == 4 { return 0, err2 } return *val + 1, nil }) require.ErrorIs(t, err, err1) require.ErrorIs(t, err, err2) require.Equal(t, []int{2, 3, 0, 0, 6}, res) require.Equal(t, []int{1, 2, 3, 4, 5}, ints) }) t.Run("huge inputs", func(t *testing.T) { t.Parallel() ints := make([]int, 10000) res := Map(ints, func(val *int) int { return 1 }) expected := make([]int, 10000) for i := 0; i < 10000; i++ { expected[i] = 1 } require.Equal(t, expected, res) }) } conc-0.3.0/panics/000077500000000000000000000000001437630426700137255ustar00rootroot00000000000000conc-0.3.0/panics/panics.go000066400000000000000000000056151437630426700155400ustar00rootroot00000000000000package panics import ( "fmt" "runtime" "runtime/debug" "sync/atomic" ) // Catcher is used to catch panics. You can execute a function with Try, // which will catch any spawned panic. Try can be called any number of times, // from any number of goroutines. Once all calls to Try have completed, you can // get the value of the first panic (if any) with Recovered(), or you can just // propagate the panic (re-panic) with Repanic(). type Catcher struct { recovered atomic.Pointer[Recovered] } // Try executes f, catching any panic it might spawn. It is safe // to call from multiple goroutines simultaneously. func (p *Catcher) Try(f func()) { defer p.tryRecover() f() } func (p *Catcher) tryRecover() { if val := recover(); val != nil { rp := NewRecovered(1, val) p.recovered.CompareAndSwap(nil, &rp) } } // Repanic panics if any calls to Try caught a panic. It will panic with the // value of the first panic caught, wrapped in a panics.Recovered with caller // information. func (p *Catcher) Repanic() { if val := p.Recovered(); val != nil { panic(val) } } // Recovered returns the value of the first panic caught by Try, or nil if // no calls to Try panicked. func (p *Catcher) Recovered() *Recovered { return p.recovered.Load() } // NewRecovered creates a panics.Recovered from a panic value and a collected // stacktrace. The skip parameter allows the caller to skip stack frames when // collecting the stacktrace. Calling with a skip of 0 means include the call to // NewRecovered in the stacktrace. func NewRecovered(skip int, value any) Recovered { // 64 frames should be plenty var callers [64]uintptr n := runtime.Callers(skip+1, callers[:]) return Recovered{ Value: value, Callers: callers[:n], Stack: debug.Stack(), } } // Recovered is a panic that was caught with recover(). type Recovered struct { // The original value of the panic. Value any // The caller list as returned by runtime.Callers when the panic was // recovered. Can be used to produce a more detailed stack information with // runtime.CallersFrames. Callers []uintptr // The formatted stacktrace from the goroutine where the panic was recovered. // Easier to use than Callers. Stack []byte } // String renders a human-readable formatting of the panic. func (p *Recovered) String() string { return fmt.Sprintf("panic: %v\nstacktrace:\n%s\n", p.Value, p.Stack) } // AsError casts the panic into an error implementation. The implementation // is unwrappable with the cause of the panic, if the panic was provided one. func (p *Recovered) AsError() error { if p == nil { return nil } return &ErrRecovered{*p} } // ErrRecovered wraps a panics.Recovered in an error implementation. 
type ErrRecovered struct{ Recovered } var _ error = (*ErrRecovered)(nil) func (p *ErrRecovered) Error() string { return p.String() } func (p *ErrRecovered) Unwrap() error { if err, ok := p.Value.(error); ok { return err } return nil } conc-0.3.0/panics/panics_test.go000066400000000000000000000076221437630426700165770ustar00rootroot00000000000000package panics import ( "errors" "fmt" "runtime" "sync" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func ExampleCatcher() { var pc Catcher i := 0 pc.Try(func() { i += 1 }) pc.Try(func() { panic("abort!") }) pc.Try(func() { i += 1 }) rc := pc.Recovered() fmt.Println(i) fmt.Println(rc.Value.(string)) // Output: // 2 // abort! } func ExampleCatcher_callers() { var pc Catcher pc.Try(func() { panic("mayday!") }) recovered := pc.Recovered() // For debugging, the pre-formatted recovered.Stack is easier to use than // rc.Callers. This is not used in the example because its output is // machine-specific. frames := runtime.CallersFrames(recovered.Callers) for { frame, more := frames.Next() fmt.Println(frame.Function) if !more { break } } // Output: // github.com/sourcegraph/conc/panics.(*Catcher).tryRecover // runtime.gopanic // github.com/sourcegraph/conc/panics.ExampleCatcher_callers.func1 // github.com/sourcegraph/conc/panics.(*Catcher).Try // github.com/sourcegraph/conc/panics.ExampleCatcher_callers // testing.runExample // testing.runExamples // testing.(*M).Run // main.main // runtime.main // runtime.goexit } func ExampleCatcher_error() { helper := func() error { var pc Catcher pc.Try(func() { panic(errors.New("error")) }) return pc.Recovered().AsError() } if err := helper(); err != nil { // In normal use cases, you can use err.Error() output directly to // dump the panic's stack. This is not used in the example because // its output is machine-specific - instead, we demonstrate getting // the underlying error that was used for the panic. if cause := errors.Unwrap(err); cause != nil { fmt.Printf("helper panicked with an error: %s", cause) } } // Output: // helper panicked with an error: error } func TestCatcher(t *testing.T) { t.Parallel() err1 := errors.New("SOS") t.Run("error", func(t *testing.T) { t.Parallel() var pc Catcher pc.Try(func() { panic(err1) }) recovered := pc.Recovered() require.ErrorIs(t, recovered.AsError(), err1) require.ErrorAs(t, recovered.AsError(), &err1) // The exact contents aren't tested because the stacktrace contains local file paths // and even the structure of the stacktrace is bound to be unstable over time. Just // test a couple of basics. 
require.Contains(t, recovered.String(), "SOS", "formatted panic should contain the panic message") require.Contains(t, recovered.String(), "panics.(*Catcher).Try", recovered.String(), "formatted panic should contain the stack trace") }) t.Run("not error", func(t *testing.T) { var pc Catcher pc.Try(func() { panic("definitely not an error") }) recovered := pc.Recovered() require.NotErrorIs(t, recovered.AsError(), err1) require.Nil(t, errors.Unwrap(recovered.AsError())) }) t.Run("repanic panics", func(t *testing.T) { var pc Catcher pc.Try(func() { panic(err1) }) require.Panics(t, pc.Repanic) }) t.Run("repanic does not panic without child panic", func(t *testing.T) { t.Parallel() var pc Catcher pc.Try(func() { _ = 1 }) require.NotPanics(t, pc.Repanic) }) t.Run("is goroutine safe", func(t *testing.T) { t.Parallel() var wg sync.WaitGroup var pc Catcher for i := 0; i < 100; i++ { i := i wg.Add(1) func() { defer wg.Done() pc.Try(func() { if i == 50 { panic("50") } }) }() } wg.Wait() require.Equal(t, "50", pc.Recovered().Value) }) } func TestRecoveredAsError(t *testing.T) { t.Parallel() t.Run("as error is nil", func(t *testing.T) { t.Parallel() fn := func() error { var c Catcher c.Try(func() {}) return c.Recovered().AsError() } err := fn() assert.Nil(t, err) }) t.Run("as error is not nil nil", func(t *testing.T) { t.Parallel() fn := func() error { var c Catcher c.Try(func() { panic("oh dear!") }) return c.Recovered().AsError() } err := fn() assert.NotNil(t, err) }) } conc-0.3.0/panics/try.go000066400000000000000000000004431437630426700150730ustar00rootroot00000000000000package panics // Try executes f, catching and returning any panic it might spawn. // // The recovered panic can be propagated with panic(), or handled as a normal error with // (*panics.Recovered).AsError(). func Try(f func()) *Recovered { var c Catcher c.Try(f) return c.Recovered() } conc-0.3.0/panics/try_test.go000066400000000000000000000016251437630426700161350ustar00rootroot00000000000000package panics import ( "errors" "testing" "github.com/stretchr/testify/require" ) func TestTry(t *testing.T) { t.Parallel() t.Run("panics", func(t *testing.T) { t.Parallel() err := errors.New("SOS") recovered := Try(func() { panic(err) }) require.ErrorIs(t, recovered.AsError(), err) require.ErrorAs(t, recovered.AsError(), &err) // The exact contents aren't tested because the stacktrace contains local file paths // and even the structure of the stacktrace is bound to be unstable over time. Just // test a couple of basics. require.Contains(t, recovered.String(), "SOS", "formatted panic should contain the panic message") require.Contains(t, recovered.String(), "panics.(*Catcher).Try", recovered.String(), "formatted panic should contain the stack trace") }) t.Run("no panic", func(t *testing.T) { t.Parallel() recovered := Try(func() {}) require.Nil(t, recovered) }) } conc-0.3.0/pool/000077500000000000000000000000001437630426700134215ustar00rootroot00000000000000conc-0.3.0/pool/context_pool.go000066400000000000000000000055451437630426700164760ustar00rootroot00000000000000package pool import ( "context" ) // ContextPool is a pool that runs tasks that take a context. // A new ContextPool should be created with `New().WithContext(ctx)`. // // The configuration methods (With*) will panic if they are used after calling // Go() for the first time. type ContextPool struct { errorPool ErrorPool ctx context.Context cancel context.CancelFunc cancelOnError bool } // Go submits a task. 
If it returns an error, the error will be // collected and returned by Wait(). If all goroutines in the pool // are busy, a call to Go() will block until the task can be started. func (p *ContextPool) Go(f func(ctx context.Context) error) { p.errorPool.Go(func() error { if p.cancelOnError { // If we are cancelling on error, then we also want to cancel if a // panic is raised. To do this, we need to recover, cancel, and then // re-throw the caught panic. defer func() { if r := recover(); r != nil { p.cancel() panic(r) } }() } err := f(p.ctx) if err != nil && p.cancelOnError { // Leaky abstraction warning: We add the error directly because // otherwise, canceling could cause another goroutine to exit and // return an error before this error was added, which breaks the // expectations of WithFirstError(). p.errorPool.addErr(err) p.cancel() return nil } return err }) } // Wait cleans up all spawned goroutines, propagates any panics, and // returns an error if any of the tasks errored. func (p *ContextPool) Wait() error { // Make sure we call cancel after pool is done to avoid memory leakage. defer p.cancel() return p.errorPool.Wait() } // WithFirstError configures the pool to only return the first error // returned by a task. By default, Wait() will return a combined error. // This is particularly useful for (*ContextPool).WithCancelOnError(), // where all errors after the first are likely to be context.Canceled. func (p *ContextPool) WithFirstError() *ContextPool { p.panicIfInitialized() p.errorPool.WithFirstError() return p } // WithCancelOnError configures the pool to cancel its context as soon as // any task returns an error or panics. By default, the pool's context is not // canceled until the parent context is canceled. // // In this case, all errors returned from the pool after the first will // likely be context.Canceled - you may want to also use // (*ContextPool).WithFirstError() to configure the pool to only return // the first error. func (p *ContextPool) WithCancelOnError() *ContextPool { p.panicIfInitialized() p.cancelOnError = true return p } // WithMaxGoroutines limits the number of goroutines in a pool. // Defaults to unlimited. Panics if n < 1. func (p *ContextPool) WithMaxGoroutines(n int) *ContextPool { p.panicIfInitialized() p.errorPool.WithMaxGoroutines(n) return p } func (p *ContextPool) panicIfInitialized() { p.errorPool.panicIfInitialized() } conc-0.3.0/pool/context_pool_test.go000066400000000000000000000140201437630426700175210ustar00rootroot00000000000000package pool import ( "context" "errors" "fmt" "strconv" "sync/atomic" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func ExampleContextPool_WithCancelOnError() { p := New(). WithMaxGoroutines(4). WithContext(context.Background()). WithCancelOnError() for i := 0; i < 3; i++ { i := i p.Go(func(ctx context.Context) error { if i == 2 { return errors.New("I will cancel all other tasks!") } <-ctx.Done() return nil }) } err := p.Wait() fmt.Println(err) // Output: // I will cancel all other tasks! 
} func TestContextPool(t *testing.T) { t.Parallel() err1 := errors.New("err1") err2 := errors.New("err2") bgctx := context.Background() t.Run("panics on configuration after init", func(t *testing.T) { t.Run("before wait", func(t *testing.T) { t.Parallel() g := New().WithContext(context.Background()) g.Go(func(context.Context) error { return nil }) require.Panics(t, func() { g.WithMaxGoroutines(10) }) }) t.Run("after wait", func(t *testing.T) { t.Parallel() g := New().WithContext(context.Background()) g.Go(func(context.Context) error { return nil }) _ = g.Wait() require.Panics(t, func() { g.WithMaxGoroutines(10) }) }) }) t.Run("behaves the same as ErrorGroup", func(t *testing.T) { t.Parallel() t.Run("wait returns no error if no errors", func(t *testing.T) { t.Parallel() p := New().WithContext(bgctx) p.Go(func(context.Context) error { return nil }) require.NoError(t, p.Wait()) }) t.Run("wait errors if func returns error", func(t *testing.T) { t.Parallel() p := New().WithContext(bgctx) p.Go(func(context.Context) error { return err1 }) require.ErrorIs(t, p.Wait(), err1) }) t.Run("wait error is all returned errors", func(t *testing.T) { t.Parallel() p := New().WithErrors().WithContext(bgctx) p.Go(func(context.Context) error { return err1 }) p.Go(func(context.Context) error { return nil }) p.Go(func(context.Context) error { return err2 }) err := p.Wait() require.ErrorIs(t, err, err1) require.ErrorIs(t, err, err2) }) }) t.Run("context error propagates", func(t *testing.T) { t.Parallel() t.Run("canceled", func(t *testing.T) { t.Parallel() ctx, cancel := context.WithCancel(bgctx) p := New().WithContext(ctx) p.Go(func(ctx context.Context) error { <-ctx.Done() return ctx.Err() }) cancel() require.ErrorIs(t, p.Wait(), context.Canceled) }) t.Run("timed out", func(t *testing.T) { t.Parallel() ctx, cancel := context.WithTimeout(bgctx, time.Millisecond) defer cancel() p := New().WithContext(ctx) p.Go(func(ctx context.Context) error { <-ctx.Done() return ctx.Err() }) require.ErrorIs(t, p.Wait(), context.DeadlineExceeded) }) }) t.Run("WithCancelOnError", func(t *testing.T) { t.Parallel() p := New().WithContext(bgctx).WithCancelOnError() p.Go(func(ctx context.Context) error { <-ctx.Done() return ctx.Err() }) p.Go(func(ctx context.Context) error { return err1 }) err := p.Wait() require.ErrorIs(t, err, context.Canceled) require.ErrorIs(t, err, err1) }) t.Run("no WithCancelOnError", func(t *testing.T) { t.Parallel() p := New().WithContext(bgctx) p.Go(func(ctx context.Context) error { select { case <-ctx.Done(): return ctx.Err() case <-time.After(10 * time.Millisecond): return nil } }) p.Go(func(ctx context.Context) error { return err1 }) err := p.Wait() require.ErrorIs(t, err, err1) require.NotErrorIs(t, err, context.Canceled) }) t.Run("WithFirstError", func(t *testing.T) { t.Parallel() p := New().WithContext(bgctx).WithFirstError() sync := make(chan struct{}) p.Go(func(ctx context.Context) error { defer close(sync) return err1 }) p.Go(func(ctx context.Context) error { // This test has a race condition. After the first goroutine // completes, this goroutine is woken up because sync is closed. // However, this goroutine might be woken up before the error from // the first goroutine is registered. To prevent that, we sleep for // another 10 milliseconds, giving the other goroutine time to return // and register its error before this goroutine returns its error. 
<-sync time.Sleep(10 * time.Millisecond) return err2 }) err := p.Wait() require.ErrorIs(t, err, err1) require.NotErrorIs(t, err, err2) }) t.Run("WithFirstError and WithCancelOnError", func(t *testing.T) { t.Parallel() p := New().WithContext(bgctx).WithFirstError().WithCancelOnError() p.Go(func(ctx context.Context) error { return err1 }) p.Go(func(ctx context.Context) error { <-ctx.Done() return ctx.Err() }) err := p.Wait() require.ErrorIs(t, err, err1) require.NotErrorIs(t, err, context.Canceled) }) t.Run("WithCancelOnError and panic", func(t *testing.T) { t.Parallel() p := New().WithContext(bgctx).WithCancelOnError() var cancelledTasks atomic.Int64 p.Go(func(ctx context.Context) error { <-ctx.Done() cancelledTasks.Add(1) return ctx.Err() }) p.Go(func(ctx context.Context) error { <-ctx.Done() cancelledTasks.Add(1) return ctx.Err() }) p.Go(func(ctx context.Context) error { panic("abort!") }) assert.Panics(t, func() { _ = p.Wait() }) assert.EqualValues(t, 2, cancelledTasks.Load()) }) t.Run("limit", func(t *testing.T) { t.Parallel() for _, maxConcurrent := range []int{1, 10, 100} { t.Run(strconv.Itoa(maxConcurrent), func(t *testing.T) { maxConcurrent := maxConcurrent // copy t.Parallel() p := New().WithContext(bgctx).WithMaxGoroutines(maxConcurrent) var currentConcurrent atomic.Int64 for i := 0; i < 100; i++ { p.Go(func(context.Context) error { cur := currentConcurrent.Add(1) if cur > int64(maxConcurrent) { return fmt.Errorf("expected no more than %d concurrent goroutine", maxConcurrent) } time.Sleep(time.Millisecond) currentConcurrent.Add(-1) return nil }) } require.NoError(t, p.Wait()) require.Equal(t, int64(0), currentConcurrent.Load()) }) } }) } conc-0.3.0/pool/error_pool.go000066400000000000000000000046631437630426700161430ustar00rootroot00000000000000package pool import ( "context" "sync" "github.com/sourcegraph/conc/internal/multierror" ) // ErrorPool is a pool that runs tasks that may return an error. // Errors are collected and returned by Wait(). // // The configuration methods (With*) will panic if they are used after calling // Go() for the first time. // // A new ErrorPool should be created using `New().WithErrors()`. type ErrorPool struct { pool Pool onlyFirstError bool mu sync.Mutex errs error } // Go submits a task to the pool. If all goroutines in the pool // are busy, a call to Go() will block until the task can be started. func (p *ErrorPool) Go(f func() error) { p.pool.Go(func() { p.addErr(f()) }) } // Wait cleans up any spawned goroutines, propagating any panics and // returning any errors from tasks. func (p *ErrorPool) Wait() error { p.pool.Wait() return p.errs } // WithContext converts the pool to a ContextPool for tasks that should // run under the same context, such that they each respect shared cancellation. // For example, WithCancelOnError can be configured on the returned pool to // signal that all goroutines should be cancelled upon the first error. func (p *ErrorPool) WithContext(ctx context.Context) *ContextPool { p.panicIfInitialized() ctx, cancel := context.WithCancel(ctx) return &ContextPool{ errorPool: p.deref(), ctx: ctx, cancel: cancel, } } // WithFirstError configures the pool to only return the first error // returned by a task. By default, Wait() will return a combined error. func (p *ErrorPool) WithFirstError() *ErrorPool { p.panicIfInitialized() p.onlyFirstError = true return p } // WithMaxGoroutines limits the number of goroutines in a pool. // Defaults to unlimited. Panics if n < 1. 
func (p *ErrorPool) WithMaxGoroutines(n int) *ErrorPool { p.panicIfInitialized() p.pool.WithMaxGoroutines(n) return p } // deref is a helper that creates a shallow copy of the pool with the same // settings. We don't want to just dereference the pointer because that makes // the copylock lint angry. func (p *ErrorPool) deref() ErrorPool { return ErrorPool{ pool: p.pool.deref(), onlyFirstError: p.onlyFirstError, } } func (p *ErrorPool) panicIfInitialized() { p.pool.panicIfInitialized() } func (p *ErrorPool) addErr(err error) { if err != nil { p.mu.Lock() if p.onlyFirstError { if p.errs == nil { p.errs = err } } else { p.errs = multierror.Join(p.errs, err) } p.mu.Unlock() } } conc-0.3.0/pool/error_pool_test.go000066400000000000000000000050621437630426700171740ustar00rootroot00000000000000package pool import ( "errors" "fmt" "strconv" "sync/atomic" "testing" "time" "github.com/stretchr/testify/require" ) func ExampleErrorPool() { p := New().WithErrors() for i := 0; i < 3; i++ { i := i p.Go(func() error { if i == 2 { return errors.New("oh no!") } return nil }) } err := p.Wait() fmt.Println(err) // Output: // oh no! } func TestErrorPool(t *testing.T) { t.Parallel() err1 := errors.New("err1") err2 := errors.New("err2") t.Run("panics on configuration after init", func(t *testing.T) { t.Run("before wait", func(t *testing.T) { t.Parallel() g := New().WithErrors() g.Go(func() error { return nil }) require.Panics(t, func() { g.WithMaxGoroutines(10) }) }) t.Run("after wait", func(t *testing.T) { t.Parallel() g := New().WithErrors() g.Go(func() error { return nil }) _ = g.Wait() require.Panics(t, func() { g.WithMaxGoroutines(10) }) }) }) t.Run("wait returns no error if no errors", func(t *testing.T) { t.Parallel() g := New().WithErrors() g.Go(func() error { return nil }) require.NoError(t, g.Wait()) }) t.Run("wait error if func returns error", func(t *testing.T) { t.Parallel() g := New().WithErrors() g.Go(func() error { return err1 }) require.ErrorIs(t, g.Wait(), err1) }) t.Run("wait error is all returned errors", func(t *testing.T) { t.Parallel() g := New().WithErrors() g.Go(func() error { return err1 }) g.Go(func() error { return nil }) g.Go(func() error { return err2 }) err := g.Wait() require.ErrorIs(t, err, err1) require.ErrorIs(t, err, err2) }) t.Run("propagates panics", func(t *testing.T) { t.Parallel() g := New().WithErrors() for i := 0; i < 10; i++ { i := i g.Go(func() error { if i == 5 { panic("fatal") } return nil }) } require.Panics(t, func() { _ = g.Wait() }) }) t.Run("limit", func(t *testing.T) { t.Parallel() for _, maxGoroutines := range []int{1, 10, 100} { t.Run(strconv.Itoa(maxGoroutines), func(t *testing.T) { g := New().WithErrors().WithMaxGoroutines(maxGoroutines) var currentConcurrent atomic.Int64 taskCount := maxGoroutines * 10 for i := 0; i < taskCount; i++ { g.Go(func() error { cur := currentConcurrent.Add(1) if cur > int64(maxGoroutines) { return fmt.Errorf("expected no more than %d concurrent goroutine", maxGoroutines) } time.Sleep(time.Millisecond) currentConcurrent.Add(-1) return nil }) } require.NoError(t, g.Wait()) require.Equal(t, int64(0), currentConcurrent.Load()) }) } }) } conc-0.3.0/pool/pool.go000066400000000000000000000104431437630426700147230ustar00rootroot00000000000000package pool import ( "context" "sync" "github.com/sourcegraph/conc" ) // New creates a new Pool. func New() *Pool { return &Pool{} } // Pool is a pool of goroutines used to execute tasks concurrently. // // Tasks are submitted with Go(). 
Once all your tasks have been submitted, you // must call Wait() to clean up any spawned goroutines and propagate any // panics. // // Goroutines are started lazily, so creating a new pool is cheap. There will // never be more goroutines spawned than there are tasks submitted. // // The configuration methods (With*) will panic if they are used after calling // Go() for the first time. // // Pool is efficient, but not zero cost. It should not be used for very short // tasks. Startup and teardown come with an overhead of around 1µs, and each // task has an overhead of around 300ns. type Pool struct { handle conc.WaitGroup limiter limiter tasks chan func() initOnce sync.Once } // Go submits a task to be run in the pool. If all goroutines in the pool // are busy, a call to Go() will block until the task can be started. func (p *Pool) Go(f func()) { p.init() if p.limiter == nil { // No limit on the number of goroutines. select { case p.tasks <- f: // A goroutine was available to handle the task. default: // No goroutine was available to handle the task. // Spawn a new one and send it the task. p.handle.Go(p.worker) p.tasks <- f } } else { select { case p.limiter <- struct{}{}: // If we are below our limit, spawn a new worker rather // than waiting for one to become available. p.handle.Go(p.worker) // We know there is at least one worker running, so wait // for it to become available. This ensures we never spawn // more workers than the number of tasks. p.tasks <- f case p.tasks <- f: // A worker is available and has accepted the task. return } } } // Wait cleans up spawned goroutines, propagating any panics that were // raised by a tasks. func (p *Pool) Wait() { p.init() close(p.tasks) p.handle.Wait() } // MaxGoroutines returns the maximum size of the pool. func (p *Pool) MaxGoroutines() int { return p.limiter.limit() } // WithMaxGoroutines limits the number of goroutines in a pool. // Defaults to unlimited. Panics if n < 1. func (p *Pool) WithMaxGoroutines(n int) *Pool { p.panicIfInitialized() if n < 1 { panic("max goroutines in a pool must be greater than zero") } p.limiter = make(limiter, n) return p } // init ensures that the pool is initialized before use. This makes the // zero value of the pool usable. func (p *Pool) init() { p.initOnce.Do(func() { p.tasks = make(chan func()) }) } // panicIfInitialized will trigger a panic if a configuration method is called // after the pool has started any goroutines for the first time. In the case that // new settings are needed, a new pool should be created. func (p *Pool) panicIfInitialized() { if p.tasks != nil { panic("pool can not be reconfigured after calling Go() for the first time") } } // WithErrors converts the pool to an ErrorPool so the submitted tasks can // return errors. func (p *Pool) WithErrors() *ErrorPool { p.panicIfInitialized() return &ErrorPool{ pool: p.deref(), } } // deref is a helper that creates a shallow copy of the pool with the same // settings. We don't want to just dereference the pointer because that makes // the copylock lint angry. func (p *Pool) deref() Pool { p.panicIfInitialized() return Pool{ limiter: p.limiter, } } // WithContext converts the pool to a ContextPool for tasks that should // run under the same context, such that they each respect shared cancellation. // For example, WithCancelOnError can be configured on the returned pool to // signal that all goroutines should be cancelled upon the first error. 
func (p *Pool) WithContext(ctx context.Context) *ContextPool {
	p.panicIfInitialized()
	ctx, cancel := context.WithCancel(ctx)
	return &ContextPool{
		errorPool: p.WithErrors().deref(),
		ctx:       ctx,
		cancel:    cancel,
	}
}

func (p *Pool) worker() {
	// The only time this matters is if the task panics.
	// This makes it possible to spin up new workers in that case.
	defer p.limiter.release()

	for f := range p.tasks {
		f()
	}
}

type limiter chan struct{}

func (l limiter) limit() int {
	return cap(l)
}

func (l limiter) release() {
	if l != nil {
		<-l
	}
}

conc-0.3.0/pool/pool_test.go

package pool

import (
	"fmt"
	"strconv"
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

func ExamplePool() {
	p := New().WithMaxGoroutines(3)
	for i := 0; i < 5; i++ {
		p.Go(func() {
			fmt.Println("conc")
		})
	}
	p.Wait()
	// Output:
	// conc
	// conc
	// conc
	// conc
	// conc
}

func TestPool(t *testing.T) {
	t.Parallel()

	t.Run("basic", func(t *testing.T) {
		t.Parallel()
		g := New()
		var completed atomic.Int64
		for i := 0; i < 100; i++ {
			g.Go(func() {
				time.Sleep(1 * time.Millisecond)
				completed.Add(1)
			})
		}
		g.Wait()
		require.Equal(t, completed.Load(), int64(100))
	})

	t.Run("panics on configuration after init", func(t *testing.T) {
		t.Run("before wait", func(t *testing.T) {
			t.Parallel()
			g := New()
			g.Go(func() {})
			require.Panics(t, func() { g.WithMaxGoroutines(10) })
		})

		t.Run("after wait", func(t *testing.T) {
			t.Parallel()
			g := New()
			g.Go(func() {})
			g.Wait()
			require.Panics(t, func() { g.WithMaxGoroutines(10) })
		})
	})

	t.Run("limit", func(t *testing.T) {
		t.Parallel()
		for _, maxConcurrent := range []int{1, 10, 100} {
			t.Run(strconv.Itoa(maxConcurrent), func(t *testing.T) {
				g := New().WithMaxGoroutines(maxConcurrent)

				var currentConcurrent atomic.Int64
				var errCount atomic.Int64
				taskCount := maxConcurrent * 10
				for i := 0; i < taskCount; i++ {
					g.Go(func() {
						cur := currentConcurrent.Add(1)
						if cur > int64(maxConcurrent) {
							errCount.Add(1)
						}
						time.Sleep(time.Millisecond)
						currentConcurrent.Add(-1)
					})
				}
				g.Wait()
				require.Equal(t, int64(0), errCount.Load())
				require.Equal(t, int64(0), currentConcurrent.Load())
			})
		}
	})

	t.Run("propagate panic", func(t *testing.T) {
		t.Parallel()
		g := New()
		for i := 0; i < 10; i++ {
			i := i
			g.Go(func() {
				if i == 5 {
					panic(i)
				}
			})
		}
		require.Panics(t, g.Wait)
	})

	t.Run("panics do not exhaust goroutines", func(t *testing.T) {
		t.Parallel()
		g := New().WithMaxGoroutines(2)

		for i := 0; i < 10; i++ {
			g.Go(func() {
				panic(42)
			})
		}
		require.Panics(t, g.Wait)
	})

	t.Run("panics on invalid WithMaxGoroutines", func(t *testing.T) {
		t.Parallel()
		require.Panics(t, func() { New().WithMaxGoroutines(0) })
	})

	t.Run("returns correct MaxGoroutines", func(t *testing.T) {
		t.Parallel()
		p := New().WithMaxGoroutines(42)
		require.Equal(t, 42, p.MaxGoroutines())
	})
}

func BenchmarkPool(b *testing.B) {
	b.Run("startup and teardown", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			p := New()
			p.Go(func() {})
			p.Wait()
		}
	})

	b.Run("per task", func(b *testing.B) {
		p := New()
		f := func() {}
		for i := 0; i < b.N; i++ {
			p.Go(f)
		}
		p.Wait()
	})
}

conc-0.3.0/pool/result_context_pool.go

package pool

import (
	"context"
)

// ResultContextPool is a pool that runs tasks that take a context and return a
// result. The context passed to the task will be canceled if any of the tasks
// return an error, which makes its functionality different from just capturing
// a context with the task closure.
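//
// A minimal usage sketch (User, ids, and loadUser are placeholders, not part
// of this package):
//
//	p := pool.NewWithResults[User]().WithContext(ctx).WithCancelOnError()
//	for _, id := range ids {
//		id := id
//		p.Go(func(ctx context.Context) (User, error) { return loadUser(ctx, id) })
//	}
//	users, err := p.Wait()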
//
// The configuration methods (With*) will panic if they are used after calling
// Go() for the first time.
type ResultContextPool[T any] struct {
	contextPool    ContextPool
	agg            resultAggregator[T]
	collectErrored bool
}

// Go submits a task to the pool. If all goroutines in the pool
// are busy, a call to Go() will block until the task can be started.
func (p *ResultContextPool[T]) Go(f func(context.Context) (T, error)) {
	p.contextPool.Go(func(ctx context.Context) error {
		res, err := f(ctx)
		if err == nil || p.collectErrored {
			p.agg.add(res)
		}
		return err
	})
}

// Wait cleans up all spawned goroutines, propagates any panics, and
// returns an error if any of the tasks errored.
func (p *ResultContextPool[T]) Wait() ([]T, error) {
	err := p.contextPool.Wait()
	return p.agg.results, err
}

// WithCollectErrored configures the pool to still collect the result of a task
// even if the task returned an error. By default, the results of tasks that
// errored are ignored and only the error is collected.
func (p *ResultContextPool[T]) WithCollectErrored() *ResultContextPool[T] {
	p.panicIfInitialized()
	p.collectErrored = true
	return p
}

// WithFirstError configures the pool to only return the first error
// returned by a task. By default, Wait() will return a combined error.
func (p *ResultContextPool[T]) WithFirstError() *ResultContextPool[T] {
	p.panicIfInitialized()
	p.contextPool.WithFirstError()
	return p
}

// WithCancelOnError configures the pool to cancel its context as soon as
// any task returns an error. By default, the pool's context is not
// canceled until the parent context is canceled.
func (p *ResultContextPool[T]) WithCancelOnError() *ResultContextPool[T] {
	p.panicIfInitialized()
	p.contextPool.WithCancelOnError()
	return p
}

// WithMaxGoroutines limits the number of goroutines in a pool.
// Defaults to unlimited. Panics if n < 1.
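//
// For example, to allow at most 8 tasks in flight at once (a sketch; ctx is
// a caller-provided context):
//
//	p := pool.NewWithResults[int]().WithContext(ctx).WithMaxGoroutines(8)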
func (p *ResultContextPool[T]) WithMaxGoroutines(n int) *ResultContextPool[T] {
	p.panicIfInitialized()
	p.contextPool.WithMaxGoroutines(n)
	return p
}

func (p *ResultContextPool[T]) panicIfInitialized() {
	p.contextPool.panicIfInitialized()
}

conc-0.3.0/pool/result_context_pool_test.go

package pool

import (
	"context"
	"errors"
	"fmt"
	"sort"
	"strconv"
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestResultContextPool(t *testing.T) {
	t.Parallel()

	err1 := errors.New("err1")
	err2 := errors.New("err2")

	t.Run("panics on configuration after init", func(t *testing.T) {
		t.Run("before wait", func(t *testing.T) {
			t.Parallel()
			g := NewWithResults[int]().WithContext(context.Background())
			g.Go(func(context.Context) (int, error) { return 0, nil })
			require.Panics(t, func() { g.WithMaxGoroutines(10) })
		})

		t.Run("after wait", func(t *testing.T) {
			t.Parallel()
			g := NewWithResults[int]().WithContext(context.Background())
			g.Go(func(context.Context) (int, error) { return 0, nil })
			_, _ = g.Wait()
			require.Panics(t, func() { g.WithMaxGoroutines(10) })
		})
	})

	t.Run("behaves the same as ErrorGroup", func(t *testing.T) {
		t.Parallel()
		bgctx := context.Background()

		t.Run("wait returns no error if no errors", func(t *testing.T) {
			t.Parallel()
			g := NewWithResults[int]().WithContext(bgctx)
			g.Go(func(context.Context) (int, error) { return 0, nil })
			res, err := g.Wait()
			require.Len(t, res, 1)
			require.NoError(t, err)
		})

		t.Run("wait error if func returns error", func(t *testing.T) {
			t.Parallel()
			g := NewWithResults[int]().WithContext(bgctx)
			g.Go(func(context.Context) (int, error) { return 0, err1 })
			res, err := g.Wait()
			require.Len(t, res, 0)
			require.ErrorIs(t, err, err1)
		})

		t.Run("wait error is all returned errors", func(t *testing.T) {
			t.Parallel()
			g := NewWithResults[int]().WithErrors().WithContext(bgctx)
			g.Go(func(context.Context) (int, error) { return 0, err1 })
			g.Go(func(context.Context) (int, error) { return 0, nil })
			g.Go(func(context.Context) (int, error) { return 0, err2 })
			res, err := g.Wait()
			require.Len(t, res, 1)
			require.ErrorIs(t, err, err1)
			require.ErrorIs(t, err, err2)
		})
	})

	t.Run("context cancel propagates", func(t *testing.T) {
		t.Parallel()
		ctx, cancel := context.WithCancel(context.Background())
		g := NewWithResults[int]().WithContext(ctx)
		g.Go(func(ctx context.Context) (int, error) {
			<-ctx.Done()
			return 0, ctx.Err()
		})
		cancel()
		res, err := g.Wait()
		require.Len(t, res, 0)
		require.ErrorIs(t, err, context.Canceled)
	})

	t.Run("WithCancelOnError", func(t *testing.T) {
		t.Parallel()
		g := NewWithResults[int]().WithContext(context.Background()).WithCancelOnError()
		g.Go(func(ctx context.Context) (int, error) {
			<-ctx.Done()
			return 0, ctx.Err()
		})
		g.Go(func(ctx context.Context) (int, error) {
			return 0, err1
		})
		res, err := g.Wait()
		require.Len(t, res, 0)
		require.ErrorIs(t, err, context.Canceled)
		require.ErrorIs(t, err, err1)
	})

	t.Run("WithCancelOnError and panic", func(t *testing.T) {
		t.Parallel()
		p := NewWithResults[int]().
			WithContext(context.Background()).
			WithCancelOnError()
		var cancelledTasks atomic.Int64
		p.Go(func(ctx context.Context) (int, error) {
			<-ctx.Done()
			cancelledTasks.Add(1)
			return 0, ctx.Err()
		})
		p.Go(func(ctx context.Context) (int, error) {
			<-ctx.Done()
			cancelledTasks.Add(1)
			return 0, ctx.Err()
		})
		p.Go(func(ctx context.Context) (int, error) {
			panic("abort!")
		})
		assert.Panics(t, func() { _, _ = p.Wait() })
		assert.EqualValues(t, 2, cancelledTasks.Load())
	})

	t.Run("no WithCancelOnError", func(t *testing.T) {
		t.Parallel()
		g := NewWithResults[int]().WithContext(context.Background())
		g.Go(func(ctx context.Context) (int, error) {
			select {
			case <-ctx.Done():
				return 0, ctx.Err()
			case <-time.After(10 * time.Millisecond):
				return 0, nil
			}
		})
		g.Go(func(ctx context.Context) (int, error) {
			return 0, err1
		})
		res, err := g.Wait()
		require.Len(t, res, 1)
		require.NotErrorIs(t, err, context.Canceled)
		require.ErrorIs(t, err, err1)
	})

	t.Run("WithCollectErrored", func(t *testing.T) {
		t.Parallel()
		g := NewWithResults[int]().WithContext(context.Background()).WithCollectErrored()
		g.Go(func(context.Context) (int, error) { return 0, err1 })
		res, err := g.Wait()
		require.Len(t, res, 1) // errored value is collected
		require.ErrorIs(t, err, err1)
	})

	t.Run("WithFirstError", func(t *testing.T) {
		t.Parallel()
		g := NewWithResults[int]().WithContext(context.Background()).WithFirstError()
		sync := make(chan struct{})
		g.Go(func(ctx context.Context) (int, error) {
			defer close(sync)
			return 0, err1
		})
		g.Go(func(ctx context.Context) (int, error) {
			// This test has a race condition. After the first goroutine
			// completes, this goroutine is woken up because sync is closed.
			// However, this goroutine might be woken up before the error from
			// the first goroutine is registered. To prevent that, we sleep for
			// another 10 milliseconds, giving the other goroutine time to return
			// and register its error before this goroutine returns its error.
			<-sync
			time.Sleep(10 * time.Millisecond)
			return 0, err2
		})
		res, err := g.Wait()
		require.Len(t, res, 0)
		require.ErrorIs(t, err, err1)
		require.NotErrorIs(t, err, err2)
	})

	t.Run("limit", func(t *testing.T) {
		t.Parallel()
		for _, maxConcurrency := range []int{1, 10, 100} {
			t.Run(strconv.Itoa(maxConcurrency), func(t *testing.T) {
				maxConcurrency := maxConcurrency // copy
				t.Parallel()

				ctx := context.Background()
				g := NewWithResults[int]().WithContext(ctx).WithMaxGoroutines(maxConcurrency)

				var currentConcurrent atomic.Int64
				taskCount := maxConcurrency * 10
				expected := make([]int, taskCount)
				for i := 0; i < taskCount; i++ {
					i := i
					expected[i] = i
					g.Go(func(context.Context) (int, error) {
						cur := currentConcurrent.Add(1)
						if cur > int64(maxConcurrency) {
							return 0, fmt.Errorf("expected no more than %d concurrent goroutines", maxConcurrency)
						}
						time.Sleep(time.Millisecond)
						currentConcurrent.Add(-1)
						return i, nil
					})
				}
				res, err := g.Wait()
				sort.Ints(res)
				require.Equal(t, expected, res)
				require.NoError(t, err)
				require.Equal(t, int64(0), currentConcurrent.Load())
			})
		}
	})
}

conc-0.3.0/pool/result_error_pool.go

package pool

import (
	"context"
)

// ResultErrorPool is a pool that executes tasks that return a generic result
// type and an error. Tasks are executed in the pool with Go(), then the
// results of the tasks are returned by Wait().
//
// The order of the results is not guaranteed to be the same as the order the
// tasks were submitted. If your use case requires consistent ordering,
// consider using the `stream` package or `Map` from the `iter` package.
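//
// A minimal usage sketch (work is a placeholder, not part of this package):
//
//	p := pool.NewWithResults[int]().WithErrors()
//	for i := 0; i < 10; i++ {
//		i := i
//		p.Go(func() (int, error) { return work(i) })
//	}
//	results, err := p.Wait()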
//
// The configuration methods (With*) will panic if they are used after calling
// Go() for the first time.
type ResultErrorPool[T any] struct {
	errorPool      ErrorPool
	agg            resultAggregator[T]
	collectErrored bool
}

// Go submits a task to the pool. If all goroutines in the pool
// are busy, a call to Go() will block until the task can be started.
func (p *ResultErrorPool[T]) Go(f func() (T, error)) {
	p.errorPool.Go(func() error {
		res, err := f()
		if err == nil || p.collectErrored {
			p.agg.add(res)
		}
		return err
	})
}

// Wait cleans up any spawned goroutines, propagating any panics and
// returning the results and any errors from tasks.
func (p *ResultErrorPool[T]) Wait() ([]T, error) {
	err := p.errorPool.Wait()
	return p.agg.results, err
}

// WithCollectErrored configures the pool to still collect the result of a task
// even if the task returned an error. By default, the results of tasks that
// errored are ignored and only the error is collected.
func (p *ResultErrorPool[T]) WithCollectErrored() *ResultErrorPool[T] {
	p.panicIfInitialized()
	p.collectErrored = true
	return p
}

// WithContext converts the pool to a ResultContextPool for tasks that should
// run under the same context, such that they each respect shared cancellation.
// For example, WithCancelOnError can be configured on the returned pool to
// signal that all goroutines should be cancelled upon the first error.
func (p *ResultErrorPool[T]) WithContext(ctx context.Context) *ResultContextPool[T] {
	p.panicIfInitialized()
	return &ResultContextPool[T]{
		contextPool: *p.errorPool.WithContext(ctx),
	}
}

// WithFirstError configures the pool to only return the first error
// returned by a task. By default, Wait() will return a combined error.
func (p *ResultErrorPool[T]) WithFirstError() *ResultErrorPool[T] {
	p.panicIfInitialized()
	p.errorPool.WithFirstError()
	return p
}

// WithMaxGoroutines limits the number of goroutines in a pool.
// Defaults to unlimited. Panics if n < 1.
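//
// Since the configuration methods chain, a bounded, fail-fast pool can be
// built in one expression (a sketch):
//
//	p := pool.NewWithResults[string]().WithErrors().WithFirstError().WithMaxGoroutines(4)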
func (p *ResultErrorPool[T]) WithMaxGoroutines(n int) *ResultErrorPool[T] {
	p.panicIfInitialized()
	p.errorPool.WithMaxGoroutines(n)
	return p
}

func (p *ResultErrorPool[T]) panicIfInitialized() {
	p.errorPool.panicIfInitialized()
}

conc-0.3.0/pool/result_error_pool_test.go

package pool

import (
	"errors"
	"fmt"
	"strconv"
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

func TestResultErrorGroup(t *testing.T) {
	t.Parallel()

	err1 := errors.New("err1")
	err2 := errors.New("err2")

	t.Run("panics on configuration after init", func(t *testing.T) {
		t.Run("before wait", func(t *testing.T) {
			t.Parallel()
			g := NewWithResults[int]().WithErrors()
			g.Go(func() (int, error) { return 0, nil })
			require.Panics(t, func() { g.WithMaxGoroutines(10) })
		})

		t.Run("after wait", func(t *testing.T) {
			t.Parallel()
			g := NewWithResults[int]().WithErrors()
			g.Go(func() (int, error) { return 0, nil })
			_, _ = g.Wait()
			require.Panics(t, func() { g.WithMaxGoroutines(10) })
		})
	})

	t.Run("wait returns no error if no errors", func(t *testing.T) {
		t.Parallel()
		g := NewWithResults[int]().WithErrors()
		g.Go(func() (int, error) { return 1, nil })
		res, err := g.Wait()
		require.NoError(t, err)
		require.Equal(t, []int{1}, res)
	})

	t.Run("wait error if func returns error", func(t *testing.T) {
		t.Parallel()
		g := NewWithResults[int]().WithErrors()
		g.Go(func() (int, error) { return 0, err1 })
		res, err := g.Wait()
		require.Len(t, res, 0) // errored value is ignored
		require.ErrorIs(t, err, err1)
	})

	t.Run("WithCollectErrored", func(t *testing.T) {
		t.Parallel()
		g := NewWithResults[int]().WithErrors().WithCollectErrored()
		g.Go(func() (int, error) { return 0, err1 })
		res, err := g.Wait()
		require.Len(t, res, 1) // errored value is collected
		require.ErrorIs(t, err, err1)
	})

	t.Run("WithFirstError", func(t *testing.T) {
		t.Parallel()
		g := NewWithResults[int]().WithErrors().WithFirstError()
		synchronizer := make(chan struct{})
		g.Go(func() (int, error) {
			<-synchronizer
			// This test has an intrinsic race condition that can be reproduced
			// by adding a `defer time.Sleep(time.Second)` before the `defer
			// close(synchronizer)`. We cannot guarantee that the group processes
			// the return value of the second goroutine before the first goroutine
			// exits in response to synchronizer, so we add a sleep here to make
			// this race condition vanishingly unlikely. Note that this is a race
			// in the test, not in the library.
			time.Sleep(100 * time.Millisecond)
			return 0, err1
		})
		g.Go(func() (int, error) {
			defer close(synchronizer)
			return 0, err2
		})
		res, err := g.Wait()
		require.Len(t, res, 0)
		require.ErrorIs(t, err, err2)
		require.NotErrorIs(t, err, err1)
	})

	t.Run("wait error is all returned errors", func(t *testing.T) {
		t.Parallel()
		g := NewWithResults[int]().WithErrors()
		g.Go(func() (int, error) { return 0, err1 })
		g.Go(func() (int, error) { return 0, nil })
		g.Go(func() (int, error) { return 0, err2 })
		res, err := g.Wait()
		require.Len(t, res, 1)
		require.ErrorIs(t, err, err1)
		require.ErrorIs(t, err, err2)
	})

	t.Run("limit", func(t *testing.T) {
		t.Parallel()
		for _, maxConcurrency := range []int{1, 10, 100} {
			t.Run(strconv.Itoa(maxConcurrency), func(t *testing.T) {
				maxConcurrency := maxConcurrency // copy
				t.Parallel()

				g := NewWithResults[int]().WithErrors().WithMaxGoroutines(maxConcurrency)

				var currentConcurrent atomic.Int64
				taskCount := maxConcurrency * 10
				for i := 0; i < taskCount; i++ {
					g.Go(func() (int, error) {
						cur := currentConcurrent.Add(1)
						if cur > int64(maxConcurrency) {
							return 0, fmt.Errorf("expected no more than %d concurrent goroutines", maxConcurrency)
						}
						time.Sleep(time.Millisecond)
						currentConcurrent.Add(-1)
						return 0, nil
					})
				}
				res, err := g.Wait()
				require.Len(t, res, taskCount)
				require.NoError(t, err)
				require.Equal(t, int64(0), currentConcurrent.Load())
			})
		}
	})
}

conc-0.3.0/pool/result_pool.go

package pool

import (
	"context"
	"sync"
)

// NewWithResults creates a new ResultPool for tasks with a result of type T.
//
// The configuration methods (With*) will panic if they are used after calling
// Go() for the first time.
func NewWithResults[T any]() *ResultPool[T] {
	return &ResultPool[T]{
		pool: *New(),
	}
}

// ResultPool is a pool that executes tasks that return a generic result type.
// Tasks are executed in the pool with Go(), then the results of the tasks are
// returned by Wait().
//
// The order of the results is not guaranteed to be the same as the order the
// tasks were submitted. If your use case requires consistent ordering,
// consider using the `stream` package or `Map` from the `iter` package.
type ResultPool[T any] struct {
	pool Pool
	agg  resultAggregator[T]
}

// Go submits a task to the pool. If all goroutines in the pool
// are busy, a call to Go() will block until the task can be started.
func (p *ResultPool[T]) Go(f func() T) {
	p.pool.Go(func() {
		p.agg.add(f())
	})
}

// Wait cleans up all spawned goroutines, propagating any panics, and returning
// a slice of results from tasks that did not panic.
func (p *ResultPool[T]) Wait() []T {
	p.pool.Wait()
	return p.agg.results
}

// MaxGoroutines returns the maximum size of the pool.
func (p *ResultPool[T]) MaxGoroutines() int {
	return p.pool.MaxGoroutines()
}

// WithErrors converts the pool to a ResultErrorPool so the submitted tasks
// can return errors.
func (p *ResultPool[T]) WithErrors() *ResultErrorPool[T] {
	p.panicIfInitialized()
	return &ResultErrorPool[T]{
		errorPool: *p.pool.WithErrors(),
	}
}

// WithContext converts the pool to a ResultContextPool for tasks that should
// run under the same context, such that they each respect shared cancellation.
// For example, WithCancelOnError can be configured on the returned pool to
// signal that all goroutines should be cancelled upon the first error.
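//
// For example (ctx is a placeholder for a caller-provided context):
//
//	p := pool.NewWithResults[int]().WithContext(ctx).WithCancelOnError()
//	// p.Go now takes func(context.Context) (int, error).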
func (p *ResultPool[T]) WithContext(ctx context.Context) *ResultContextPool[T] {
	p.panicIfInitialized()
	return &ResultContextPool[T]{
		contextPool: *p.pool.WithContext(ctx),
	}
}

// WithMaxGoroutines limits the number of goroutines in a pool.
// Defaults to unlimited. Panics if n < 1.
func (p *ResultPool[T]) WithMaxGoroutines(n int) *ResultPool[T] {
	p.panicIfInitialized()
	p.pool.WithMaxGoroutines(n)
	return p
}

func (p *ResultPool[T]) panicIfInitialized() {
	p.pool.panicIfInitialized()
}

// resultAggregator is a utility type that lets us safely append from multiple
// goroutines. The zero value is valid and ready to use.
type resultAggregator[T any] struct {
	mu      sync.Mutex
	results []T
}

func (r *resultAggregator[T]) add(res T) {
	r.mu.Lock()
	r.results = append(r.results, res)
	r.mu.Unlock()
}

conc-0.3.0/pool/result_pool_test.go

package pool

import (
	"fmt"
	"sort"
	"strconv"
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

func ExampleResultPool() {
	p := NewWithResults[int]()
	for i := 0; i < 10; i++ {
		i := i
		p.Go(func() int {
			return i * 2
		})
	}
	res := p.Wait()
	// Result order is nondeterministic, so sort them first
	sort.Ints(res)
	fmt.Println(res)
	// Output:
	// [0 2 4 6 8 10 12 14 16 18]
}

func TestResultGroup(t *testing.T) {
	t.Parallel()

	t.Run("panics on configuration after init", func(t *testing.T) {
		t.Run("before wait", func(t *testing.T) {
			t.Parallel()
			g := NewWithResults[int]()
			g.Go(func() int { return 0 })
			require.Panics(t, func() { g.WithMaxGoroutines(10) })
		})

		t.Run("after wait", func(t *testing.T) {
			t.Parallel()
			g := NewWithResults[int]()
			g.Go(func() int { return 0 })
			// Wait before reconfiguring; without this call the subtest would
			// just duplicate the "before wait" case.
			g.Wait()
			require.Panics(t, func() { g.WithMaxGoroutines(10) })
		})
	})

	t.Run("basic", func(t *testing.T) {
		t.Parallel()
		g := NewWithResults[int]()
		expected := []int{}
		for i := 0; i < 100; i++ {
			i := i
			expected = append(expected, i)
			g.Go(func() int {
				return i
			})
		}
		res := g.Wait()
		sort.Ints(res)
		require.Equal(t, expected, res)
	})

	t.Run("limit", func(t *testing.T) {
		t.Parallel()
		for _, maxGoroutines := range []int{1, 10, 100} {
			t.Run(strconv.Itoa(maxGoroutines), func(t *testing.T) {
				g := NewWithResults[int]().WithMaxGoroutines(maxGoroutines)

				var currentConcurrent atomic.Int64
				var errCount atomic.Int64
				taskCount := maxGoroutines * 10
				expected := make([]int, taskCount)
				for i := 0; i < taskCount; i++ {
					i := i
					expected[i] = i
					g.Go(func() int {
						cur := currentConcurrent.Add(1)
						if cur > int64(maxGoroutines) {
							errCount.Add(1)
						}
						time.Sleep(time.Millisecond)
						currentConcurrent.Add(-1)
						return i
					})
				}
				res := g.Wait()
				sort.Ints(res)
				require.Equal(t, expected, res)
				require.Equal(t, int64(0), errCount.Load())
				require.Equal(t, int64(0), currentConcurrent.Load())
			})
		}
	})
}

conc-0.3.0/stream/
conc-0.3.0/stream/stream.go

// Package stream provides a concurrent, ordered stream implementation.
package stream

import (
	"sync"

	"github.com/sourcegraph/conc"
	"github.com/sourcegraph/conc/panics"
	"github.com/sourcegraph/conc/pool"
)

// New creates a new Stream with default settings.
func New() *Stream {
	return &Stream{
		pool: *pool.New(),
	}
}

// Stream is used to execute a stream of tasks concurrently while maintaining
// the order of the results.
//
// To use a stream, you submit some number of `Task`s, each of which
// returns a callback.
// Each task will be executed concurrently in the stream's
// associated Pool, and the callbacks will be executed sequentially in the
// order the tasks were submitted.
//
// Once all your tasks have been submitted, Wait() must be called to clean up
// running goroutines and propagate any panics.
//
// In the case of a panic during execution of a task or a callback, all other
// tasks and callbacks will still execute. The panic will be propagated to the
// caller when Wait() is called.
//
// A Stream is efficient, but not zero cost. It should not be used for very
// short tasks. Startup and teardown add an overhead of a couple of
// microseconds, and the overhead for each task is roughly 500ns. It should be
// good enough for any task that requires a network call.
type Stream struct {
	pool             pool.Pool
	callbackerHandle conc.WaitGroup
	queue            chan callbackCh

	initOnce sync.Once
}

// Task is a task that is submitted to the stream. Submitted tasks will
// be executed concurrently. It returns a callback that will be called after
// the task has completed.
type Task func() Callback

// Callback is a function that is returned by a Task. Callbacks are
// called in the same order that tasks are submitted.
type Callback func()

// Go schedules a task to be run in the stream's pool. All submitted tasks
// will be executed concurrently in worker goroutines. Then, the callbacks
// returned by the tasks will be executed in the order that the tasks were
// submitted. All callbacks will be executed by the same goroutine, so no
// synchronization is necessary between callbacks. If all goroutines in the
// stream's pool are busy, a call to Go() will block until the task can be
// started.
func (s *Stream) Go(f Task) {
	s.init()

	// Get a channel from the cache.
	ch := getCh()

	// Queue the channel for the callbacker.
	s.queue <- ch

	// Submit the task for execution.
	s.pool.Go(func() {
		defer func() {
			// In the case of a panic from f, we don't want the callbacker to
			// starve waiting for a callback from this channel, so give it an
			// empty callback.
			if r := recover(); r != nil {
				ch <- func() {}
				panic(r)
			}
		}()

		// Run the task, sending its callback down this task's channel.
		callback := f()
		ch <- callback
	})
}

// Wait signals to the stream that all tasks have been submitted. Wait will
// not return until all tasks and callbacks have been run.
func (s *Stream) Wait() {
	s.init()

	// Defer the callbacker cleanup so that it occurs even in the case
	// that one of the tasks panics and is propagated up by s.pool.Wait().
	defer func() {
		close(s.queue)
		s.callbackerHandle.Wait()
	}()

	// Wait for all the workers to exit.
	s.pool.Wait()
}

// WithMaxGoroutines limits the number of goroutines in the stream's pool.
// Defaults to unlimited. Panics if n < 1.
func (s *Stream) WithMaxGoroutines(n int) *Stream {
	s.pool.WithMaxGoroutines(n)
	return s
}

func (s *Stream) init() {
	s.initOnce.Do(func() {
		s.queue = make(chan callbackCh, s.pool.MaxGoroutines()+1)

		// Start the callbacker.
		s.callbackerHandle.Go(s.callbacker)
	})
}

// callbacker is responsible for calling the returned callbacks in the order
// they were submitted. There is only a single instance of callbacker running.
func (s *Stream) callbacker() {
	var panicCatcher panics.Catcher
	defer panicCatcher.Repanic()

	// For every scheduled task, read that task's channel from the queue.
	for callbackCh := range s.queue {
		// Wait for the task to complete and get its callback from the channel.
		callback := <-callbackCh

		// Execute the callback (with panic protection).
		panicCatcher.Try(callback)

		// Return the channel to the pool of unused channels.
		putCh(callbackCh)
	}
}

type callbackCh chan func()

var callbackChPool = sync.Pool{
	New: func() any {
		return make(callbackCh, 1)
	},
}

func getCh() callbackCh {
	return callbackChPool.Get().(callbackCh)
}

func putCh(ch callbackCh) {
	callbackChPool.Put(ch)
}

conc-0.3.0/stream/stream_test.go

package stream

import (
	"fmt"
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

func ExampleStream() {
	times := []int{20, 52, 16, 45, 4, 80}

	stream := New()
	for _, millis := range times {
		dur := time.Duration(millis) * time.Millisecond
		stream.Go(func() Callback {
			time.Sleep(dur)
			// This will print in the order the tasks were submitted
			return func() { fmt.Println(dur) }
		})
	}
	stream.Wait()

	// Output:
	// 20ms
	// 52ms
	// 16ms
	// 45ms
	// 4ms
	// 80ms
}

func TestStream(t *testing.T) {
	t.Parallel()

	t.Run("simple", func(t *testing.T) {
		t.Parallel()
		s := New()
		var res []int
		for i := 0; i < 5; i++ {
			i := i
			s.Go(func() Callback {
				i *= 2
				return func() {
					res = append(res, i)
				}
			})
		}
		s.Wait()
		require.Equal(t, []int{0, 2, 4, 6, 8}, res)
	})

	t.Run("max goroutines", func(t *testing.T) {
		t.Parallel()
		s := New().WithMaxGoroutines(5)

		var currentTaskCount atomic.Int64
		var currentCallbackCount atomic.Int64
		for i := 0; i < 50; i++ {
			s.Go(func() Callback {
				curr := currentTaskCount.Add(1)
				if curr > 5 {
					t.Fatal("too many concurrent tasks being executed")
				}
				defer currentTaskCount.Add(-1)

				time.Sleep(time.Millisecond)

				return func() {
					curr := currentCallbackCount.Add(1)
					if curr > 1 {
						t.Fatal("too many concurrent callbacks being executed")
					}

					time.Sleep(time.Millisecond)

					defer currentCallbackCount.Add(-1)
				}
			})
		}
		s.Wait()
	})

	t.Run("panic in task is propagated", func(t *testing.T) {
		t.Parallel()
		s := New().WithMaxGoroutines(5)
		s.Go(func() Callback {
			panic("something really bad happened in the task")
		})
		require.Panics(t, s.Wait)
	})

	t.Run("panic in callback is propagated", func(t *testing.T) {
		t.Parallel()
		s := New().WithMaxGoroutines(5)
		s.Go(func() Callback {
			return func() {
				panic("something really bad happened in the callback")
			}
		})
		require.Panics(t, s.Wait)
	})

	t.Run("panic in callback does not block producers", func(t *testing.T) {
		t.Parallel()
		s := New().WithMaxGoroutines(5)
		s.Go(func() Callback {
			return func() {
				panic("something really bad happened in the callback")
			}
		})
		for i := 0; i < 100; i++ {
			s.Go(func() Callback {
				return func() {}
			})
		}
		require.Panics(t, s.Wait)
	})
}

func BenchmarkStream(b *testing.B) {
	b.Run("startup and teardown", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			s := New()
			s.Go(func() Callback { return func() {} })
			s.Wait()
		}
	})

	b.Run("per task", func(b *testing.B) {
		n := 0
		s := New()
		for i := 0; i < b.N; i++ {
			s.Go(func() Callback {
				return func() { n += 1 }
			})
		}
		s.Wait()
	})
}

conc-0.3.0/waitgroup.go

package conc

import (
	"sync"

	"github.com/sourcegraph/conc/panics"
)

// NewWaitGroup creates a new WaitGroup.
func NewWaitGroup() *WaitGroup {
	return &WaitGroup{}
}

// WaitGroup is the primary building block for scoped concurrency.
// Goroutines can be spawned in the WaitGroup with the Go method,
// and calling Wait() will ensure that each of those goroutines exits
// before continuing. Any panics in a child goroutine will be caught
// and propagated to the caller of Wait().
//
// The zero value of WaitGroup is usable, just like sync.WaitGroup.
// Also like sync.WaitGroup, it must not be copied after first use.
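//
// A minimal sketch of typical use (doSomething is a placeholder):
//
//	var wg conc.WaitGroup
//	wg.Go(func() { doSomething() })
//	wg.Go(func() { doSomething() })
//	wg.Wait() // blocks until both exit; re-panics if either panicked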
type WaitGroup struct {
	wg sync.WaitGroup
	pc panics.Catcher
}

// Go spawns a new goroutine in the WaitGroup.
func (h *WaitGroup) Go(f func()) {
	h.wg.Add(1)
	go func() {
		defer h.wg.Done()
		h.pc.Try(f)
	}()
}

// Wait will block until all goroutines spawned with Go exit and will
// propagate any panics spawned in a child goroutine.
func (h *WaitGroup) Wait() {
	h.wg.Wait()

	// Propagate a panic if we caught one from a child goroutine.
	h.pc.Repanic()
}

// WaitAndRecover will block until all goroutines spawned with Go exit and
// will return a *panics.Recovered if one of the child goroutines panics.
func (h *WaitGroup) WaitAndRecover() *panics.Recovered {
	h.wg.Wait()

	// Return a recovered panic if we caught one from a child goroutine.
	return h.pc.Recovered()
}

conc-0.3.0/waitgroup_test.go

package conc

import (
	"fmt"
	"sync/atomic"
	"testing"

	"github.com/stretchr/testify/require"
)

func ExampleWaitGroup() {
	var count atomic.Int64

	var wg WaitGroup
	for i := 0; i < 10; i++ {
		wg.Go(func() {
			count.Add(1)
		})
	}
	wg.Wait()

	fmt.Println(count.Load())
	// Output:
	// 10
}

func ExampleWaitGroup_WaitAndRecover() {
	var wg WaitGroup

	wg.Go(func() {
		panic("super bad thing")
	})

	recoveredPanic := wg.WaitAndRecover()
	fmt.Println(recoveredPanic.Value)
	// Output:
	// super bad thing
}

func TestWaitGroup(t *testing.T) {
	t.Parallel()

	t.Run("ctor", func(t *testing.T) {
		t.Parallel()
		wg := NewWaitGroup()
		require.IsType(t, &WaitGroup{}, wg)
	})

	t.Run("all spawned run", func(t *testing.T) {
		t.Parallel()
		var count atomic.Int64
		var wg WaitGroup
		for i := 0; i < 100; i++ {
			wg.Go(func() {
				count.Add(1)
			})
		}
		wg.Wait()
		require.Equal(t, count.Load(), int64(100))
	})

	t.Run("panic", func(t *testing.T) {
		t.Parallel()

		t.Run("is propagated", func(t *testing.T) {
			t.Parallel()
			var wg WaitGroup
			wg.Go(func() {
				panic("super bad thing")
			})
			require.Panics(t, wg.Wait)
		})

		t.Run("one is propagated", func(t *testing.T) {
			t.Parallel()
			var wg WaitGroup
			wg.Go(func() {
				panic("super bad thing")
			})
			wg.Go(func() {
				panic("super badder thing")
			})
			require.Panics(t, wg.Wait)
		})

		t.Run("non-panics do not overwrite panic", func(t *testing.T) {
			t.Parallel()
			var wg WaitGroup
			wg.Go(func() {
				panic("super bad thing")
			})
			for i := 0; i < 10; i++ {
				wg.Go(func() {})
			}
			require.Panics(t, wg.Wait)
		})

		t.Run("non-panics run successfully", func(t *testing.T) {
			t.Parallel()
			var wg WaitGroup
			var i atomic.Int64
			wg.Go(func() {
				i.Add(1)
			})
			wg.Go(func() {
				panic("super bad thing")
			})
			wg.Go(func() {
				i.Add(1)
			})
			require.Panics(t, wg.Wait)
			require.Equal(t, int64(2), i.Load())
		})

		t.Run("is caught by waitandrecover", func(t *testing.T) {
			t.Parallel()
			var wg WaitGroup
			wg.Go(func() {
				panic("super bad thing")
			})
			p := wg.WaitAndRecover()
			require.Equal(t, p.Value, "super bad thing")
		})

		t.Run("one is caught by waitandrecover", func(t *testing.T) {
			t.Parallel()
			var wg WaitGroup
			wg.Go(func() {
				panic("super bad thing")
			})
			wg.Go(func() {
				panic("super badder thing")
			})
			p := wg.WaitAndRecover()
			require.NotNil(t, p)
		})

		t.Run("nonpanics run successfully with waitandrecover", func(t *testing.T) {
			t.Parallel()
			var wg WaitGroup
			var i atomic.Int64
			wg.Go(func() {
				i.Add(1)
			})
			wg.Go(func() {
				panic("super bad thing")
			})
			wg.Go(func() {
				i.Add(1)
			})
			p := wg.WaitAndRecover()
			require.Equal(t, p.Value, "super bad thing")
			require.Equal(t, int64(2), i.Load())
		})
	})
}
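
// ExampleWaitGroup_WaitAndRecover_noPanic is a small supplementary sketch
// (not part of the original test file): WaitAndRecover returns nil when no
// child goroutine panicked, so callers can branch on the result.
func ExampleWaitGroup_WaitAndRecover_noPanic() {
	var wg WaitGroup
	wg.Go(func() {})
	fmt.Println(wg.WaitAndRecover() == nil)
	// Output:
	// true
}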