pax_global_header00006660000000000000000000000064144767243770014537gustar00rootroot0000000000000052 comment=b0e46059265dcc795d35a15e86f44ee848e25252 golang-github-cowsql-go-cowsql-1.22.0/000077500000000000000000000000001447672437700175675ustar00rootroot00000000000000golang-github-cowsql-go-cowsql-1.22.0/.dir-locals.el000066400000000000000000000003771447672437700222270ustar00rootroot00000000000000;;; Directory Local Variables ;;; For more information see (info "(emacs) Directory Variables") ((go-mode . ((go-test-args . "-tags libsqlite3 -timeout 90s") (eval . (set (make-local-variable 'flycheck-go-build-tags) '("libsqlite3")))))) golang-github-cowsql-go-cowsql-1.22.0/.github/000077500000000000000000000000001447672437700211275ustar00rootroot00000000000000golang-github-cowsql-go-cowsql-1.22.0/.github/workflows/000077500000000000000000000000001447672437700231645ustar00rootroot00000000000000golang-github-cowsql-go-cowsql-1.22.0/.github/workflows/build-and-test.yml000066400000000000000000000041441447672437700265260ustar00rootroot00000000000000name: CI tests on: - push - pull_request jobs: build-and-test: strategy: fail-fast: false matrix: go: - 1.20.x os: - ubuntu-22.04 runs-on: ${{ matrix.os }} steps: - name: Checkout code uses: actions/checkout@v3 - name: Install Go uses: actions/setup-go@v3 with: go-version: ${{ matrix.go }} - name: Setup dependencies run: | sudo add-apt-repository ppa:cowsql/main -y sudo apt update sudo apt install -y golint libsqlite3-dev libuv1-dev liblz4-dev libraft-dev libcowsql-dev go get github.com/go-playground/overalls - name: Build & Test env: CGO_LDFLAGS_ALLOW: "-Wl,-z,now" run: | go version go get -t -tags libsqlite3 ./... go vet -tags libsqlite3 ./... golint export GO_COWSQL_MULTITHREAD=1 go test -v -race -coverprofile=coverage.out ./... go test -v -tags nosqlite3 ./... VERBOSE=1 ./test/cowsql-demo.sh VERBOSE=1 ./test/roles.sh VERBOSE=1 ./test/recover.sh - name: Coverage uses: shogo82148/actions-goveralls@v1 with: path-to-profile: coverage.out - name: Benchmark env: CGO_LDFLAGS_ALLOW: "-Wl,-z,now" GO_COWSQL_MULTITHREAD: 1 run: | go install -tags libsqlite3 github.com/cowsql/go-cowsql/cmd/cowsql-benchmark cowsql-benchmark --db 127.0.0.1:9001 --driver --cluster 127.0.0.1:9001,127.0.0.1:9002,127.0.0.1:9003 --workload kvreadwrite & masterpid=$! cowsql-benchmark --db 127.0.0.1:9002 --join 127.0.0.1:9001 & cowsql-benchmark --db 127.0.0.1:9003 --join 127.0.0.1:9001 & wait $masterpid echo "Write results:" head -n 5 /tmp/cowsql-benchmark/127.0.0.1:9001/results/0-exec-* echo "" echo "Read results:" head -n 5 /tmp/cowsql-benchmark/127.0.0.1:9001/results/0-query-* - uses: actions/upload-artifact@v3 with: name: cowsql-benchmark-${{ matrix.os }}-${{ matrix.go }} path: /tmp/cowsql-benchmark/127.0.0.1:9001/results/* golang-github-cowsql-go-cowsql-1.22.0/.github/workflows/daily-benchmark.yml000066400000000000000000000026421447672437700267450ustar00rootroot00000000000000name: Daily benchmark on: schedule: - cron: "0 12 * * *" jobs: benchmark: runs-on: ubuntu-22.04 steps: - name: Checkout code uses: actions/checkout@v3 - name: Install Go uses: actions/setup-go@v3 with: go-version: 1.18.x - name: Setup dependencies run: | sudo add-apt-repository ppa:cowsql/main -y sudo apt update sudo apt install -y libsqlite3-dev libuv1-dev liblz4-dev libraft-dev libcowsql-dev - name: Build & Benchmark env: | CGO_LDFLAGS_ALLOW: "-Wl,-z,now" GO_COWSQL_MULTITHREAD: 1 run: | go get -t -tags libsqlite3 ./... 
go install -tags libsqlite3 github.com/cowsql/go-cowsql/cmd/cowsql-benchmark cowsql-benchmark --db 127.0.0.1:9001 --duration 3600 --driver --cluster 127.0.0.1:9001,127.0.0.1:9002,127.0.0.1:9003 --workload kvreadwrite & masterpid=$! cowsql-benchmark --db 127.0.0.1:9002 --join 127.0.0.1:9001 & cowsql-benchmark --db 127.0.0.1:9003 --join 127.0.0.1:9001 & wait $masterpid echo "Write results:" head -n 5 /tmp/cowsql-benchmark/127.0.0.1:9001/results/0-exec-* echo "" echo "Read results:" head -n 5 /tmp/cowsql-benchmark/127.0.0.1:9001/results/0-query-* - uses: actions/upload-artifact@v3 with: name: cowsql-daily-benchmark path: /tmp/cowsql-benchmark/127.0.0.1:9001/results/* golang-github-cowsql-go-cowsql-1.22.0/.gitignore000066400000000000000000000001641447672437700215600ustar00rootroot00000000000000.sqlite cmd/cowsql/cowsql cmd/cowsql-demo/cowsql-demo cowsql cowsql-demo profile.coverprofile overalls.coverprofile golang-github-cowsql-go-cowsql-1.22.0/AUTHORS000066400000000000000000000003631447672437700206410ustar00rootroot00000000000000Unless mentioned otherwise in a specific file's header, all code in this project is released under the Apache 2.0 license. The list of authors and contributors can be retrieved from the git commit history and in some cases, the file headers. golang-github-cowsql-go-cowsql-1.22.0/LICENSE000066400000000000000000000261351447672437700206030ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. golang-github-cowsql-go-cowsql-1.22.0/README.md000066400000000000000000000122721447672437700210520ustar00rootroot00000000000000go-cowsql [![CI tests](https://github.com/cowsql/go-cowsql/actions/workflows/build-and-test.yml/badge.svg)](https://github.com/cowsql/go-cowsql/actions/workflows/build-and-test.yml) [![Coverage Status](https://coveralls.io/repos/github/cowsql/go-cowsql/badge.svg?branch=main)](https://coveralls.io/github/cowsql/go-cowsql?branch=main) [![Go Report Card](https://goreportcard.com/badge/github.com/cowsql/go-cowsql)](https://goreportcard.com/report/github.com/cowsql/go-cowsql) [![GoDoc](https://godoc.org/github.com/cowsql/go-cowsql?status.svg)](https://godoc.org/github.com/cowsql/go-cowsql) ====== This repository provides the `go-cowsql` Go package, containing bindings for the [cowsql](https://github.com/cowsql/cowsql) C library and a pure-Go client for the cowsql wire [protocol](https://github.com/cowsql/cowsql/blob/main/doc/protocol.md). 
Fork of Canonical go-dqlite --------------------------- These bindings are a cowsql-oriented fork of Canonical's [go-dqlite](https://github.com/canonical/go-dqlite) ones, which were originally written by cowsql's author [himself](https://github.com/canonical/go-dqlite/commits?author=freeekanayaka) while working at Canonical. Usage ----- The best way to understand how to use the ```go-cowsql``` package is probably by looking at the source code of the [demo program](https://github.com/cowsql/go-cowsql/blob/main/cmd/cowsql-demo/cowsql-demo.go) and using it as an example. In general your application will use code such as: ```go dir := "/path/to/data/directory" address := "1.2.3.4:666" // Unique node address cluster := []string{...} // Optional list of existing nodes, when starting a new node app, err := app.New(dir, app.WithAddress(address), app.WithCluster(cluster)) if err != nil { // ... } db, err := app.Open(context.Background(), "my-database") if err != nil { // ... } // db is a *sql.DB object if _, err := db.Exec("CREATE TABLE my_table (n INT)"); err != nil { // ... } ``` Build ----- In order to use the go-cowsql package in your application, you'll need to have the [cowsql](https://github.com/cowsql/cowsql) C library installed on your system, along with its dependencies. By default, go-cowsql's `client` module supports storing a cache of the cluster's state in a SQLite database, locally on each cluster member. (This is not to be confused with any SQLite databases that are managed by cowsql.) In order to do this, it imports https://github.com/mattn/go-sqlite3, and so you can use the `libsqlite3` build tag to control whether go-sqlite3 links to a system libsqlite3 or builds its own. You can also disable support for SQLite node stores entirely with the `nosqlite3` build tag (unique to go-cowsql). If you pass this tag, your application will not link *directly* to libsqlite3 (but it will still link it *indirectly* via libcowsql, unless you've dropped the sqlite3.c amalgamation into the cowsql build). Documentation ------------- The documentation for this package can be found on [pkg.go.dev](https://pkg.go.dev/github.com/cowsql/go-cowsql). Demo ---- To see cowsql in action, either install the Debian package from the PPA: ```bash sudo add-apt-repository -y ppa:cowsql/master sudo apt install cowsql libcowsql-dev ``` or build the cowsql C library and its dependencies from source, as described [here](https://github.com/cowsql/cowsql#build), and then run: ``` go install -tags libsqlite3 ./cmd/cowsql-demo ``` from the top-level directory of this repository. This builds a demo cowsql application, which exposes a simple key/value store over an HTTP API. Once the `cowsql-demo` binary is installed (normally under `~/go/bin` or `/usr/bin/`), start three nodes of the demo application: ```bash cowsql-demo --api 127.0.0.1:8001 --db 127.0.0.1:9001 & cowsql-demo --api 127.0.0.1:8002 --db 127.0.0.1:9002 --join 127.0.0.1:9001 & cowsql-demo --api 127.0.0.1:8003 --db 127.0.0.1:9003 --join 127.0.0.1:9001 & ``` The `--api` flag tells the demo program where to expose its HTTP API. The `--db` flag tells the demo program to use the given address for internal database replication. The `--join` flag is optional and should be used only for additional nodes after the first one. It informs them about the existing cluster, so they can automatically join it. Now we can start using the cluster. 
Let's insert a key/value pair: ```bash curl -X PUT -d my-value http://127.0.0.1:8001/my-key ``` and then retrieve it from the database: ```bash curl http://127.0.0.1:8001/my-key ``` Currently the first node is the leader. If we stop it and then try to query the key again, curl will fail, but we can simply change the endpoint to another node and things will work since an automatic failover has taken place: ```bash kill -TERM %1; curl http://127.0.0.1:8002/my-key ``` Shell ------ A basic SQLite-like cowsql shell is available in the `cowsql-tools` package or can be built with: ``` go install -tags libsqlite3 ./cmd/cowsql ``` ``` Usage: cowsql -s [command] [flags] ``` Example usage in the case of the `cowsql-demo` example listed above: ``` cowsql -s 127.0.0.1:9001 demo cowsql> SELECT * FROM model; my-key|my-value ``` The shell supports normal SQL queries plus the special `.cluster` and `.leader` commands to inspect the cluster members and the current leader. golang-github-cowsql-go-cowsql-1.22.0/app/000077500000000000000000000000001447672437700203475ustar00rootroot00000000000000golang-github-cowsql-go-cowsql-1.22.0/app/app.go000066400000000000000000000462331447672437700214660ustar00rootroot00000000000000package app import ( "context" "crypto/tls" "database/sql" "fmt" "net" "os" "path/filepath" "runtime" "sync" "sync/atomic" "time" "github.com/cowsql/go-cowsql" "github.com/cowsql/go-cowsql/client" "github.com/cowsql/go-cowsql/driver" "github.com/cowsql/go-cowsql/internal/protocol" "github.com/pkg/errors" "golang.org/x/sync/semaphore" ) // used to create a unique driver name, MUST be modified atomically // https://pkg.go.dev/sync/atomic#AddInt64 var driverIndex int64 // App is a high-level helper for initializing a typical cowsql-based Go // application. // // It takes care of starting a cowsql node and registering a cowsql Go SQL // driver. type App struct { id uint64 address string dir string node *cowsql.Node nodeBindAddress string listener net.Listener tls *tlsSetup dialFunc client.DialFunc store client.NodeStore driver *driver.Driver driverName string log client.LogFunc ctx context.Context stop context.CancelFunc // Signal App.run() to stop. proxyCh chan struct{} // Waits for App.proxy() to return. runCh chan struct{} // Waits for App.run() to return. readyCh chan struct{} // Waits for startup tasks voters int standbys int roles RolesConfig } // New creates a new application node. func New(dir string, options ...Option) (app *App, err error) { o := defaultOptions() for _, option := range options { option(o) } var nodeBindAddress string if o.Conn != nil { listener, err := net.Listen("unix", o.UnixSocket) if err != nil { return nil, fmt.Errorf("failed to autobind unix socket: %w", err) } nodeBindAddress = listener.Addr().String() listener.Close() } // List of cleanup functions to run in case of errors. cleanups := []func(){} defer func() { if err == nil { return } for i := range cleanups { i = len(cleanups) - 1 - i // Reverse order cleanups[i]() } }() // Load our ID, or generate one if we are joining. 
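// A brand new bootstrap node gets the fixed cowsql.BootstrapID, while a brand // new joining node derives its ID from its address and drops an empty join // marker file, so that an interrupted join can be detected and resumed after a // restart.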
info := client.NodeInfo{} infoFileExists, err := fileExists(dir, infoFile) if err != nil { return nil, err } if !infoFileExists { if o.Address == "" { if o.Address, err = defaultAddress(); err != nil { return nil, err } } if len(o.Cluster) == 0 { info.ID = cowsql.BootstrapID } else { info.ID = cowsql.GenerateID(o.Address) if err := fileWrite(dir, joinFile, []byte{}); err != nil { return nil, err } } info.Address = o.Address if err := fileMarshal(dir, infoFile, info); err != nil { return nil, err } cleanups = append(cleanups, func() { fileRemove(dir, infoFile) }) } else { if err := fileUnmarshal(dir, infoFile, &info); err != nil { return nil, err } if o.Address != "" && o.Address != info.Address { return nil, fmt.Errorf("address %q in info.yaml does not match %q", info.Address, o.Address) } } joinFileExists, err := fileExists(dir, joinFile) if err != nil { return nil, err } if info.ID == cowsql.BootstrapID && joinFileExists { return nil, fmt.Errorf("bootstrap node can't join a cluster") } // Open the nodes store. storeFileExists, err := fileExists(dir, storeFile) if err != nil { return nil, err } store, err := client.NewYamlNodeStore(filepath.Join(dir, storeFile)) if err != nil { return nil, fmt.Errorf("open cluster.yaml node store: %w", err) } // The info file and the store file should both exist, or neither of them // should. if infoFileExists != storeFileExists { return nil, fmt.Errorf("inconsistent info.yaml and cluster.yaml") } if !storeFileExists { // If this is a brand new application node, populate the store // either with the node's address (for bootstrap nodes) or with // the given cluster addresses (for joining nodes). nodes := []client.NodeInfo{} if info.ID == cowsql.BootstrapID { nodes = append(nodes, client.NodeInfo{Address: info.Address}) } else { if len(o.Cluster) == 0 { return nil, fmt.Errorf("no cluster addresses provided") } for _, address := range o.Cluster { nodes = append(nodes, client.NodeInfo{Address: address}) } } if err := store.Set(context.Background(), nodes); err != nil { return nil, fmt.Errorf("initialize node store: %w", err) } cleanups = append(cleanups, func() { fileRemove(dir, storeFile) }) } // Start the local cowsql engine. ctx, stop := context.WithCancel(context.Background()) var nodeDial client.DialFunc if o.Conn != nil { nodeDial = extDialFuncWithProxy(ctx, o.Conn.dialFunc) } else if o.TLS != nil { nodeBindAddress = fmt.Sprintf("@cowsql-%d", info.ID) // Within a snap we need to choose a different name for the abstract unix domain // socket to get it past the AppArmor confinement. // See https://github.com/snapcore/snapd/blob/master/interfaces/apparmor/template.go#L357 snapInstanceName := os.Getenv("SNAP_INSTANCE_NAME") if len(snapInstanceName) > 0 { nodeBindAddress = fmt.Sprintf("@snap.%s.cowsql-%d", snapInstanceName, info.ID) } nodeDial = makeNodeDialFunc(ctx, o.TLS.Dial) } else { nodeBindAddress = info.Address nodeDial = client.DefaultDialFunc } node, err := cowsql.New( info.ID, info.Address, dir, cowsql.WithBindAddress(nodeBindAddress), cowsql.WithDialFunc(nodeDial), cowsql.WithFailureDomain(o.FailureDomain), cowsql.WithNetworkLatency(o.NetworkLatency), cowsql.WithSnapshotParams(o.SnapshotParams), cowsql.WithAutoRecovery(o.AutoRecovery), ) if err != nil { stop() return nil, fmt.Errorf("create node: %w", err) } if err := node.Start(); err != nil { stop() return nil, fmt.Errorf("start node: %w", err) } cleanups = append(cleanups, func() { node.Close() }) // Register the local cowsql driver. 
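// The name must be unique across the whole process, hence the atomic // driverIndex counter below: database/sql panics if the same driver name is // registered twice.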
driverDial := client.DefaultDialFunc if o.TLS != nil { driverDial = client.DialFuncWithTLS(driverDial, o.TLS.Dial) } else if o.Conn != nil { driverDial = o.Conn.dialFunc } driver, err := driver.New( store, driver.WithDialFunc(driverDial), driver.WithLogFunc(o.Log), driver.WithTracing(o.Tracing), ) if err != nil { stop() return nil, fmt.Errorf("create driver: %w", err) } driverName := fmt.Sprintf("cowsql-%d", atomic.AddInt64(&driverIndex, 1)) sql.Register(driverName, driver) if o.Voters < 3 || o.Voters%2 == 0 { stop() return nil, fmt.Errorf("invalid voters %d: must be an odd number greater than 1", o.Voters) } if runtime.GOOS != "linux" && nodeBindAddress[0] == '@' { // Do not use an abstract socket on other platforms and trim the leading "@" nodeBindAddress = nodeBindAddress[1:] } app = &App{ id: info.ID, address: info.Address, dir: dir, node: node, nodeBindAddress: nodeBindAddress, store: store, dialFunc: driverDial, driver: driver, driverName: driverName, log: o.Log, tls: o.TLS, ctx: ctx, stop: stop, runCh: make(chan struct{}, 0), readyCh: make(chan struct{}, 0), voters: o.Voters, standbys: o.StandBys, roles: RolesConfig{Voters: o.Voters, StandBys: o.StandBys}, } // Start the proxy if a TLS configuration was provided. if o.TLS != nil { listener, err := net.Listen("tcp", info.Address) if err != nil { return nil, fmt.Errorf("listen to %s: %w", info.Address, err) } proxyCh := make(chan struct{}, 0) app.listener = listener app.proxyCh = proxyCh go app.proxy() cleanups = append(cleanups, func() { listener.Close(); <-proxyCh }) } else if o.Conn != nil { go func() { for { remote := <-o.Conn.acceptCh // keep forward compatible _, isTcp := remote.(*net.TCPConn) _, isTLS := remote.(*tls.Conn) if isTcp || isTLS { // Write the status line and upgrade header by hand since w.WriteHeader() would fail after Hijack(). data := []byte("HTTP/1.1 101 Switching Protocols\r\nUpgrade: cowsql\r\n\r\n") n, err := remote.Write(data) if err != nil || n != len(data) { remote.Close() panic(fmt.Errorf("failed to write connection header: %w", err)) } } local, err := net.Dial("unix", nodeBindAddress) if err != nil { remote.Close() panic(fmt.Errorf("failed to connect to bind address %q: %w", nodeBindAddress, err)) } go proxy(app.ctx, remote, local, nil) } }() } go app.run(ctx, o.RolesAdjustmentFrequency, joinFileExists) return app, nil } // Handover transfers all responsibilities for this node (such as leadership // and voting rights) to another node, if one is available. // // This method should always be called before invoking Close(), in order to // gracefully shut down a node. func (a *App) Handover(ctx context.Context) error { // Set a hard limit of one minute, in case the user-provided context // has no expiration. That avoids the call blocking forever in case a // majority of the cluster is down and no leader is available. // Watch out when removing or editing this context: the for loop at the // end of this function could run "forever" without it. var cancel context.CancelFunc ctx, cancel = context.WithTimeout(ctx, time.Minute) defer cancel() cli, err := a.Leader(ctx) if err != nil { return fmt.Errorf("find leader: %w", err) } defer cli.Close() // Possibly transfer our role. 
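// Ask the roles logic which role, if any, we should hand over, together with // an ordered list of candidate nodes that could take it from us. A role of -1 // means there is nothing to hand over.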
nodes, err := cli.Cluster(ctx) if err != nil { return fmt.Errorf("cluster servers: %w", err) } changes := a.makeRolesChanges(nodes) role, candidates := changes.Handover(a.id) if role != -1 { for i, node := range candidates { if err := cli.Assign(ctx, node.ID, role); err != nil { a.warn("promote %s from %s to %s: %v", node.Address, node.Role, role, err) if i == len(candidates)-1 { // We could not promote any node return fmt.Errorf("could not promote any online node to %s", role) } continue } a.debug("promoted %s from %s to %s", node.Address, node.Role, role) break } } // Check if we are the current leader and transfer leadership if so. leader, err := cli.Leader(ctx) if err != nil { return fmt.Errorf("leader address: %w", err) } if leader != nil && leader.Address == a.address { nodes, err := cli.Cluster(ctx) if err != nil { return fmt.Errorf("cluster servers: %w", err) } changes := a.makeRolesChanges(nodes) voters := changes.list(client.Voter, true) for i, voter := range voters { if voter.Address == a.address { continue } if err := cli.Transfer(ctx, voter.ID); err != nil { a.warn("transfer leadership to %s: %v", voter.Address, err) if i == len(voters)-1 { return fmt.Errorf("transfer leadership: %w", err) } } cli, err = a.Leader(ctx) if err != nil { return fmt.Errorf("find new leader: %w", err) } defer cli.Close() break } } // Demote ourselves if we have promoted someone else. if role != -1 { // Keep trying for a while before failing. The new leader might need to // commit an entry from its new term before it can commit the last // configuration change, so wait a bit for that to happen instead of // failing immediately for { err = cli.Assign(ctx, a.ID(), client.Spare) if err == nil { return nil } select { case <-ctx.Done(): return fmt.Errorf("demote ourselves context done: %w", err) default: // Wait a bit before trying again time.Sleep(time.Second) continue } } } return nil } // Close the application node, releasing all resources it created. func (a *App) Close() error { // Stop the run goroutine. a.stop() <-a.runCh if a.listener != nil { a.listener.Close() <-a.proxyCh } if err := a.node.Close(); err != nil { return err } return nil } // ID returns the cowsql ID of this application node. func (a *App) ID() uint64 { return a.id } // Address returns the cowsql address of this application node. func (a *App) Address() string { return a.address } // Driver returns the name used to register the cowsql driver. func (a *App) Driver() string { return a.driverName } // Ready can be used to wait for a node to complete some initial tasks that are // initiated at startup. For example a brand new node will attempt to join the // cluster, a restarted node will check if it should assume some particular // role, etc. // // If this method returns without error it means that those initial tasks have // succeeded and follow-up operations like Open() are more likely to succeed // quickly. func (a *App) Ready(ctx context.Context) error { select { case <-a.readyCh: return nil case <-ctx.Done(): return ctx.Err() } } // Open the cowsql database with the given name. func (a *App) Open(ctx context.Context, database string) (*sql.DB, error) { db, err := sql.Open(a.Driver(), database) if err != nil { return nil, err } for i := 0; i < 60; i++ { err = db.PingContext(ctx) if err == nil { break } cause := errors.Cause(err) if cause != driver.ErrNoAvailableLeader { return nil, err } time.Sleep(time.Second) } if err != nil { return nil, err } return db, nil } // Leader returns a client connected to the current cluster leader, if any. 
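// The returned client must be closed by the caller when no longer needed.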
func (a *App) Leader(ctx context.Context) (*client.Client, error) { return client.FindLeader(ctx, a.store, a.clientOptions()...) } // Client returns a client connected to the local node. func (a *App) Client(ctx context.Context) (*client.Client, error) { return client.New(ctx, a.nodeBindAddress) } // Proxy incoming TLS connections. func (a *App) proxy() { wg := sync.WaitGroup{} ctx, cancel := context.WithCancel(a.ctx) for { client, err := a.listener.Accept() if err != nil { cancel() wg.Wait() close(a.proxyCh) return } address := client.RemoteAddr() a.debug("new connection from %s", address) server, err := net.Dial("unix", a.nodeBindAddress) if err != nil { a.error("dial local node: %v", err) client.Close() continue } wg.Add(1) go func() { defer wg.Done() if err := proxy(ctx, client, server, a.tls.Listen); err != nil { a.error("proxy: %v", err) } }() } } // Run background tasks. The join flag is true if the node is a brand new one // and should join the cluster. func (a *App) run(ctx context.Context, frequency time.Duration, join bool) { defer close(a.runCh) delay := time.Duration(0) ready := false for { select { case <-ctx.Done(): // If we didn't become ready yet, close the ready // channel, to unblock any call to Ready(). if !ready { close(a.readyCh) } return case <-time.After(delay): cli, err := a.Leader(ctx) if err != nil { continue } // Attempt to join the cluster if this is a brand new node. if join { info := client.NodeInfo{ID: a.id, Address: a.address, Role: client.Spare} if err := cli.Add(ctx, info); err != nil { a.warn("join cluster: %v", err) delay = time.Second cli.Close() continue } join = false if err := fileRemove(a.dir, joinFile); err != nil { a.error("remove join file: %v", err) } } // Refresh our node store. servers, err := cli.Cluster(ctx) if err != nil { cli.Close() continue } if len(servers) == 0 { a.warn("server list empty") cli.Close() continue } a.store.Set(ctx, servers) // If we are starting up, let's see if we should // promote ourselves. if !ready { if err := a.maybePromoteOurselves(ctx, cli, servers); err != nil { a.warn("%v", err) delay = time.Second cli.Close() continue } ready = true delay = frequency close(a.readyCh) cli.Close() continue } // If we are the leader, let's see if there's any // adjustment we should make to node roles. if err := a.maybeAdjustRoles(ctx, cli); err != nil { a.warn("adjust roles: %v", err) } cli.Close() } } } // Possibly change our own role at startup. func (a *App) maybePromoteOurselves(ctx context.Context, cli *client.Client, nodes []client.NodeInfo) error { roles := a.makeRolesChanges(nodes) role := roles.Assume(a.id) if role == -1 { return nil } // Promote ourselves. if err := cli.Assign(ctx, a.id, role); err != nil { return fmt.Errorf("assign %s role to ourselves: %v", role, err) } // Possibly try to promote another node as well if we've reached the 3 // node threshold. If we don't succeed in doing that, errors are // ignored since the leader will eventually notice that it doesn't have // enough voters and will retry. if role == client.Voter && roles.count(client.Voter, true) == 1 { for node := range roles.State { if node.ID == a.id || node.Role == client.Voter { continue } if err := cli.Assign(ctx, node.ID, client.Voter); err == nil { break } else { a.warn("promote %s from %s to voter: %v", node.Address, node.Role, err) } } } return nil } // Check if any adjustment needs to be made to existing roles. 
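// Only the current leader performs adjustments, and it keeps looping until no // further role change is needed.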
func (a *App) maybeAdjustRoles(ctx context.Context, cli *client.Client) error { again: info, err := cli.Leader(ctx) if err != nil { return err } if info.ID != a.id { return nil } nodes, err := cli.Cluster(ctx) if err != nil { return err } roles := a.makeRolesChanges(nodes) role, nodes := roles.Adjust(a.id) if role == -1 { return nil } for i, node := range nodes { if err := cli.Assign(ctx, node.ID, role); err != nil { a.warn("change %s from %s to %s: %v", node.Address, node.Role, role, err) if i == len(nodes)-1 { // We could not change any node return fmt.Errorf("could not assign role %s to any node", role) } continue } break } goto again } // Probe all given nodes for connectivity and metadata, then return a // RolesChanges object. func (a *App) makeRolesChanges(nodes []client.NodeInfo) RolesChanges { state := map[client.NodeInfo]*client.NodeMetadata{} for _, node := range nodes { state[node] = nil } var ( mtx sync.Mutex // Protects state map wg sync.WaitGroup // Wait for all probes to finish nProbes = runtime.NumCPU() sem = semaphore.NewWeighted(int64(nProbes)) // Limit number of parallel probes ) for _, node := range nodes { wg.Add(1) // sem.Acquire will not block forever because the goroutines // that release the semaphore will eventually timeout. if err := sem.Acquire(context.Background(), 1); err != nil { a.warn("failed to acquire semaphore: %v", err) wg.Done() continue } go func(node protocol.NodeInfo) { defer wg.Done() defer sem.Release(1) ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() cli, err := client.New(ctx, node.Address, a.clientOptions()...) if err == nil { metadata, err := cli.Describe(ctx) if err == nil { mtx.Lock() state[node] = metadata mtx.Unlock() } cli.Close() } }(node) } wg.Wait() return RolesChanges{Config: a.roles, State: state} } // Return the options to use for client.FindLeader() or client.New() func (a *App) clientOptions() []client.Option { return []client.Option{client.WithDialFunc(a.dialFunc), client.WithLogFunc(a.log)} } func (a *App) debug(format string, args ...interface{}) { a.log(client.LogDebug, format, args...) } func (a *App) info(format string, args ...interface{}) { a.log(client.LogInfo, format, args...) } func (a *App) warn(format string, args ...interface{}) { a.log(client.LogWarn, format, args...) } func (a *App) error(format string, args ...interface{}) { a.log(client.LogError, format, args...) } golang-github-cowsql-go-cowsql-1.22.0/app/app_go1.18_test.go000066400000000000000000000076471447672437700235300ustar00rootroot00000000000000//go:build go1.18 // +build go1.18 package app_test // import ( // "context" // "crypto/tls" // "net" // "testing" // "github.com/canonical/go-dqlite/app" // "github.com/canonical/go-dqlite/client" // "github.com/quic-go/quic-go" // "github.com/stretchr/testify/assert" // "github.com/stretchr/testify/require" // ) // // quic.Stream doesn't implement net.Conn, so we need to wrap it. // type quicConn struct { // quic.Stream // } // func (c *quicConn) LocalAddr() net.Addr { // return nil // } // func (c *quicConn) RemoteAddr() net.Addr { // return nil // } // // TestExternalConnWithQUIC creates a 3-member cluster using external quic connection // // and ensures the cluster is successfully created, and that the connection is // // handled manually. 
// func TestExternalConnWithQUIC(t *testing.T) { // externalAddr1 := "127.0.0.1:9191" // externalAddr2 := "127.0.0.1:9292" // externalAddr3 := "127.0.0.1:9393" // acceptCh1 := make(chan net.Conn) // acceptCh2 := make(chan net.Conn) // acceptCh3 := make(chan net.Conn) // dialFunc := func(ctx context.Context, addr string) (net.Conn, error) { // conn, err := quic.DialAddrContext(ctx, addr, &tls.Config{InsecureSkipVerify: true, NextProtos: []string{"quic"}}, nil) // require.NoError(t, err) // stream, err := conn.OpenStreamSync(ctx) // require.NoError(t, err) // return &quicConn{ // Stream: stream, // }, nil // } // cert, pool := loadCert(t) // tlsconfig := app.SimpleListenTLSConfig(cert, pool) // tlsconfig.NextProtos = []string{"quic"} // tlsconfig.ClientAuth = tls.NoClientCert // serveQUIC := func(addr string, acceptCh chan net.Conn, cleanups chan func()) { // lis, err := quic.ListenAddr(addr, tlsconfig, nil) // require.NoError(t, err) // ctx, cancel := context.WithCancel(context.Background()) // go func() { // for { // select { // case <-ctx.Done(): // return // default: // conn, err := lis.Accept(context.Background()) // if err != nil { // return // } // stream, err := conn.AcceptStream(context.Background()) // if err != nil { // return // } // acceptCh <- &quicConn{ // Stream: stream, // } // } // } // }() // cleanup := func() { // cancel() // require.NoError(t, lis.Close()) // } // cleanups <- cleanup // } // liscleanups := make(chan func(), 3) // // Start up three listeners. // go serveQUIC(externalAddr1, acceptCh1, liscleanups) // go serveQUIC(externalAddr2, acceptCh2, liscleanups) // go serveQUIC(externalAddr3, acceptCh3, liscleanups) // defer func() { // for i := 0; i < 3; i++ { // cleanup := <-liscleanups // cleanup() // } // close(liscleanups) // }() // app1, cleanup := newAppWithNoTLS(t, app.WithAddress(externalAddr1), app.WithExternalConn(dialFunc, acceptCh1)) // defer cleanup() // app2, cleanup := newAppWithNoTLS(t, app.WithAddress(externalAddr2), app.WithExternalConn(dialFunc, acceptCh2), app.WithCluster([]string{externalAddr1})) // defer cleanup() // require.NoError(t, app2.Ready(context.Background())) // app3, cleanup := newAppWithNoTLS(t, app.WithAddress(externalAddr3), app.WithExternalConn(dialFunc, acceptCh3), app.WithCluster([]string{externalAddr1})) // defer cleanup() // require.NoError(t, app3.Ready(context.Background())) // // Get a client from the first node (likely the leader). // cli, err := app1.Leader(context.Background()) // require.NoError(t, err) // defer cli.Close() // // Ensure entries exist for each cluster member. // cluster, err := cli.Cluster(context.Background()) // require.NoError(t, err) // assert.Equal(t, externalAddr1, cluster[0].Address) // assert.Equal(t, externalAddr2, cluster[1].Address) // assert.Equal(t, externalAddr3, cluster[2].Address) // // Every cluster member should be a voter. 
// assert.Equal(t, client.Voter, cluster[0].Role) // assert.Equal(t, client.Voter, cluster[1].Role) // assert.Equal(t, client.Voter, cluster[2].Role) // } golang-github-cowsql-go-cowsql-1.22.0/app/app_test.go000066400000000000000000001016461447672437700225250ustar00rootroot00000000000000package app_test import ( "bufio" "context" "crypto/tls" "crypto/x509" "database/sql" "encoding/binary" "fmt" "io/ioutil" "net" "net/http" "net/url" "os" "path/filepath" "strings" "testing" "time" "github.com/cowsql/go-cowsql" "github.com/cowsql/go-cowsql/app" "github.com/cowsql/go-cowsql/client" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // Create a pristine bootstrap node with default value. func TestNew_PristineDefault(t *testing.T) { _, cleanup := newApp(t, app.WithAddress("127.0.0.1:9000")) defer cleanup() } // Create a pristine joining node. func TestNew_PristineJoiner(t *testing.T) { addr1 := "127.0.0.1:9001" addr2 := "127.0.0.1:9002" app1, cleanup := newApp(t, app.WithAddress(addr1)) defer cleanup() app2, cleanup := newApp(t, app.WithAddress(addr2), app.WithCluster([]string{addr1})) defer cleanup() require.NoError(t, app2.Ready(context.Background())) // The joining node to appear in the cluster list. cli, err := app1.Leader(context.Background()) require.NoError(t, err) defer cli.Close() cluster, err := cli.Cluster(context.Background()) require.NoError(t, err) assert.Equal(t, addr1, cluster[0].Address) assert.Equal(t, addr2, cluster[1].Address) // Initially the node joins as spare. assert.Equal(t, client.Voter, cluster[0].Role) assert.Equal(t, client.Spare, cluster[1].Role) } // Restart a node that had previously joined the cluster successfully. func TestNew_JoinerRestart(t *testing.T) { addr1 := "127.0.0.1:9001" addr2 := "127.0.0.1:9002" app1, cleanup := newApp(t, app.WithAddress(addr1)) defer cleanup() require.NoError(t, app1.Ready(context.Background())) dir2, cleanup := newDir(t) defer cleanup() app2, cleanup := newAppWithDir(t, dir2, app.WithAddress(addr2), app.WithCluster([]string{addr1})) require.NoError(t, app2.Ready(context.Background())) cleanup() app2, cleanup = newAppWithDir(t, dir2, app.WithAddress(addr2)) defer cleanup() require.NoError(t, app2.Ready(context.Background())) } // The second joiner promotes itself and also the first joiner. func TestNew_SecondJoiner(t *testing.T) { addr1 := "127.0.0.1:9001" addr2 := "127.0.0.1:9002" addr3 := "127.0.0.1:9003" app1, cleanup := newApp(t, app.WithAddress(addr1)) defer cleanup() app2, cleanup := newApp(t, app.WithAddress(addr2), app.WithCluster([]string{addr1})) defer cleanup() require.NoError(t, app2.Ready(context.Background())) app3, cleanup := newApp(t, app.WithAddress(addr3), app.WithCluster([]string{addr1})) defer cleanup() require.NoError(t, app3.Ready(context.Background())) cli, err := app1.Leader(context.Background()) require.NoError(t, err) defer cli.Close() cluster, err := cli.Cluster(context.Background()) require.NoError(t, err) assert.Equal(t, addr1, cluster[0].Address) assert.Equal(t, addr2, cluster[1].Address) assert.Equal(t, addr3, cluster[2].Address) assert.Equal(t, client.Voter, cluster[0].Role) assert.Equal(t, client.Voter, cluster[1].Role) assert.Equal(t, client.Voter, cluster[2].Role) } // The third joiner gets the stand-by role. 
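// With the three voter slots already filled, the new node is assigned the // stand-by role instead.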
func TestNew_ThirdJoiner(t *testing.T) { apps := []*app.App{} for i := 0; i < 4; i++ { addr := fmt.Sprintf("127.0.0.1:900%d", i+1) options := []app.Option{app.WithAddress(addr)} if i > 0 { options = append(options, app.WithCluster([]string{"127.0.0.1:9001"})) } app, cleanup := newApp(t, options...) defer cleanup() require.NoError(t, app.Ready(context.Background())) apps = append(apps, app) } cli, err := apps[0].Leader(context.Background()) require.NoError(t, err) defer cli.Close() cluster, err := cli.Cluster(context.Background()) require.NoError(t, err) assert.Equal(t, client.Voter, cluster[0].Role) assert.Equal(t, client.Voter, cluster[1].Role) assert.Equal(t, client.Voter, cluster[2].Role) assert.Equal(t, client.StandBy, cluster[3].Role) } // The fourth joiner gets the stand-by role. func TestNew_FourthJoiner(t *testing.T) { apps := []*app.App{} for i := 0; i < 5; i++ { addr := fmt.Sprintf("127.0.0.1:900%d", i+1) options := []app.Option{app.WithAddress(addr)} if i > 0 { options = append(options, app.WithCluster([]string{"127.0.0.1:9001"})) } app, cleanup := newApp(t, options...) defer cleanup() require.NoError(t, app.Ready(context.Background())) apps = append(apps, app) } cli, err := apps[0].Leader(context.Background()) require.NoError(t, err) defer cli.Close() cluster, err := cli.Cluster(context.Background()) require.NoError(t, err) assert.Equal(t, client.Voter, cluster[0].Role) assert.Equal(t, client.Voter, cluster[1].Role) assert.Equal(t, client.Voter, cluster[2].Role) assert.Equal(t, client.StandBy, cluster[3].Role) assert.Equal(t, client.StandBy, cluster[4].Role) } // The fifth joiner gets the stand-by role. func TestNew_FifthJoiner(t *testing.T) { apps := []*app.App{} for i := 0; i < 6; i++ { addr := fmt.Sprintf("127.0.0.1:900%d", i+1) options := []app.Option{app.WithAddress(addr)} if i > 0 { options = append(options, app.WithCluster([]string{"127.0.0.1:9001"})) } app, cleanup := newApp(t, options...) defer cleanup() require.NoError(t, app.Ready(context.Background())) apps = append(apps, app) } cli, err := apps[0].Leader(context.Background()) require.NoError(t, err) defer cli.Close() cluster, err := cli.Cluster(context.Background()) require.NoError(t, err) assert.Equal(t, client.Voter, cluster[0].Role) assert.Equal(t, client.Voter, cluster[1].Role) assert.Equal(t, client.Voter, cluster[2].Role) assert.Equal(t, client.StandBy, cluster[3].Role) assert.Equal(t, client.StandBy, cluster[4].Role) assert.Equal(t, client.StandBy, cluster[5].Role) } // The sixth joiner gets the spare role. func TestNew_SixthJoiner(t *testing.T) { apps := []*app.App{} for i := 0; i < 7; i++ { addr := fmt.Sprintf("127.0.0.1:900%d", i+1) options := []app.Option{app.WithAddress(addr)} if i > 0 { options = append(options, app.WithCluster([]string{"127.0.0.1:9001"})) } app, cleanup := newApp(t, options...) defer cleanup() require.NoError(t, app.Ready(context.Background())) apps = append(apps, app) } cli, err := apps[0].Leader(context.Background()) require.NoError(t, err) defer cli.Close() cluster, err := cli.Cluster(context.Background()) require.NoError(t, err) assert.Equal(t, client.Voter, cluster[0].Role) assert.Equal(t, client.Voter, cluster[1].Role) assert.Equal(t, client.Voter, cluster[2].Role) assert.Equal(t, client.StandBy, cluster[3].Role) assert.Equal(t, client.StandBy, cluster[4].Role) assert.Equal(t, client.StandBy, cluster[5].Role) assert.Equal(t, client.Spare, cluster[6].Role) } // Transfer voting rights to another online node. 
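// Handing the voter role over demotes the node to spare and promotes an // online stand-by in its place.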
func TestHandover_Voter(t *testing.T) { n := 4 apps := make([]*app.App, n) for i := 0; i < n; i++ { addr := fmt.Sprintf("127.0.0.1:900%d", i+1) options := []app.Option{app.WithAddress(addr)} if i > 0 { options = append(options, app.WithCluster([]string{"127.0.0.1:9001"})) } app, cleanup := newApp(t, options...) defer cleanup() require.NoError(t, app.Ready(context.Background())) apps[i] = app } cli, err := apps[0].Leader(context.Background()) require.NoError(t, err) defer cli.Close() cluster, err := cli.Cluster(context.Background()) require.NoError(t, err) assert.Equal(t, client.Voter, cluster[0].Role) assert.Equal(t, client.Voter, cluster[1].Role) assert.Equal(t, client.Voter, cluster[2].Role) assert.Equal(t, client.StandBy, cluster[3].Role) require.NoError(t, apps[2].Handover(context.Background())) cluster, err = cli.Cluster(context.Background()) require.NoError(t, err) assert.Equal(t, client.Voter, cluster[0].Role) assert.Equal(t, client.Voter, cluster[1].Role) assert.Equal(t, client.Spare, cluster[2].Role) assert.Equal(t, client.Voter, cluster[3].Role) } // In a two-node cluster only one node is a voter. When Handover() is called // on the voter, the role and leadership are transferred. func TestHandover_TwoNodes(t *testing.T) { n := 2 apps := make([]*app.App, n) for i := 0; i < n; i++ { addr := fmt.Sprintf("127.0.0.1:900%d", i+1) options := []app.Option{app.WithAddress(addr)} if i > 0 { options = append(options, app.WithCluster([]string{"127.0.0.1:9001"})) } app, cleanup := newApp(t, options...) defer cleanup() require.NoError(t, app.Ready(context.Background())) apps[i] = app } err := apps[0].Handover(context.Background()) require.NoError(t, err) cli, err := apps[1].Leader(context.Background()) require.NoError(t, err) defer cli.Close() cluster, err := cli.Cluster(context.Background()) require.NoError(t, err) assert.Equal(t, client.Spare, cluster[0].Role) assert.Equal(t, client.Voter, cluster[1].Role) } // Transfer voting rights to another online node. Failure domains are taken // into account. func TestHandover_VoterHonorFailureDomain(t *testing.T) { n := 6 apps := make([]*app.App, n) for i := 0; i < n; i++ { addr := fmt.Sprintf("127.0.0.1:900%d", i+1) options := []app.Option{ app.WithAddress(addr), app.WithFailureDomain(uint64(i % 3)), } if i > 0 { options = append(options, app.WithCluster([]string{"127.0.0.1:9001"})) } app, cleanup := newApp(t, options...) defer cleanup() require.NoError(t, app.Ready(context.Background())) apps[i] = app } cli, err := apps[0].Leader(context.Background()) require.NoError(t, err) defer cli.Close() cluster, err := cli.Cluster(context.Background()) require.NoError(t, err) require.NoError(t, apps[2].Handover(context.Background())) cluster, err = cli.Cluster(context.Background()) require.NoError(t, err) assert.Equal(t, client.Voter, cluster[0].Role) assert.Equal(t, client.Voter, cluster[1].Role) assert.Equal(t, client.Spare, cluster[2].Role) assert.Equal(t, client.StandBy, cluster[3].Role) assert.Equal(t, client.StandBy, cluster[4].Role) assert.Equal(t, client.Voter, cluster[5].Role) } // Handover with a single node. func TestHandover_SingleNode(t *testing.T) { dir, cleanup := newDir(t) defer cleanup() app, err := app.New(dir, app.WithAddress("127.0.0.1:9001")) require.NoError(t, err) require.NoError(t, app.Ready(context.Background())) require.NoError(t, app.Handover(context.Background())) require.NoError(t, app.Close()) } // Exercise a sequential graceful shutdown of a 3-node cluster. 
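// Each node calls Handover() before Close(), so leadership and voting rights // are passed on cleanly at every step.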
func TestHandover_GracefulShutdown(t *testing.T) { n := 3 apps := make([]*app.App, n) for i := 0; i < n; i++ { dir, cleanup := newDir(t) defer cleanup() addr := fmt.Sprintf("127.0.0.1:900%d", i+1) options := []app.Option{ app.WithAddress(addr), } if i > 0 { options = append(options, app.WithCluster([]string{"127.0.0.1:9001"})) } app, err := app.New(dir, options...) require.NoError(t, err) require.NoError(t, app.Ready(context.Background())) apps[i] = app } db, err := sql.Open(apps[0].Driver(), "test.db") require.NoError(t, err) _, err = db.Exec("CREATE TABLE test (n INT)") require.NoError(t, err) require.NoError(t, db.Close()) require.NoError(t, apps[0].Handover(context.Background())) require.NoError(t, apps[0].Close()) require.NoError(t, apps[1].Handover(context.Background())) require.NoError(t, apps[1].Close()) require.NoError(t, apps[2].Handover(context.Background())) require.NoError(t, apps[2].Close()) } // Transfer the stand-by role to another online node. func TestHandover_StandBy(t *testing.T) { n := 7 apps := make([]*app.App, n) for i := 0; i < n; i++ { addr := fmt.Sprintf("127.0.0.1:900%d", i+1) options := []app.Option{app.WithAddress(addr)} if i > 0 { options = append(options, app.WithCluster([]string{"127.0.0.1:9001"})) } app, cleanup := newApp(t, options...) defer cleanup() require.NoError(t, app.Ready(context.Background())) apps[i] = app } cli, err := apps[0].Leader(context.Background()) require.NoError(t, err) defer cli.Close() cluster, err := cli.Cluster(context.Background()) require.NoError(t, err) assert.Equal(t, client.Voter, cluster[0].Role) assert.Equal(t, client.Voter, cluster[1].Role) assert.Equal(t, client.Voter, cluster[2].Role) assert.Equal(t, client.StandBy, cluster[3].Role) assert.Equal(t, client.StandBy, cluster[4].Role) assert.Equal(t, client.StandBy, cluster[5].Role) assert.Equal(t, client.Spare, cluster[6].Role) require.NoError(t, apps[4].Handover(context.Background())) cluster, err = cli.Cluster(context.Background()) require.NoError(t, err) assert.Equal(t, client.Voter, cluster[0].Role) assert.Equal(t, client.Voter, cluster[1].Role) assert.Equal(t, client.Voter, cluster[2].Role) assert.Equal(t, client.StandBy, cluster[3].Role) assert.Equal(t, client.Spare, cluster[4].Role) assert.Equal(t, client.StandBy, cluster[5].Role) assert.Equal(t, client.StandBy, cluster[6].Role) } // Transfer leadership and voting rights to another node. func TestHandover_TransferLeadership(t *testing.T) { n := 4 apps := make([]*app.App, n) for i := 0; i < n; i++ { addr := fmt.Sprintf("127.0.0.1:900%d", i+1) options := []app.Option{app.WithAddress(addr)} if i > 0 { options = append(options, app.WithCluster([]string{"127.0.0.1:9001"})) } app, cleanup := newApp(t, options...) 
defer cleanup() require.NoError(t, app.Ready(context.Background())) apps[i] = app } cli, err := apps[0].Leader(context.Background()) require.NoError(t, err) defer cli.Close() leader, err := cli.Leader(context.Background()) require.NoError(t, err) require.NotNil(t, leader) require.Equal(t, apps[0].ID(), leader.ID) require.NoError(t, apps[0].Handover(context.Background())) cli, err = apps[0].Leader(context.Background()) require.NoError(t, err) defer cli.Close() leader, err = cli.Leader(context.Background()) require.NoError(t, err) assert.NotEqual(t, apps[0].ID(), leader.ID) cluster, err := cli.Cluster(context.Background()) require.NoError(t, err) assert.Equal(t, client.Spare, cluster[0].Role) assert.Equal(t, client.Voter, cluster[1].Role) assert.Equal(t, client.Voter, cluster[2].Role) assert.Equal(t, client.Voter, cluster[3].Role) } // If a voter goes offline, another node takes its place. func TestRolesAdjustment_ReplaceVoter(t *testing.T) { n := 4 apps := make([]*app.App, n) cleanups := make([]func(), n) for i := 0; i < n; i++ { addr := fmt.Sprintf("127.0.0.1:900%d", i+1) options := []app.Option{ app.WithAddress(addr), app.WithRolesAdjustmentFrequency(2 * time.Second), } if i > 0 { options = append(options, app.WithCluster([]string{"127.0.0.1:9001"})) } app, cleanup := newApp(t, options...) require.NoError(t, app.Ready(context.Background())) apps[i] = app cleanups[i] = cleanup } defer cleanups[0]() defer cleanups[1]() defer cleanups[3]() // A voter goes offline. cleanups[2]() time.Sleep(8 * time.Second) cli, err := apps[0].Leader(context.Background()) require.NoError(t, err) defer cli.Close() cluster, err := cli.Cluster(context.Background()) require.NoError(t, err) assert.Equal(t, client.Voter, cluster[0].Role) assert.Equal(t, client.Voter, cluster[1].Role) assert.Equal(t, client.Spare, cluster[2].Role) assert.Equal(t, client.Voter, cluster[3].Role) } // If a voter goes offline, another node takes its place. If possible, pick a // voter from a failure domain which differs from the one of the two other // voters. func TestRolesAdjustment_ReplaceVoterHonorFailureDomain(t *testing.T) { n := 6 apps := make([]*app.App, n) cleanups := make([]func(), n) for i := 0; i < n; i++ { addr := fmt.Sprintf("127.0.0.1:900%d", i+1) options := []app.Option{ app.WithAddress(addr), app.WithRolesAdjustmentFrequency(4 * time.Second), app.WithFailureDomain(uint64(i % 3)), } if i > 0 { options = append(options, app.WithCluster([]string{"127.0.0.1:9001"})) } app, cleanup := newApp(t, options...) require.NoError(t, app.Ready(context.Background())) apps[i] = app cleanups[i] = cleanup } defer cleanups[0]() defer cleanups[1]() defer cleanups[3]() defer cleanups[4]() defer cleanups[5]() // A voter in failure domain 2 goes offline. cleanups[2]() time.Sleep(18 * time.Second) cli, err := apps[0].Leader(context.Background()) require.NoError(t, err) defer cli.Close() cluster, err := cli.Cluster(context.Background()) require.NoError(t, err) // The replacement was picked in the same failure domain. assert.Equal(t, client.Voter, cluster[0].Role) assert.Equal(t, client.Voter, cluster[1].Role) assert.Equal(t, client.Spare, cluster[2].Role) assert.Equal(t, client.StandBy, cluster[3].Role) assert.Equal(t, client.StandBy, cluster[4].Role) assert.Equal(t, client.Voter, cluster[5].Role) } // If a voter goes offline, another node takes its place. Preference will be // given to candidates with lower weights. 
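// Weights are assigned below through the Weight() client call on each // stand-by.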
func TestRolesAdjustment_ReplaceVoterHonorWeight(t *testing.T) { n := 6 apps := make([]*app.App, n) cleanups := make([]func(), n) for i := 0; i < n; i++ { addr := fmt.Sprintf("127.0.0.1:900%d", i+1) options := []app.Option{ app.WithAddress(addr), app.WithRolesAdjustmentFrequency(4 * time.Second), } if i > 0 { options = append(options, app.WithCluster([]string{"127.0.0.1:9001"})) } app, cleanup := newApp(t, options...) require.NoError(t, app.Ready(context.Background())) apps[i] = app cleanups[i] = cleanup } defer cleanups[0]() defer cleanups[1]() defer cleanups[3]() defer cleanups[4]() defer cleanups[5]() // A voter in failure domain 2 goes offline. cleanups[2]() cli, err := apps[3].Client(context.Background()) require.NoError(t, err) require.NoError(t, cli.Weight(context.Background(), uint64(15))) defer cli.Close() cli, err = apps[4].Client(context.Background()) require.NoError(t, err) require.NoError(t, cli.Weight(context.Background(), uint64(5))) defer cli.Close() cli, err = apps[5].Client(context.Background()) require.NoError(t, err) require.NoError(t, cli.Weight(context.Background(), uint64(10))) defer cli.Close() time.Sleep(18 * time.Second) cli, err = apps[0].Leader(context.Background()) require.NoError(t, err) defer cli.Close() cluster, err := cli.Cluster(context.Background()) require.NoError(t, err) // The stand-by with the lowest weight was picked. assert.Equal(t, client.Voter, cluster[0].Role) assert.Equal(t, client.Voter, cluster[1].Role) assert.Equal(t, client.Spare, cluster[2].Role) assert.Equal(t, client.StandBy, cluster[3].Role) assert.Equal(t, client.Voter, cluster[4].Role) assert.Equal(t, client.StandBy, cluster[5].Role) } // If a voter goes offline, but no another node can its place, then nothing // chagnes. func TestRolesAdjustment_CantReplaceVoter(t *testing.T) { n := 4 apps := make([]*app.App, n) cleanups := make([]func(), n) for i := 0; i < n; i++ { addr := fmt.Sprintf("127.0.0.1:900%d", i+1) options := []app.Option{ app.WithAddress(addr), app.WithRolesAdjustmentFrequency(4 * time.Second), } if i > 0 { options = append(options, app.WithCluster([]string{"127.0.0.1:9001"})) } app, cleanup := newApp(t, options...) require.NoError(t, app.Ready(context.Background())) apps[i] = app cleanups[i] = cleanup } defer cleanups[0]() defer cleanups[1]() // A voter and a spare go offline. cleanups[3]() cleanups[2]() time.Sleep(12 * time.Second) cli, err := apps[0].Leader(context.Background()) require.NoError(t, err) defer cli.Close() cluster, err := cli.Cluster(context.Background()) require.NoError(t, err) assert.Equal(t, client.Voter, cluster[0].Role) assert.Equal(t, client.Voter, cluster[1].Role) assert.Equal(t, client.Voter, cluster[2].Role) assert.Equal(t, client.StandBy, cluster[3].Role) } // If a stand-by goes offline, another node takes its place. func TestRolesAdjustment_ReplaceStandBy(t *testing.T) { n := 7 apps := make([]*app.App, n) cleanups := make([]func(), n) for i := 0; i < n; i++ { addr := fmt.Sprintf("127.0.0.1:900%d", i+1) options := []app.Option{ app.WithAddress(addr), app.WithRolesAdjustmentFrequency(5 * time.Second), } if i > 0 { options = append(options, app.WithCluster([]string{"127.0.0.1:9001"})) } app, cleanup := newApp(t, options...) require.NoError(t, app.Ready(context.Background())) apps[i] = app cleanups[i] = cleanup } defer cleanups[0]() defer cleanups[1]() defer cleanups[2]() defer cleanups[3]() defer cleanups[5]() defer cleanups[6]() // A stand-by goes offline. 
cleanups[4]() time.Sleep(20 * time.Second) cli, err := apps[0].Leader(context.Background()) require.NoError(t, err) defer cli.Close() cluster, err := cli.Cluster(context.Background()) require.NoError(t, err) assert.Equal(t, client.Voter, cluster[0].Role) assert.Equal(t, client.Voter, cluster[1].Role) assert.Equal(t, client.Voter, cluster[2].Role) assert.Equal(t, client.StandBy, cluster[3].Role) assert.Equal(t, client.Spare, cluster[4].Role) assert.Equal(t, client.StandBy, cluster[5].Role) assert.Equal(t, client.StandBy, cluster[6].Role) } // If a stand-by goes offline, another node takes its place. If possible, pick // a stand-by from a failure domain which differs from the one of the two other // stand-bys. func TestRolesAdjustment_ReplaceStandByHonorFailureDomains(t *testing.T) { n := 9 apps := make([]*app.App, n) cleanups := make([]func(), n) for i := 0; i < n; i++ { addr := fmt.Sprintf("127.0.0.1:900%d", i+1) options := []app.Option{ app.WithAddress(addr), app.WithRolesAdjustmentFrequency(5 * time.Second), app.WithFailureDomain(uint64(i % 3)), } if i > 0 { options = append(options, app.WithCluster([]string{"127.0.0.1:9001"})) } app, cleanup := newApp(t, options...) require.NoError(t, app.Ready(context.Background())) apps[i] = app cleanups[i] = cleanup } defer cleanups[0]() defer cleanups[1]() defer cleanups[2]() defer cleanups[3]() defer cleanups[5]() defer cleanups[6]() defer cleanups[7]() defer cleanups[8]() // A stand-by from failure domain 1 goes offline. cleanups[4]() time.Sleep(20 * time.Second) cli, err := apps[0].Leader(context.Background()) require.NoError(t, err) defer cli.Close() cluster, err := cli.Cluster(context.Background()) require.NoError(t, err) // The replacement was picked in the same failure domain. assert.Equal(t, client.Voter, cluster[0].Role) assert.Equal(t, client.Voter, cluster[1].Role) assert.Equal(t, client.Voter, cluster[2].Role) assert.Equal(t, client.StandBy, cluster[3].Role) assert.Equal(t, client.Spare, cluster[4].Role) assert.Equal(t, client.StandBy, cluster[5].Role) assert.Equal(t, client.Spare, cluster[6].Role) assert.Equal(t, client.StandBy, cluster[7].Role) assert.Equal(t, client.Spare, cluster[8].Role) } // Open a database on a fresh one-node cluster. func TestOpen(t *testing.T) { app, cleanup := newApp(t, app.WithAddress("127.0.0.1:9000")) defer cleanup() db, err := app.Open(context.Background(), "test") require.NoError(t, err) defer db.Close() _, err = db.ExecContext(context.Background(), "CREATE TABLE foo(n INT)") assert.NoError(t, err) } // Test some setup options func TestOptions(t *testing.T) { options := []app.Option{ app.WithAddress("127.0.0.1:9000"), app.WithNetworkLatency(20 * time.Millisecond), app.WithSnapshotParams(cowsql.SnapshotParams{Threshold: 1024, Trailing: 1024}), app.WithTracing(client.LogDebug), } app, cleanup := newApp(t, options...) defer cleanup() require.NotNil(t, app) } // Test client connections dropping uncleanly. func TestProxy_Error(t *testing.T) { cert, pool := loadCert(t) dial := client.DialFuncWithTLS(client.DefaultDialFunc, app.SimpleDialTLSConfig(cert, pool)) _, cleanup := newApp(t, app.WithAddress("127.0.0.1:9000")) defer cleanup() // Simulate a client which writes the protocol header, then a Leader // request and finally drops before reading the response. 
conn, err := dial(context.Background(), "127.0.0.1:9000") require.NoError(t, err) protocol := make([]byte, 8) binary.LittleEndian.PutUint64(protocol, uint64(1)) n, err := conn.Write(protocol) require.NoError(t, err) assert.Equal(t, n, 8) header := make([]byte, 8) binary.LittleEndian.PutUint32(header[0:], 1) header[4] = 0 header[5] = 0 binary.LittleEndian.PutUint16(header[6:], 0) n, err = conn.Write(header) require.NoError(t, err) assert.Equal(t, n, 8) body := make([]byte, 8) n, err = conn.Write(body) require.NoError(t, err) assert.Equal(t, n, 8) time.Sleep(100 * time.Millisecond) conn.Close() time.Sleep(250 * time.Millisecond) } // If the given context is cancelled before initial tasks are completed, an // error is returned. func TestReady_Cancel(t *testing.T) { app, cleanup := newApp(t, app.WithAddress("127.0.0.1:9002"), app.WithCluster([]string{"127.0.0.1:9001"})) defer cleanup() ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) defer cancel() err := app.Ready(ctx) assert.Equal(t, ctx.Err(), err) } func newApp(t *testing.T, options ...app.Option) (*app.App, func()) { t.Helper() dir, dirCleanup := newDir(t) app, appCleanup := newAppWithDir(t, dir, options...) cleanup := func() { appCleanup() dirCleanup() } return app, cleanup } // TestExternalConn creates a 3-member cluster using external http connections // and ensures the cluster is successfully created, and that the connection is // handled manually. func TestExternalConnWithTCP(t *testing.T) { externalAddr1 := "127.0.0.1:9191" externalAddr2 := "127.0.0.1:9292" externalAddr3 := "127.0.0.1:9393" acceptCh1 := make(chan net.Conn) acceptCh2 := make(chan net.Conn) acceptCh3 := make(chan net.Conn) hijackStatus := "101 Switching Protocols" dialFunc := func(ctx context.Context, addr string) (net.Conn, error) { conn, err := net.Dial("tcp", addr) require.NoError(t, err) request := &http.Request{} request.URL, err = url.Parse("http://" + addr) require.NoError(t, err) require.NoError(t, request.Write(conn)) resp, err := http.ReadResponse(bufio.NewReader(conn), request) require.NoError(t, err) require.Equal(t, hijackStatus, resp.Status) return conn, nil } newHandler := func(acceptCh chan net.Conn) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { hijacker, ok := w.(http.Hijacker) require.True(t, ok) conn, _, err := hijacker.Hijack() require.NoError(t, err) acceptCh <- conn } } // Start up three listeners. go http.ListenAndServe(externalAddr1, newHandler(acceptCh1)) go http.ListenAndServe(externalAddr2, newHandler(acceptCh2)) go http.ListenAndServe(externalAddr3, newHandler(acceptCh3)) app1, cleanup := newAppWithNoTLS(t, app.WithAddress(externalAddr1), app.WithExternalConn(dialFunc, acceptCh1)) defer cleanup() app2, cleanup := newAppWithNoTLS(t, app.WithAddress(externalAddr2), app.WithExternalConn(dialFunc, acceptCh2), app.WithCluster([]string{externalAddr1})) defer cleanup() require.NoError(t, app2.Ready(context.Background())) app3, cleanup := newAppWithNoTLS(t, app.WithAddress(externalAddr3), app.WithExternalConn(dialFunc, acceptCh3), app.WithCluster([]string{externalAddr1})) defer cleanup() require.NoError(t, app3.Ready(context.Background())) // Get a client from the first node (likely the leader). cli, err := app1.Leader(context.Background()) require.NoError(t, err) defer cli.Close() // Ensure entries exist for each cluster member. 
cluster, err := cli.Cluster(context.Background()) require.NoError(t, err) assert.Equal(t, externalAddr1, cluster[0].Address) assert.Equal(t, externalAddr2, cluster[1].Address) assert.Equal(t, externalAddr3, cluster[2].Address) // Every cluster member should be a voter. assert.Equal(t, client.Voter, cluster[0].Role) assert.Equal(t, client.Voter, cluster[1].Role) assert.Equal(t, client.Voter, cluster[2].Role) } // TestExternalPipe creates a 3-member cluster using net.Pipe // and ensures the cluster is successfully created, and that the connection is // handled manually. func TestExternalConnWithPipe(t *testing.T) { externalAddr1 := "first" externalAddr2 := "second" externalAddr3 := "third" acceptCh1 := make(chan net.Conn) acceptCh2 := make(chan net.Conn) acceptCh3 := make(chan net.Conn) dialChannels := map[string]chan net.Conn{ externalAddr1: acceptCh1, externalAddr2: acceptCh2, externalAddr3: acceptCh3, } dialFunc := func(_ context.Context, addr string) (net.Conn, error) { client, server := net.Pipe() dialChannels[addr] <- server return client, nil } app1, cleanup := newAppWithNoTLS(t, app.WithAddress(externalAddr1), app.WithExternalConn(dialFunc, acceptCh1)) defer cleanup() app2, cleanup := newAppWithNoTLS(t, app.WithAddress(externalAddr2), app.WithExternalConn(dialFunc, acceptCh2), app.WithCluster([]string{externalAddr1})) defer cleanup() require.NoError(t, app2.Ready(context.Background())) app3, cleanup := newAppWithNoTLS(t, app.WithAddress(externalAddr3), app.WithExternalConn(dialFunc, acceptCh3), app.WithCluster([]string{externalAddr1})) defer cleanup() require.NoError(t, app3.Ready(context.Background())) // Get a client from the first node (likely the leader). cli, err := app1.Leader(context.Background()) require.NoError(t, err) defer cli.Close() // Ensure entries exist for each cluster member. cluster, err := cli.Cluster(context.Background()) require.NoError(t, err) assert.Equal(t, externalAddr1, cluster[0].Address) assert.Equal(t, externalAddr2, cluster[1].Address) assert.Equal(t, externalAddr3, cluster[2].Address) // Every cluster member should be a voter. assert.Equal(t, client.Voter, cluster[0].Role) assert.Equal(t, client.Voter, cluster[1].Role) assert.Equal(t, client.Voter, cluster[2].Role) } func TestParallelNewApp(t *testing.T) { t.Parallel() for i := 0; i < 100; i++ { i := i t.Run(fmt.Sprintf("run-%d", i), func(tt *testing.T) { tt.Parallel() // TODO: switch this to tt.TempDir when we switch to tmpDir := filepath.Join(os.TempDir(), strings.ReplaceAll(tt.Name(), "/", "-")) require.NoError(tt, os.MkdirAll(tmpDir, 0700)) dqApp, err := app.New(tmpDir, app.WithAddress(fmt.Sprintf("127.0.0.1:%d", 10200+i)), ) require.NoError(tt, err) defer func() { _ = dqApp.Close() _ = os.RemoveAll(tmpDir) }() }) } } func newAppWithDir(t *testing.T, dir string, options ...app.Option) (*app.App, func()) { t.Helper() appIndex++ index := appIndex log := func(l client.LogLevel, format string, a ...interface{}) { format = fmt.Sprintf("%s - %d: %s: %s", time.Now().Format("15:04:01.000"), index, l.String(), format) t.Logf(format, a...) } cert, pool := loadCert(t) options = append(options, app.WithLogFunc(log), app.WithTLS(app.SimpleTLSConfig(cert, pool))) app, err := app.New(dir, options...) 
require.NoError(t, err) cleanup := func() { require.NoError(t, app.Close()) } return app, cleanup } func newAppWithNoTLS(t *testing.T, options ...app.Option) (*app.App, func()) { t.Helper() dir, dirCleanup := newDir(t) appIndex++ index := appIndex log := func(l client.LogLevel, format string, a ...interface{}) { format = fmt.Sprintf("%s - %d: %s: %s", time.Now().Format("15:04:01.000"), index, l.String(), format) t.Logf(format, a...) } options = append(options, app.WithLogFunc(log)) app, err := app.New(dir, options...) require.NoError(t, err) cleanup := func() { require.NoError(t, app.Close()) dirCleanup() } return app, cleanup } // Loads the test TLS certificates. func loadCert(t *testing.T) (tls.Certificate, *x509.CertPool) { t.Helper() crt := filepath.Join("testdata", "cluster.crt") key := filepath.Join("testdata", "cluster.key") keypair, err := tls.LoadX509KeyPair(crt, key) require.NoError(t, err) data, err := ioutil.ReadFile(crt) require.NoError(t, err) pool := x509.NewCertPool() if !pool.AppendCertsFromPEM(data) { t.Fatal("bad certificate") } return keypair, pool } var appIndex int // Return a new temporary directory. func newDir(t *testing.T) (string, func()) { t.Helper() dir, err := ioutil.TempDir("", "cowsql-app-test-") assert.NoError(t, err) cleanup := func() { os.RemoveAll(dir) } return dir, cleanup } func Test_TxRowsAffected(t *testing.T) { app, cleanup := newAppWithNoTLS(t, app.WithAddress("127.0.0.1:9001")) defer cleanup() err := app.Ready(context.Background()) require.NoError(t, err) db, err := app.Open(context.Background(), "test") require.NoError(t, err) defer db.Close() _, err = db.ExecContext(context.Background(), ` CREATE TABLE test ( id TEXT PRIMARY KEY, value INT );`) require.NoError(t, err) // Insert watermark err = tx(context.Background(), db, func(ctx context.Context, tx *sql.Tx) error { query := ` INSERT INTO test (id, value) VALUES ('id0', -1); ` result, err := tx.ExecContext(ctx, query) if err != nil { return err } _, err = result.RowsAffected() if err != nil { return err } return nil }) require.NoError(t, err) // Update watermark err = tx(context.Background(), db, func(ctx context.Context, tx *sql.Tx) error { query := ` UPDATE test SET value = 1 WHERE id = 'id0'; ` result, err := tx.ExecContext(ctx, query) if err != nil { return err } affected, err := result.RowsAffected() if err != nil { return err } if affected != 1 { return fmt.Errorf("expected 1 row affected, got %d", affected) } return nil }) require.NoError(t, err) } func tx(ctx context.Context, db *sql.DB, fn func(context.Context, *sql.Tx) error) error { tx, err := db.BeginTx(ctx, nil) if err != nil { return err } if err := fn(ctx, tx); err != nil { _ = tx.Rollback() return err } return tx.Commit() } golang-github-cowsql-go-cowsql-1.22.0/app/dial.go000066400000000000000000000027371447672437700216200ustar00rootroot00000000000000package app import ( "context" "crypto/tls" "fmt" "net" "github.com/cowsql/go-cowsql/client" ) // Like client.DialFuncWithTLS but also starts the proxy, since the raft // connect function only supports Unix and TCP connections. 
golang-github-cowsql-go-cowsql-1.22.0/app/dial.go000066400000000000000000000027371447672437700216140ustar00rootroot00000000000000package app

import (
	"context"
	"crypto/tls"
	"fmt"
	"net"

	"github.com/cowsql/go-cowsql/client"
)

// Like client.DialFuncWithTLS but also starts the proxy, since the raft
// connect function only supports Unix and TCP connections.
func makeNodeDialFunc(appCtx context.Context, config *tls.Config) client.DialFunc {
	dial := func(ctx context.Context, addr string) (net.Conn, error) {
		clonedConfig := config.Clone()
		if len(clonedConfig.ServerName) == 0 {
			remoteIP, _, err := net.SplitHostPort(addr)
			if err != nil {
				return nil, err
			}
			clonedConfig.ServerName = remoteIP
		}
		dialer := &net.Dialer{}
		conn, err := dialer.DialContext(ctx, "tcp", addr)
		if err != nil {
			return nil, err
		}

		goUnix, cUnix, err := socketpair()
		if err != nil {
			return nil, fmt.Errorf("create pair of Unix sockets: %w", err)
		}

		go proxy(appCtx, conn, goUnix, clonedConfig)

		return cUnix, nil
	}

	return dial
}

// extDialFuncWithProxy executes given DialFunc and then copies the data back
// and forth between the remote connection and a local unix socket.
func extDialFuncWithProxy(appCtx context.Context, dialFunc client.DialFunc) client.DialFunc {
	return func(ctx context.Context, addr string) (net.Conn, error) {
		goUnix, cUnix, err := socketpair()
		if err != nil {
			return nil, fmt.Errorf("create pair of Unix sockets: %w", err)
		}

		conn, err := dialFunc(ctx, addr)
		if err != nil {
			return nil, err
		}

		go proxy(appCtx, conn, goUnix, nil)

		return cUnix, nil
	}
}
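Below is a minimal sketch (not part of the archive) of the kind of client.DialFunc that extDialFuncWithProxy wraps; the name plainDial is illustrative. It dials bare TCP and leaves the byte shuttling to the proxy goroutine that extDialFuncWithProxy starts:

package example

import (
	"context"
	"net"

	"github.com/cowsql/go-cowsql/client"
)

// plainDial is an illustrative client.DialFunc: it establishes a plain TCP
// connection and performs no TLS or framing of its own.
var plainDial client.DialFunc = func(ctx context.Context, addr string) (net.Conn, error) {
	dialer := &net.Dialer{}
	return dialer.DialContext(ctx, "tcp", addr)
}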
golang-github-cowsql-go-cowsql-1.22.0/app/example_test.go000066400000000000000000000052341447672437700233740ustar00rootroot00000000000000package app_test

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/cowsql/go-cowsql/app"
)

// To start the first node of a cowsql cluster for the first time, its network
// address should be specified using the app.WithAddress() option.
//
// When the node is restarted a second time, the app.WithAddress() option might
// be omitted, since the node address will be persisted in the info.yaml file.
//
// The very first node always has the same ID (cowsql.BootstrapID).
func Example() {
	dir, err := ioutil.TempDir("", "cowsql-app-example-")
	if err != nil {
		return
	}
	defer os.RemoveAll(dir)

	node, err := app.New(dir, app.WithAddress("127.0.0.1:9001"))
	if err != nil {
		return
	}

	fmt.Printf("0x%x %s\n", node.ID(), node.Address())

	if err := node.Close(); err != nil {
		return
	}

	node, err = app.New(dir)
	if err != nil {
		return
	}
	defer node.Close()

	fmt.Printf("0x%x %s\n", node.ID(), node.Address())
	// Output: 0x2dc171858c3155be 127.0.0.1:9001
	// 0x2dc171858c3155be 127.0.0.1:9001
}

// After starting the very first node, a second node can be started by passing
// the address of the first node using the app.WithCluster() option.
//
// In general additional nodes can be started by specifying one or more
// addresses of existing nodes using the app.Cluster() option.
//
// When the node is restarted a second time, the app.WithCluster() option might
// be omitted, since the node has already joined the cluster.
//
// Each additional node will be automatically assigned a unique ID.
func ExampleWithCluster() {
	dir1, err := ioutil.TempDir("", "cowsql-app-example-")
	if err != nil {
		return
	}
	defer os.RemoveAll(dir1)

	dir2, err := ioutil.TempDir("", "cowsql-app-example-")
	if err != nil {
		return
	}
	defer os.RemoveAll(dir2)

	dir3, err := ioutil.TempDir("", "cowsql-app-example-")
	if err != nil {
		return
	}
	defer os.RemoveAll(dir3)

	node1, err := app.New(dir1, app.WithAddress("127.0.0.1:9001"))
	if err != nil {
		return
	}
	defer node1.Close()

	node2, err := app.New(dir2, app.WithAddress("127.0.0.1:9002"), app.WithCluster([]string{"127.0.0.1:9001"}))
	if err != nil {
		return
	}
	defer node2.Close()

	node3, err := app.New(dir3, app.WithAddress("127.0.0.1:9003"), app.WithCluster([]string{"127.0.0.1:9001"}))
	if err != nil {
		return
	}

	fmt.Println(node1.ID() != node2.ID(), node1.ID() != node3.ID(), node2.ID() != node3.ID())
	// true true true

	// Restart the third node, the only argument we need to pass to
	// app.New() is its dir.
	id3 := node3.ID()
	if err := node3.Close(); err != nil {
		return
	}

	node3, err = app.New(dir3)
	if err != nil {
		return
	}
	defer node3.Close()

	fmt.Println(node3.ID() == id3, node3.Address())
	// true 127.0.0.1:9003
}
golang-github-cowsql-go-cowsql-1.22.0/app/files.go000066400000000000000000000035361447672437700220070ustar00rootroot00000000000000package app

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"

	"gopkg.in/yaml.v2"

	"github.com/google/renameio"
)

const (
	// Store the node ID and address.
	infoFile = "info.yaml"

	// The node store file.
	storeFile = "cluster.yaml"

	// This is a "flag" file to signal when a brand new node needs to join
	// the cluster. In case the node doesn't successfully make it to join
	// the cluster first time it's started, it will re-try the next time.
	joinFile = "join"
)

// Return true if the given file exists in the given directory.
func fileExists(dir, file string) (bool, error) {
	path := filepath.Join(dir, file)

	if _, err := os.Stat(path); err != nil {
		if !os.IsNotExist(err) {
			return false, fmt.Errorf("check if %s exists: %w", file, err)
		}
		return false, nil
	}

	return true, nil
}

// Write a file in the given directory.
func fileWrite(dir, file string, data []byte) error {
	path := filepath.Join(dir, file)

	if err := renameio.WriteFile(path, data, 0600); err != nil {
		return fmt.Errorf("write %s: %w", file, err)
	}

	return nil
}

// Marshal the given object as YAML into the given file.
func fileMarshal(dir, file string, object interface{}) error {
	data, err := yaml.Marshal(object)
	if err != nil {
		return fmt.Errorf("marshal %s: %w", file, err)
	}
	if err := fileWrite(dir, file, data); err != nil {
		return err
	}
	return nil
}

// Unmarshal the given YAML file into the given object.
func fileUnmarshal(dir, file string, object interface{}) error {
	path := filepath.Join(dir, file)
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return fmt.Errorf("read %s: %w", file, err)
	}
	if err := yaml.Unmarshal(data, object); err != nil {
		return fmt.Errorf("unmarshal %s: %w", file, err)
	}
	return nil
}

// Remove a file in the given directory.
func fileRemove(dir, file string) error {
	return os.Remove(filepath.Join(dir, file))
}
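The files.go helpers above combine YAML marshalling with an atomic rename-based write, so a crash can never leave a torn file on disk. A self-contained sketch of the same pattern (the info type and its field names here are illustrative, not the repository's actual ones):

package example

import (
	"fmt"
	"path/filepath"

	"github.com/google/renameio"
	"gopkg.in/yaml.v2"
)

// info mirrors the kind of object stored in info.yaml; the field set is
// hypothetical.
type info struct {
	ID      uint64 `yaml:"id"`
	Address string `yaml:"address"`
}

// writeInfo follows the same pattern as fileMarshal above: marshal to YAML,
// then write atomically via renameio.
func writeInfo(dir string, v info) error {
	data, err := yaml.Marshal(v)
	if err != nil {
		return fmt.Errorf("marshal info: %w", err)
	}
	return renameio.WriteFile(filepath.Join(dir, "info.yaml"), data, 0600)
}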
golang-github-cowsql-go-cowsql-1.22.0/app/options.go000066400000000000000000000201551447672437700223740ustar00rootroot00000000000000package app

import (
	"crypto/tls"
	"fmt"
	"log"
	"net"
	"strings"
	"time"

	"github.com/cowsql/go-cowsql"
	"github.com/cowsql/go-cowsql/client"
)

// Option can be used to tweak app parameters.
type Option func(*options)

// WithAddress sets the network address of the application node.
//
// Other application nodes must be able to connect to this application node
// using the given address.
//
// If the application node is not the first one in the cluster, the address
// must match the value that was passed to the App.Add() method upon
// registration.
//
// If not given, the first non-loopback IP address of any of the system network
// interfaces will be used, with port 9000.
//
// The address must be stable across application restarts.
func WithAddress(address string) Option {
	return func(options *options) {
		options.Address = address
	}
}

// WithCluster must be used when starting a newly added application node for
// the first time.
//
// It should contain the addresses of one or more application nodes which are
// already part of the cluster.
func WithCluster(cluster []string) Option {
	return func(options *options) {
		options.Cluster = cluster
	}
}

// WithExternalConn enables passing an external dial function that will be used
// whenever cowsql needs to make an outside connection.
//
// Also takes a net.Conn channel on which externally accepted connections
// should be delivered.
func WithExternalConn(dialFunc client.DialFunc, acceptCh chan net.Conn) Option {
	return func(options *options) {
		options.Conn = &connSetup{
			dialFunc: dialFunc,
			acceptCh: acceptCh,
		}
	}
}

// WithTLS enables TLS encryption of network traffic.
//
// The "listen" parameter must hold the TLS configuration to use when accepting
// incoming connections clients or application nodes.
//
// The "dial" parameter must hold the TLS configuration to use when
// establishing outgoing connections to other application nodes.
func WithTLS(listen *tls.Config, dial *tls.Config) Option {
	return func(options *options) {
		options.TLS = &tlsSetup{
			Listen: listen,
			Dial:   dial,
		}
	}
}

// WithUnixSocket allows setting a specific socket path for communication between go-cowsql and cowsql.
//
// The default is an empty string which means a random abstract unix socket.
func WithUnixSocket(path string) Option {
	return func(options *options) {
		options.UnixSocket = path
	}
}

// WithVoters sets the number of nodes in the cluster that should have the
// Voter role.
//
// When a new node is added to the cluster or it is started again after a
// shutdown it will be assigned the Voter role in case the current number of
// voters is below n.
//
// Similarly when a node with the Voter role is shutdown gracefully by calling
// the Handover() method, it will try to transfer its Voter role to another
// non-Voter node, if one is available.
//
// All App instances in a cluster must be created with the same WithVoters
// setting.
//
// The given value must be an odd number greater than one.
//
// The default value is 3.
func WithVoters(n int) Option {
	return func(options *options) {
		options.Voters = n
	}
}

// WithStandBys sets the number of nodes in the cluster that should have the
// StandBy role.
//
// When a new node is added to the cluster or it is started again after a
// shutdown it will be assigned the StandBy role in case there are already
// enough online voters, but the current number of stand-bys is below n.
//
// Similarly when a node with the StandBy role is shutdown gracefully by
// calling the Handover() method, it will try to transfer its StandBy role to
// another non-StandBy node, if one is available.
//
// All App instances in a cluster must be created with the same WithStandBys
// setting.
//
// The default value is 3.
func WithStandBys(n int) Option {
	return func(options *options) {
		options.StandBys = n
	}
}

// WithRolesAdjustmentFrequency sets the frequency at which the current cluster
// leader will check if the roles of the various nodes in the cluster match
// the desired setup and perform promotions/demotions to adjust the situation
// if needed.
//
// The default is 30 seconds.
func WithRolesAdjustmentFrequency(frequency time.Duration) Option {
	return func(options *options) {
		options.RolesAdjustmentFrequency = frequency
	}
}

// WithLogFunc sets a custom log function.
func WithLogFunc(log client.LogFunc) Option {
	return func(options *options) {
		options.Log = log
	}
}

// WithTracing will emit a log message at the given level every time a
// statement gets executed.
func WithTracing(level client.LogLevel) Option {
	return func(options *options) {
		options.Tracing = level
	}
}

// WithFailureDomain sets the node's failure domain.
//
// Failure domains are taken into account when deciding which nodes to promote
// to Voter or StandBy when needed.
func WithFailureDomain(code uint64) Option {
	return func(options *options) {
		options.FailureDomain = code
	}
}

// WithNetworkLatency sets the average one-way network latency.
func WithNetworkLatency(latency time.Duration) Option {
	return func(options *options) {
		options.NetworkLatency = latency
	}
}

// WithSnapshotParams sets the raft snapshot parameters.
func WithSnapshotParams(params cowsql.SnapshotParams) Option {
	return func(options *options) {
		options.SnapshotParams = params
	}
}

// WithAutoRecovery enables or disables auto-recovery of persisted data
// at startup for this node.
//
// When auto-recovery is enabled, raft snapshots and segment files may be
// deleted at startup if they are determined to be corrupt. This helps
// the startup process to succeed in more cases, but can lead to data loss.
//
// Auto-recovery is enabled by default.
func WithAutoRecovery(recovery bool) Option {
	return func(options *options) {
		options.AutoRecovery = recovery
	}
}

type tlsSetup struct {
	Listen *tls.Config
	Dial   *tls.Config
}

type connSetup struct {
	dialFunc client.DialFunc
	acceptCh chan net.Conn
}

type options struct {
	Address                  string
	Cluster                  []string
	Log                      client.LogFunc
	Tracing                  client.LogLevel
	TLS                      *tlsSetup
	Conn                     *connSetup
	Voters                   int
	StandBys                 int
	RolesAdjustmentFrequency time.Duration
	FailureDomain            uint64
	NetworkLatency           time.Duration
	UnixSocket               string
	SnapshotParams           cowsql.SnapshotParams
	AutoRecovery             bool
}

// Create an options object with sane defaults.
func defaultOptions() *options {
	return &options{
		Log:                      defaultLogFunc,
		Tracing:                  client.LogNone,
		Voters:                   3,
		StandBys:                 3,
		RolesAdjustmentFrequency: 30 * time.Second,
		AutoRecovery:             true,
	}
}

func isLoopback(iface *net.Interface) bool {
	return int(iface.Flags&net.FlagLoopback) > 0
}

// see https://stackoverflow.com/a/48519490/3613657
//
// Valid IPv4 notations:
//
//	"192.168.0.1":    basic
//	"192.168.0.1:80": with port info
//
// Valid IPv6 notations:
//
//	"::FFFF:C0A8:1":                        basic
//	"::FFFF:C0A8:0001":                     leading zeros
//	"0000:0000:0000:0000:0000:FFFF:C0A8:1": double colon expanded
//	"::FFFF:C0A8:1%1":                      with zone info
//	"::FFFF:192.168.0.1":                   IPv4 literal
//	"[::FFFF:C0A8:1]:80":                   with port info
//	"[::FFFF:C0A8:1%1]:80":                 with zone and port info
func isIpV4(ip string) bool {
	return strings.Count(ip, ":") < 2
}

func defaultAddress() (addr string, err error) {
	ifaces, err := net.Interfaces()
	if err != nil {
		return "", err
	}
	for _, iface := range ifaces {
		if isLoopback(&iface) {
			continue
		}
		addrs, err := iface.Addrs()
		if err != nil {
			continue
		}
		if len(addrs) == 0 {
			continue
		}
		addr, ok := addrs[0].(*net.IPNet)
		if !ok {
			continue
		}
		ipStr := addr.IP.String()
		if isIpV4(ipStr) {
			return addr.IP.String() + ":9000", nil
		} else {
			return "[" + addr.IP.String() + "]" + ":9000", nil
		}
	}

	return "", fmt.Errorf("no suitable net.Interface found: %v", err)
}

func defaultLogFunc(l client.LogLevel, format string, a ...interface{}) {
	// Log only error messages
	if l != client.LogError {
		return
	}
	msg := fmt.Sprintf("["+l.String()+"]"+" cowsql: "+format, a...)
	log.Print(msg)
}
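A sketch of how the options above compose in practice; the directory, addresses and values are placeholders rather than recommended settings:

package example

import (
	"time"

	"github.com/cowsql/go-cowsql/app"
)

// newNode combines several of the options defined in options.go. Only the
// first node of a cluster omits WithCluster.
func newNode(dir string) (*app.App, error) {
	return app.New(dir,
		app.WithAddress("127.0.0.1:9001"),
		app.WithCluster([]string{"127.0.0.1:9002", "127.0.0.1:9003"}),
		app.WithVoters(3),
		app.WithStandBys(3),
		app.WithRolesAdjustmentFrequency(30*time.Second),
	)
}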
golang-github-cowsql-go-cowsql-1.22.0/app/proxy.go000066400000000000000000000130261447672437700220610ustar00rootroot00000000000000package app

import (
	"context"
	"crypto/tls"
	"fmt"
	"io"
	"net"
	"os"
	"reflect"
	"syscall"
	"time"
	"unsafe"

	"golang.org/x/sys/unix"
)

// Copies data between a remote TCP network connection (possibly with TLS) and
// a local unix socket.
//
// The function will return if one of the following events occurs:
//
// - the other end of the remote network socket closes the connection
// - the other end of the local unix socket closes the connection
// - the context is cancelled
// - an error occurs when writing or reading data
//
// In case of errors, details are returned.
func proxy(ctx context.Context, remote net.Conn, local net.Conn, config *tls.Config) error {
	tcp, err := tryExtractTCPConn(remote)
	if err == nil {
		if err := setKeepalive(tcp); err != nil {
			return err
		}
	}

	if config != nil {
		if config.ClientCAs != nil {
			remote = tls.Server(remote, config)
		} else {
			remote = tls.Client(remote, config)
		}
	}

	remoteToLocal := make(chan error)
	localToRemote := make(chan error)

	// Start copying data back and forth until either the client or the
	// server get closed or hit an error.
	go func() {
		_, err := io.Copy(local, remote)
		remoteToLocal <- err
	}()

	go func() {
		_, err := io.Copy(remote, local)
		localToRemote <- err
	}()

	errs := make([]error, 2)

	select {
	case <-ctx.Done():
		// Force closing, ignore errors.
		remote.Close()
		local.Close()
		<-remoteToLocal
		<-localToRemote
	case err := <-remoteToLocal:
		if err != nil {
			errs[0] = fmt.Errorf("remote -> local: %v", err)
		}
		local.(*net.UnixConn).CloseRead()
		if err := <-localToRemote; err != nil {
			errs[1] = fmt.Errorf("local -> remote: %v", err)
		}
		remote.Close()
		local.Close()
	case err := <-localToRemote:
		if err != nil {
			errs[0] = fmt.Errorf("local -> remote: %v", err)
		}
		if tcp != nil {
			tcp.CloseRead()
		}
		if err := <-remoteToLocal; err != nil {
			errs[1] = fmt.Errorf("remote -> local: %v", err)
		}
		remote.Close()
		local.Close()
	}

	if errs[0] != nil || errs[1] != nil {
		return proxyError{first: errs[0], second: errs[1]}
	}

	return nil
}

// tryExtractTCPConn tries to extract the underlying net.TCPConn, potentially from a tls.Conn.
func tryExtractTCPConn(conn net.Conn) (*net.TCPConn, error) {
	tcp, ok := conn.(*net.TCPConn)
	if ok {
		return tcp, nil
	}

	// Go doesn't currently expose the underlying TCP connection of a TLS connection, but we need it in order
	// to set timeout properties on the connection. We use some reflect/unsafe magic to extract the private
	// remote.conn field, which is indeed the underlying TCP connection.
	tlsConn, ok := conn.(*tls.Conn)
	if !ok {
		return nil, fmt.Errorf("connection is not a tls.Conn")
	}

	field := reflect.ValueOf(tlsConn).Elem().FieldByName("conn")
	field = reflect.NewAt(field.Type(), unsafe.Pointer(field.UnsafeAddr())).Elem()
	c := field.Interface()

	tcpConn, ok := c.(*net.TCPConn)
	if !ok {
		return nil, fmt.Errorf("connection is not a net.TCPConn")
	}

	return tcpConn, nil
}

// Set TCP_USER_TIMEOUT and TCP keepalive with 3 seconds idle time, 3 seconds
// retry interval with at most 3 retries.
//
// See https://thenotexpert.com/golang-tcp-keepalive/.
func setKeepalive(conn *net.TCPConn) error {
	err := conn.SetKeepAlive(true)
	if err != nil {
		return err
	}

	err = conn.SetKeepAlivePeriod(time.Second * 3)
	if err != nil {
		return err
	}

	raw, err := conn.SyscallConn()
	if err != nil {
		return err
	}

	raw.Control(
		func(ptr uintptr) {
			fd := int(ptr)

			// Number of probes.
			err = syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, _TCP_KEEPCNT, 3)
			if err != nil {
				return
			}

			// Wait time after an unsuccessful probe.
			err = syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, _TCP_KEEPINTVL, 3)
			if err != nil {
				return
			}

			// Set TCP_USER_TIMEOUT option to limit the maximum amount of time in ms that transmitted data may remain
			// unacknowledged before TCP will forcefully close the corresponding connection and return ETIMEDOUT to the
			// application. This combined with the TCP keepalive options on the socket will ensure that should the
			// remote side of the connection disappear abruptly that cowsql will detect this and close the socket quickly.
			// Decreasing the user timeouts allows applications to "fail fast" if so desired. Otherwise it may take
			// up to 20 minutes with the current system defaults in a normal WAN environment if there are packets in
			// the send queue that will prevent the keepalive timer from working as the retransmission timers kick in.
			// See https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=dca43c75e7e545694a9dd6288553f55c53e2a3a3
			err = syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, int(30*time.Second/time.Millisecond))
			if err != nil {
				return
			}
		})
	return err
}

// Returns a pair of connected unix sockets.
func socketpair() (net.Conn, net.Conn, error) {
	fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM, 0)
	if err != nil {
		return nil, nil, err
	}

	c1, err := fdToFileConn(fds[0])
	if err != nil {
		return nil, nil, err
	}

	c2, err := fdToFileConn(fds[1])
	if err != nil {
		c1.Close()
		return nil, nil, err
	}

	return c1, c2, err
}

func fdToFileConn(fd int) (net.Conn, error) {
	f := os.NewFile(uintptr(fd), "")
	defer f.Close()
	return net.FileConn(f)
}

type proxyError struct {
	first  error
	second error
}

func (e proxyError) Error() string {
	msg := ""
	if e.first != nil {
		msg += "first: " + e.first.Error()
	}
	if e.second != nil {
		if e.first != nil {
			msg += " "
		}
		msg += "second: " + e.second.Error()
	}
	return msg
}
golang-github-cowsql-go-cowsql-1.22.0/app/proxy_darwin.go000066400000000000000000000003071447672437700234270ustar00rootroot00000000000000// +build darwin

package app

// from netinet/tcp.h (OS X 10.9.4)
const (
	_TCP_KEEPINTVL = 0x101 /* interval between keepalives */
	_TCP_KEEPCNT   = 0x102 /* number of keepalives before close */
)
golang-github-cowsql-go-cowsql-1.22.0/app/proxy_linux.go000066400000000000000000000003311447672437700232770ustar00rootroot00000000000000// +build linux

package app

import (
	"syscall"
)

const (
	_TCP_KEEPINTVL = syscall.TCP_KEEPINTVL /* interval between keepalives */
	_TCP_KEEPCNT   = syscall.TCP_KEEPCNT   /* number of keepalives before close */
)
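For illustration only, the portable part of the keepalive tuning that setKeepalive performs with raw setsockopt calls can be sketched as follows; the address and intervals are placeholders, and the TCP_KEEPCNT / TCP_USER_TIMEOUT options still require the platform-specific calls shown above:

package example

import (
	"net"
	"time"
)

// dialWithKeepalive dials TCP and enables the standard-library portion of the
// keepalive configuration used by proxy.go.
func dialWithKeepalive(addr string) (*net.TCPConn, error) {
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		return nil, err
	}
	tcp := conn.(*net.TCPConn)
	if err := tcp.SetKeepAlive(true); err != nil {
		tcp.Close()
		return nil, err
	}
	// 3-second idle time, mirroring setKeepalive above.
	if err := tcp.SetKeepAlivePeriod(3 * time.Second); err != nil {
		tcp.Close()
		return nil, err
	}
	return tcp, nil
}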
golang-github-cowsql-go-cowsql-1.22.0/app/roles.go000066400000000000000000000207641447672437700220260ustar00rootroot00000000000000package app

import (
	"sort"

	"github.com/cowsql/go-cowsql/client"
)

const minVoters = 3

// RolesConfig can be used to tweak the algorithm implemented by RolesChanges.
type RolesConfig struct {
	Voters   int // Target number of voters, 3 by default.
	StandBys int // Target number of stand-bys, 3 by default.
}

// RolesChanges implements an algorithm to take decisions about which node
// should have which role in a cluster.
//
// You normally don't need to use this data structure since it's already
// transparently wired into the high-level App object. However this is exposed
// for users who don't want to use the high-level App object but still want to
// implement the same roles management algorithm.
type RolesChanges struct {
	// Algorithm configuration.
	Config RolesConfig

	// Current state of the cluster. Each node in the cluster must be
	// present as a key in the map, and its value should be its associated
	// failure domain and weight metadata or nil if the node is currently
	// offline.
	State map[client.NodeInfo]*client.NodeMetadata
}

// Assume decides if a node should assume a different role than the one it
// currently has. It should normally be run at node startup, where the
// algorithm might decide that the node should assume the Voter or Stand-By
// role in case there's a shortage of them.
//
// Return -1 in case no role change is needed.
func (c *RolesChanges) Assume(id uint64) client.NodeRole {
	// If the cluster is still too small, do nothing.
	if c.size() < minVoters {
		return -1
	}

	node := c.get(id)

	// If we are not in the cluster, it means we were removed, just do nothing.
	if node == nil {
		return -1
	}

	// If we already have the Voter or StandBy role, there's nothing to do.
	if node.Role == client.Voter || node.Role == client.StandBy {
		return -1
	}

	onlineVoters := c.list(client.Voter, true)
	onlineStandbys := c.list(client.StandBy, true)

	// If we already have the desired number of online voters and
	// stand-bys, there's nothing to do.
	if len(onlineVoters) >= c.Config.Voters && len(onlineStandbys) >= c.Config.StandBys {
		return -1
	}

	// Figure if we need to become stand-by or voter.
	role := client.StandBy
	if len(onlineVoters) < c.Config.Voters {
		role = client.Voter
	}

	return role
}

// Handover decides if a node should transfer its current role to another
// node. This is typically run when the node is shutting down and is hence going to be offline soon.
//
// Return the role that should be handed over and list of candidates that
// should receive it, in order of preference.
func (c *RolesChanges) Handover(id uint64) (client.NodeRole, []client.NodeInfo) {
	node := c.get(id)

	// If we are not in the cluster, it means we were removed, just do nothing.
	if node == nil {
		return -1, nil
	}

	// If we aren't a voter or a stand-by, there's nothing to do.
	if node.Role != client.Voter && node.Role != client.StandBy {
		return -1, nil
	}

	// Make a list of all online nodes with the same role and get their
	// failure domains.
	peers := c.list(node.Role, true)
	for i := range peers {
		if peers[i].ID == node.ID {
			peers = append(peers[:i], peers[i+1:]...)
			break
		}
	}
	domains := c.failureDomains(peers)

	// Online spare nodes are always candidates.
	candidates := c.list(client.Spare, true)

	// Stand-by nodes are candidates if we need to transfer voting
	// rights, and they are preferred over spares.
	if node.Role == client.Voter {
		candidates = append(c.list(client.StandBy, true), candidates...)
	}

	if len(candidates) == 0 {
		// No online node available to be promoted.
		return -1, nil
	}

	c.sortCandidates(candidates, domains)

	return node.Role, candidates
}

// Adjust decides if there should be changes in the current roles.
//
// Return the role that should be assigned and a list of candidates that should
// assume it, in order of preference.
func (c *RolesChanges) Adjust(leader uint64) (client.NodeRole, []client.NodeInfo) {
	if c.size() == 1 {
		return -1, nil
	}

	// If the cluster is too small, make sure we have just one voter (us).
	if c.size() < minVoters {
		for node := range c.State {
			if node.ID == leader || node.Role != client.Voter {
				continue
			}
			return client.Spare, []client.NodeInfo{node}
		}
		return -1, nil
	}

	onlineVoters := c.list(client.Voter, true)
	onlineStandbys := c.list(client.StandBy, true)
	offlineVoters := c.list(client.Voter, false)
	offlineStandbys := c.list(client.StandBy, false)

	// If we have exactly the desired number of voters and stand-bys, and they are all
	// online, we're good.
	if len(offlineVoters) == 0 && len(onlineVoters) == c.Config.Voters && len(offlineStandbys) == 0 && len(onlineStandbys) == c.Config.StandBys {
		return -1, nil
	}

	// If we have less online voters than desired, let's try to promote
	// some other node.
	if n := len(onlineVoters); n < c.Config.Voters {
		candidates := c.list(client.StandBy, true)
		candidates = append(candidates, c.list(client.Spare, true)...)

		if len(candidates) == 0 {
			return -1, nil
		}

		domains := c.failureDomains(onlineVoters)
		c.sortCandidates(candidates, domains)

		return client.Voter, candidates
	}

	// If we have more online voters than desired, let's demote one of
	// them.
	if n := len(onlineVoters); n > c.Config.Voters {
		nodes := []client.NodeInfo{}
		for _, node := range onlineVoters {
			// Don't demote the leader.
			if node.ID == leader {
				continue
			}
			nodes = append(nodes, node)
		}

		return client.Spare, nodes
	}

	// If we have offline voters, let's demote one of them.
	if n := len(offlineVoters); n > 0 {
		return client.Spare, offlineVoters
	}

	// If we have less online stand-bys than desired, let's try to promote
	// some other node.
	if n := len(onlineStandbys); n < c.Config.StandBys {
		candidates := c.list(client.Spare, true)

		if len(candidates) == 0 {
			return -1, nil
		}

		domains := c.failureDomains(onlineStandbys)
		c.sortCandidates(candidates, domains)

		return client.StandBy, candidates
	}

	// If we have more online stand-bys than desired, let's demote one of
	// them.
	if n := len(onlineStandbys); n > c.Config.StandBys {
		nodes := []client.NodeInfo{}
		for _, node := range onlineStandbys {
			// Don't demote the leader.
			if node.ID == leader {
				continue
			}
			nodes = append(nodes, node)
		}

		return client.Spare, nodes
	}

	// If we have offline stand-bys, let's demote one of them.
	if n := len(offlineStandbys); n > 0 {
		return client.Spare, offlineStandbys
	}

	return -1, nil
}

// Return the number of nodes in the cluster.
func (c *RolesChanges) size() int {
	return len(c.State)
}

// Return information about the node with the given ID, or nil if no node
// matches.
func (c *RolesChanges) get(id uint64) *client.NodeInfo {
	for node := range c.State {
		if node.ID == id {
			return &node
		}
	}
	return nil
}

// Return the online or offline nodes with the given role.
func (c *RolesChanges) list(role client.NodeRole, online bool) []client.NodeInfo {
	nodes := []client.NodeInfo{}
	for node, metadata := range c.State {
		if node.Role == role && metadata != nil == online {
			nodes = append(nodes, node)
		}
	}
	return nodes
}

// Return the number of online or offline nodes with the given role.
func (c *RolesChanges) count(role client.NodeRole, online bool) int {
	return len(c.list(role, online))
}

// Return a map of the failure domains associated with the
// given nodes.
func (c *RolesChanges) failureDomains(nodes []client.NodeInfo) map[uint64]bool {
	domains := map[uint64]bool{}
	for _, node := range nodes {
		metadata := c.State[node]
		if metadata == nil {
			continue
		}
		domains[metadata.FailureDomain] = true
	}
	return domains
}

// Sort the given candidates according to their failure domain and
// weight. Candidates belonging to a failure domain different from the given
// domains take precedence.
func (c *RolesChanges) sortCandidates(candidates []client.NodeInfo, domains map[uint64]bool) {
	less := func(i, j int) bool {
		metadata1 := c.metadata(candidates[i])
		metadata2 := c.metadata(candidates[j])

		// If i's failure domain is not in the given list, but j's is,
		// then i takes precedence.
		if !domains[metadata1.FailureDomain] && domains[metadata2.FailureDomain] {
			return true
		}

		// If j's failure domain is not in the given list, but i's is,
		// then j takes precedence.
		if !domains[metadata2.FailureDomain] && domains[metadata1.FailureDomain] {
			return false
		}

		return metadata1.Weight < metadata2.Weight
	}

	sort.Slice(candidates, less)
}

// Return the metadata of the given node, if any.
func (c *RolesChanges) metadata(node client.NodeInfo) *client.NodeMetadata {
	return c.State[node]
}
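A sketch of driving RolesChanges by hand, as the type's doc comment suggests for users who bypass the high-level App object. How the returned role is actually applied to a node (e.g. through the client API) is elided; the function and parameter names are illustrative:

package example

import (
	"github.com/cowsql/go-cowsql/app"
	"github.com/cowsql/go-cowsql/client"
)

// adjustRoles runs one round of the roles-adjustment algorithm over a
// caller-supplied view of the cluster. Offline nodes must be present in the
// state map with a nil metadata value, as the RolesChanges doc requires.
func adjustRoles(leaderID uint64, state map[client.NodeInfo]*client.NodeMetadata) {
	changes := app.RolesChanges{
		Config: app.RolesConfig{Voters: 3, StandBys: 3},
		State:  state,
	}
	role, candidates := changes.Adjust(leaderID)
	if role == -1 || len(candidates) == 0 {
		return // the cluster already matches the desired shape
	}
	// candidates[0] is the preferred node; assign it the returned role,
	// then re-run Adjust until it reports no further changes.
	_ = candidates[0]
}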
golang-github-cowsql-go-cowsql-1.22.0/app/testdata/000077500000000000000000000000001447672437700221605ustar00rootroot00000000000000golang-github-cowsql-go-cowsql-1.22.0/app/testdata/cluster.crt000066400000000000000000000011571447672437700243570ustar00rootroot00000000000000-----BEGIN CERTIFICATE-----
MIIBnjCCAUSgAwIBAgIUddf2VYy/riyr+d2rByY0OT/N2HEwCgYIKoZIzj0EAwIw
FjEUMBIGA1UEAwwLZHFsaXRlLXRlc3QwHhcNMjExMTE3MTIxMTU2WhcNNDkwNDA0
MTIxMTU2WjAWMRQwEgYDVQQDDAtkcWxpdGUtdGVzdDBZMBMGByqGSM49AgEGCCqG
SM49AwEHA0IABHhD/t8WFSlqi04l2ce8l4ZktVjMMCwZ5edEwAjJl2QOvaW6qkP1
wFAaE9LOHTDQNEJv/BsA0XIHKXpG7fTHISajcDBuMB0GA1UdDgQWBBQ1qdnDo6Qm
eJ51EH2/CS1AzxM2BTAfBgNVHSMEGDAWgBQ1qdnDo6QmeJ51EH2/CS1AzxM2BTAP
BgNVHRMBAf8EBTADAQH/MBsGA1UdEQQUMBKHBH8AAAGCCmxvY2FsLnRlc3QwCgYI
KoZIzj0EAwIDSAAwRQIhAJPVzO4jh61qKw0au/7UVU1TERavD3XPwzQhhq0ph9/h
AiA1k0k8Iruvlty/5PA/CPKxeBH7smUyquVLYQW5Y5GbzQ==
-----END CERTIFICATE-----
golang-github-cowsql-go-cowsql-1.22.0/app/testdata/cluster.key000066400000000000000000000004561447672437700243600ustar00rootroot00000000000000-----BEGIN EC PARAMETERS-----
BggqhkjOPQMBBw==
-----END EC PARAMETERS-----
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIBxSTUI5Xk1nsd/yfovKZ0cNdPGEcCTANDs0epC/Vo5foAoGCCqGSM49
AwEHoUQDQgAEeEP+3xYVKWqLTiXZx7yXhmS1WMwwLBnl50TACMmXZA69pbqqQ/XA
UBoT0s4dMNA0Qm/8GwDRcgcpekbt9MchJg==
-----END EC PRIVATE KEY-----
golang-github-cowsql-go-cowsql-1.22.0/app/tls.go000066400000000000000000000075311447672437700215060ustar00rootroot00000000000000package app

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
)

// SimpleTLSConfig returns a pair of TLS configuration objects with sane
// defaults, one to be used as server-side configuration when listening to
// incoming connections and one to be used as client-side configuration when
// establishing outgoing connections.
//
// The returned configs can be used as "listen" and "dial" parameters for the
// WithTLS option.
//
// In order to generate a suitable TLS certificate you can use the openssl
// command, for example:
//
//	DNS=$(hostname)
//	IP=$(hostname -I | cut -f 1 -d ' ')
//	CN=example.com
//	openssl req -x509 -newkey rsa:4096 -sha256 -days 3650 \
//	  -nodes -keyout cluster.key -out cluster.crt -subj "/CN=$CN" \
//	  -addext "subjectAltName=DNS:$DNS,IP:$IP"
//
// then load the resulting key pair and pool with:
//
//	cert, _ := tls.LoadX509KeyPair("cluster.crt", "cluster.key")
//	data, _ := ioutil.ReadFile("cluster.crt")
//	pool := x509.NewCertPool()
//	pool.AppendCertsFromPEM(data)
//
// and finally use the WithTLS option together with the SimpleTLSConfig helper:
//
//	app, _ := app.New("/my/dir", app.WithTLS(app.SimpleTLSConfig(cert, pool)))
//
// See SimpleListenTLSConfig and SimpleDialTLSConfig for details.
func SimpleTLSConfig(cert tls.Certificate, pool *x509.CertPool) (*tls.Config, *tls.Config) {
	listen := SimpleListenTLSConfig(cert, pool)
	dial := SimpleDialTLSConfig(cert, pool)
	return listen, dial
}

// SimpleListenTLSConfig returns a server-side TLS configuration with sane
// defaults (e.g. TLS version, ciphers and mutual authentication).
//
// The cert parameter must be a public/private key pair, typically loaded from
// disk using tls.LoadX509KeyPair().
//
// The pool parameter can be used to specify a custom signing CA (e.g. for
// self-signed certificates).
//
// When server and client both use the same certificate, the same key pair and
// pool should be passed to SimpleDialTLSConfig() in order to generate the
// client-side config.
//
// The returned config can be used as "listen" parameter for the WithTLS
// option.
//
// A user can modify the returned config to suit their specific needs.
func SimpleListenTLSConfig(cert tls.Certificate, pool *x509.CertPool) *tls.Config {
	config := &tls.Config{
		MinVersion:   tls.VersionTLS12,
		Certificates: []tls.Certificate{cert},
		RootCAs:      pool,
		ClientCAs:    pool,
		ClientAuth:   tls.RequireAndVerifyClientCert,
	}

	config.BuildNameToCertificate()

	return config
}

// SimpleDialTLSConfig returns a client-side TLS configuration with sane
// defaults (e.g. TLS version, ciphers and mutual authentication).
//
// The cert parameter must be a public/private key pair, typically loaded from
// disk using tls.LoadX509KeyPair().
//
// The pool parameter can be used to specify a custom signing CA (e.g. for
// self-signed certificates).
//
// When server and client both use the same certificate, the same key pair and
// pool should be passed to SimpleListenTLSConfig() in order to generate the
// server-side config.
//
// The returned config can be used as "client" parameter for the WithTLS App
// option, or as "config" parameter for the client.DialFuncWithTLS() helper.
//
// TLS connections using the same `Config` will share a ClientSessionCache.
// You can override this behaviour by setting your own ClientSessionCache or
// nil.
//
// A user can modify the returned config to suit their specific needs.
func SimpleDialTLSConfig(cert tls.Certificate, pool *x509.CertPool) *tls.Config {
	config := &tls.Config{
		MinVersion:   tls.VersionTLS12,
		RootCAs:      pool,
		Certificates: []tls.Certificate{cert},
	}

	x509cert, err := x509.ParseCertificate(cert.Certificate[0])
	if err != nil {
		panic(fmt.Errorf("parse certificate: %v", err))
	}
	if len(x509cert.DNSNames) == 0 {
		panic("certificate has no DNS extension")
	}
	config.ServerName = x509cert.DNSNames[0]

	return config
}
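Expanding the tls.go doc comment into a single self-contained sketch (the file paths are placeholders):

package example

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"

	"github.com/cowsql/go-cowsql/app"
)

// newTLSNode loads the key pair and CA pool, then wires them through
// SimpleTLSConfig into app.New, exactly as the tls.go doc comment describes.
func newTLSNode(dir string) (*app.App, error) {
	cert, err := tls.LoadX509KeyPair("cluster.crt", "cluster.key")
	if err != nil {
		return nil, err
	}
	data, err := ioutil.ReadFile("cluster.crt")
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	pool.AppendCertsFromPEM(data)
	listen, dial := app.SimpleTLSConfig(cert, pool)
	return app.New(dir, app.WithTLS(listen, dial))
}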
golang-github-cowsql-go-cowsql-1.22.0/benchmark/000077500000000000000000000000001447672437700215215ustar00rootroot00000000000000golang-github-cowsql-go-cowsql-1.22.0/benchmark/benchmark.go000066400000000000000000000100401447672437700237750ustar00rootroot00000000000000package benchmark

import (
	"context"
	"database/sql"
	"errors"
	"fmt"
	"os"
	"path"
	"time"

	"github.com/cowsql/go-cowsql/app"
	"github.com/cowsql/go-cowsql/client"
)

const (
	kvSchema = "CREATE TABLE IF NOT EXISTS model (key TEXT, value TEXT, UNIQUE(key))"
)

type Benchmark struct {
	app     *app.App
	db      *sql.DB
	dir     string
	options *options
	workers []*worker
}

func createWorkers(o *options) []*worker {
	workers := make([]*worker, o.nWorkers)
	for i := 0; i < o.nWorkers; i++ {
		switch o.workload {
		case kvWrite:
			workers[i] = newWorker(kvWriter, o)
		case kvReadWrite:
			workers[i] = newWorker(kvReaderWriter, o)
		}
	}
	return workers
}

func New(app *app.App, db *sql.DB, dir string, options ...Option) (bm *Benchmark, err error) {
	o := defaultOptions()
	for _, option := range options {
		option(o)
	}

	bm = &Benchmark{
		app:     app,
		db:      db,
		dir:     dir,
		options: o,
		workers: createWorkers(o),
	}

	return bm, nil
}

func (bm *Benchmark) runWorkload(ctx context.Context) {
	for _, worker := range bm.workers {
		go worker.run(ctx, bm.db)
	}
}

func (bm *Benchmark) kvSetup() error {
	_, err := bm.db.Exec(kvSchema)
	return err
}

func (bm *Benchmark) setup() error {
	switch bm.options.workload {
	default:
		return bm.kvSetup()
	}
}

func reportName(id int, work work) string {
	return fmt.Sprintf("%d-%s-%d", id, work, time.Now().Unix())
}

// Returns a map of file name to file content.
func (bm *Benchmark) reportFiles() map[string]string {
	allReports := make(map[string]string)
	for i, worker := range bm.workers {
		reports := worker.report()
		for w, report := range reports {
			file := reportName(i, w)
			allReports[file] = report.String()
		}
	}
	return allReports
}

func (bm *Benchmark) reportResults() error {
	dir := path.Join(bm.dir, "results")
	if err := os.MkdirAll(dir, 0755); err != nil {
		return fmt.Errorf("failed to create %v: %v", dir, err)
	}

	reports := bm.reportFiles()
	for filename, content := range reports {
		f, err := os.Create(path.Join(dir, filename))
		if err != nil {
			return fmt.Errorf("failed to create %v in %v: %v", filename, dir, err)
		}
		_, err = f.WriteString(content)
		if err != nil {
			return fmt.Errorf("failed to write %v in %v: %v", filename, dir, err)
		}
		f.Sync()
	}
	return nil
}

func (bm *Benchmark) nodeOnline(node *client.NodeInfo) bool {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*2)
	defer cancel()

	cli, err := client.New(ctx, node.Address)
	if err != nil {
		return false
	}
	cli.Close()
	return true
}

func (bm *Benchmark) allNodesOnline(ctx context.Context, cancel context.CancelFunc) {
	for {
		if errors.Is(ctx.Err(), context.DeadlineExceeded) {
			return
		}

		cli, err := bm.app.Client(ctx)
		if err != nil {
			continue
		}

		nodes, err := cli.Cluster(ctx)
		if err != nil {
			continue
		}
		cli.Close()

		n := 0
		for _, needed := range bm.options.cluster {
			for _, present := range nodes {
				if needed == present.Address && bm.nodeOnline(&present) {
					n += 1
				}
			}
		}

		if len(bm.options.cluster) == n {
			cancel()
			return
		}
	}
}

func (bm *Benchmark) waitForCluster(ch <-chan os.Signal) error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(bm.options.clusterTimeout))
	defer cancel()
	go bm.allNodesOnline(ctx, cancel)

	select {
	case <-ctx.Done():
		if !errors.Is(ctx.Err(), context.Canceled) {
			return fmt.Errorf("timed out waiting for cluster: %v", ctx.Err())
		}
		return nil
	case <-ch:
		return fmt.Errorf("benchmark stopped, signal received while waiting for cluster")
	}
}

func (bm *Benchmark) Run(ch <-chan os.Signal) error {
	if err := bm.setup(); err != nil {
		return err
	}

	if err := bm.waitForCluster(ch); err != nil {
		return err
	}

	ctx, cancel := context.WithTimeout(context.Background(), bm.options.duration)
	defer cancel()
	bm.runWorkload(ctx)

	select {
	case <-ctx.Done():
		break
	case <-ch:
		cancel()
		break
	}

	if err := bm.reportResults(); err != nil {
		return err
	}

	fmt.Printf("Benchmark done. Results available here:\n%s\n", path.Join(bm.dir, "results"))
	return nil
}
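A sketch of the Run lifecycle implemented above: Run blocks until the configured duration elapses or a signal arrives on the channel, then writes its report files under <dir>/results. Construction of the Benchmark itself is elided:

package example

import (
	"os"
	"os/signal"

	"github.com/cowsql/go-cowsql/benchmark"
)

// runBenchmark wires an interrupt signal into bm.Run, so Ctrl-C stops the
// workload early while still producing the report files.
func runBenchmark(bm *benchmark.Benchmark) error {
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, os.Interrupt)
	defer signal.Stop(ch)
	return bm.Run(ch)
}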
golang-github-cowsql-go-cowsql-1.22.0/benchmark/benchmark_test.go000066400000000000000000000053421447672437700250450ustar00rootroot00000000000000package benchmark_test

import (
	"context"
	"database/sql"
	"io/ioutil"
	"os"
	"testing"
	"time"

	"github.com/cowsql/go-cowsql/app"
	"github.com/cowsql/go-cowsql/benchmark"
	"github.com/stretchr/testify/require"
)

const (
	addr1 = "127.0.0.1:9011"
	addr2 = "127.0.0.1:9012"
	addr3 = "127.0.0.1:9013"
)

func bmSetup(t *testing.T, addr string, join []string) (string, *app.App, *sql.DB, func()) {
	t.Helper()

	dir, err := ioutil.TempDir("", "cowsql-app-test-")
	require.NoError(t, err)

	app, err := app.New(dir, app.WithAddress(addr), app.WithCluster(join))
	require.NoError(t, err)

	readyCtx, cancel := context.WithTimeout(context.Background(), time.Duration(3)*time.Second)
	err = app.Ready(readyCtx)
	require.NoError(t, err)

	db, err := app.Open(context.Background(), "benchmark")
	require.NoError(t, err)

	cleanups := func() {
		os.RemoveAll(dir)
		cancel()
	}

	return dir, app, db, cleanups
}

func bmRun(t *testing.T, bm *benchmark.Benchmark, app *app.App, db *sql.DB) {
	defer db.Close()
	defer app.Close()

	ch := make(chan os.Signal)
	err := bm.Run(ch)
	require.NoError(t, err)
}

// Create a Benchmark with default values.
func TestNew_Default(t *testing.T) {
	dir, app, db, cleanup := bmSetup(t, addr1, nil)
	defer cleanup()

	bm, err := benchmark.New(
		app,
		db,
		dir,
		benchmark.WithCluster([]string{addr1}),
		benchmark.WithDuration(1))
	require.NoError(t, err)

	bmRun(t, bm, app, db)
}

// Create a Benchmark with a kvReadWriteWorkload.
func TestNew_KvReadWrite(t *testing.T) {
	dir, app, db, cleanup := bmSetup(t, addr1, nil)
	defer cleanup()

	bm, err := benchmark.New(
		app,
		db,
		dir,
		benchmark.WithCluster([]string{addr1}),
		benchmark.WithDuration(1),
		benchmark.WithWorkload("KvReadWrite"))
	require.NoError(t, err)

	bmRun(t, bm, app, db)
}

// Create a clustered Benchmark.
func TestNew_ClusteredKvReadWrite(t *testing.T) {
	dir, app, db, cleanup := bmSetup(t, addr1, nil)
	_, _, _, cleanup2 := bmSetup(t, addr2, []string{addr1})
	_, _, _, cleanup3 := bmSetup(t, addr3, []string{addr1})
	defer cleanup()
	defer cleanup2()
	defer cleanup3()

	bm, err := benchmark.New(
		app,
		db,
		dir,
		benchmark.WithCluster([]string{addr1, addr2, addr3}),
		benchmark.WithDuration(2))
	require.NoError(t, err)

	bmRun(t, bm, app, db)
}

// Create a clustered Benchmark that times out waiting for the cluster to form.
func TestNew_ClusteredTimeout(t *testing.T) {
	dir, app, db, cleanup := bmSetup(t, addr1, nil)
	defer cleanup()
	defer db.Close()
	defer app.Close()

	bm, err := benchmark.New(
		app,
		db,
		dir,
		benchmark.WithCluster([]string{addr1, addr2}),
		benchmark.WithClusterTimeout(2))
	require.NoError(t, err)

	ch := make(chan os.Signal)
	err = bm.Run(ch)
	require.Errorf(t, err, "Timed out waiting for cluster: context deadline exceeded")
}
golang-github-cowsql-go-cowsql-1.22.0/benchmark/options.go000066400000000000000000000041171447672437700235460ustar00rootroot00000000000000package benchmark

import (
	"strings"
	"time"
)

type workload int32

const (
	kvWrite     workload = iota
	kvReadWrite workload = iota
)

type Option func(*options)

type options struct {
	cluster        []string
	clusterTimeout time.Duration
	workload       workload
	duration       time.Duration
	nWorkers       int
	kvKeySizeB     int
	kvValueSizeB   int
}

func parseWorkload(workload string) workload {
	switch strings.ToLower(workload) {
	case "kvwrite":
		return kvWrite
	case "kvreadwrite":
		return kvReadWrite
	default:
		return kvWrite
	}
}

// WithWorkload sets the workload of the benchmark.
func WithWorkload(workload string) Option {
	return func(options *options) {
		options.workload = parseWorkload(workload)
	}
}

// WithDuration sets the duration of the benchmark.
func WithDuration(seconds int) Option {
	return func(options *options) {
		options.duration = time.Duration(seconds) * time.Second
	}
}

// WithWorkers sets the number of workers of the benchmark.
func WithWorkers(n int) Option {
	return func(options *options) {
		options.nWorkers = n
	}
}

// WithKvKeySize sets the size of the KV keys of the benchmark.
func WithKvKeySize(bytes int) Option {
	return func(options *options) {
		options.kvKeySizeB = bytes
	}
}

// WithKvValueSize sets the size of the KV values of the benchmark.
func WithKvValueSize(bytes int) Option {
	return func(options *options) {
		options.kvValueSizeB = bytes
	}
}

// WithCluster sets the cluster option of the benchmark. A benchmark will only
// start once the whole cluster is online.
func WithCluster(cluster []string) Option {
	return func(options *options) {
		options.cluster = cluster
	}
}

// WithClusterTimeout sets the timeout when waiting for the whole cluster to be
// online.
func WithClusterTimeout(cTo int) Option {
	return func(options *options) {
		options.clusterTimeout = time.Duration(cTo) * time.Second
	}
}

func defaultOptions() *options {
	return &options{
		cluster:        nil,
		clusterTimeout: time.Minute,
		duration:       time.Minute,
		kvKeySizeB:     32,
		kvValueSizeB:   1024,
		nWorkers:       1,
		workload:       kvWrite,
	}
}
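A sketch combining the benchmark options above; the workload name matches parseWorkload's accepted values, while the sizes and duration are illustrative:

package example

import (
	"database/sql"

	"github.com/cowsql/go-cowsql/app"
	"github.com/cowsql/go-cowsql/benchmark"
)

// newKvBenchmark builds a read/write KV benchmark with explicit worker and
// payload settings instead of the defaults.
func newKvBenchmark(a *app.App, db *sql.DB, dir string) (*benchmark.Benchmark, error) {
	return benchmark.New(a, db, dir,
		benchmark.WithWorkload("kvreadwrite"),
		benchmark.WithDuration(60),
		benchmark.WithWorkers(4),
		benchmark.WithKvKeySize(32),
		benchmark.WithKvValueSize(1024),
	)
}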
func WithCluster(cluster []string) Option { return func(options *options) { options.cluster = cluster } } // WithClusterTimeout sets the timeout when waiting for the whole cluster to be // online func WithClusterTimeout(cTo int) Option { return func(options *options) { options.clusterTimeout = time.Duration(cTo) * time.Second } } func defaultOptions() *options { return &options{ cluster: nil, clusterTimeout: time.Minute, duration: time.Minute, kvKeySizeB: 32, kvValueSizeB: 1024, nWorkers: 1, workload: kvWrite, } } golang-github-cowsql-go-cowsql-1.22.0/benchmark/tracker.go000066400000000000000000000053541447672437700235120ustar00rootroot00000000000000package benchmark import ( "fmt" "math" "strings" "sync" "time" ) func durToMs(d time.Duration) string { ms := int64(d / time.Millisecond) rest := int64(d % time.Millisecond) return fmt.Sprintf("%d.%06d", ms, rest) } type measurement struct { start time.Time duration time.Duration } func (m measurement) String() string { return fmt.Sprintf("%v %v", m.start.UnixNano(), durToMs(m.duration)) } type measurementErr struct { start time.Time err error } func (m measurementErr) String() string { return fmt.Sprintf("%v %v", m.start.UnixNano(), m.err) } type tracker struct { lock sync.RWMutex measurements map[work][]measurement errors map[work][]measurementErr } type report struct { n int nErr int totalDuration time.Duration avgDuration time.Duration maxDuration time.Duration minDuration time.Duration measurements []measurement errors []measurementErr } func (r report) String() string { var msb strings.Builder for _, m := range r.measurements { fmt.Fprintf(&msb, "%s\n", m) } var esb strings.Builder for _, e := range r.errors { fmt.Fprintf(&esb, "%s\n", e) } return fmt.Sprintf("n %d\n"+ "n_err %d\n"+ "avg [ms] %s\n"+ "max [ms] %s\n"+ "min [ms] %s\n"+ "measurements [timestamp in ns] [ms]\n%s\n"+ "errors\n%s\n", r.n, r.nErr, durToMs(r.avgDuration), durToMs(r.maxDuration), durToMs(r.minDuration), msb.String(), esb.String()) } func (t *tracker) measure(start time.Time, work work, err *error) { t.lock.Lock() defer t.lock.Unlock() duration := time.Since(start) if *err == nil { m := measurement{start, duration} t.measurements[work] = append(t.measurements[work], m) } else { e := measurementErr{start, *err} t.errors[work] = append(t.errors[work], e) } } func (t *tracker) report() map[work]report { t.lock.RLock() defer t.lock.RUnlock() reports := make(map[work]report) for w := range t.measurements { report := report{ n: len(t.measurements[w]), nErr: len(t.errors[w]), totalDuration: 0, avgDuration: 0, maxDuration: 0, minDuration: time.Duration(math.MaxInt64), measurements: t.measurements[w], errors: t.errors[w], } for _, m := range t.measurements[w] { report.totalDuration += m.duration if m.duration < report.minDuration { report.minDuration = m.duration } if m.duration > report.maxDuration { report.maxDuration = m.duration } } if report.n > 0 { report.avgDuration = report.totalDuration / time.Duration(report.n) } reports[w] = report } return reports } func newTracker() *tracker { return &tracker{ lock: sync.RWMutex{}, measurements: make(map[work][]measurement), errors: make(map[work][]measurementErr), } } golang-github-cowsql-go-cowsql-1.22.0/benchmark/worker.go000066400000000000000000000065131447672437700233660ustar00rootroot00000000000000package benchmark import ( "context" "database/sql" "errors" "fmt" "math/rand" "strings" "time" ) type work int type workerType int func (w work) String() string { switch w { case exec: return "exec" case query: return "query" 
case none: return "none" default: return "unknown" } } const ( // The type of query to perform none work = iota exec work = iota // a `write` query work = iota // a `read` kvWriter workerType = iota kvReader workerType = iota kvReaderWriter workerType = iota kvReadSql = "SELECT value FROM model WHERE key = ?" kvWriteSql = "INSERT OR REPLACE INTO model(key, value) VALUES(?, ?)" ) // A worker performs the queries to the database and keeps around some state // in order to do that. `lastWork` and `lastArgs` refer to the previously // executed operation and can be used to determine the next work the worker // should perform. `kvKeys` tells the worker which keys it has inserted in the // database. type worker struct { workerType workerType lastWork work lastArgs []interface{} tracker *tracker kvKeySizeB int kvValueSizeB int kvKeys []string } // Thanks to https://stackoverflow.com/a/22892986 var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") func randSeq(n int) string { b := make([]rune, n) for i := range b { b[i] = letters[rand.Intn(len(letters))] } return string(b) } func (w *worker) randNewKey() string { return randSeq(w.kvKeySizeB) } func (w *worker) randExistingKey() (string, error) { n := len(w.kvKeys) if n == 0 { return "", errors.New("no keys") } return w.kvKeys[rand.Intn(n)], nil } // A mix of random bytes and easily compressable bytes. func (w *worker) randValue() string { return strings.Repeat(randSeq(1), w.kvValueSizeB/2) + randSeq(w.kvValueSizeB/2) } // Returns the type of work to execute and a sql statement with arguments func (w *worker) getWork() (work, string, []interface{}) { switch w.workerType { case kvWriter: k, v := w.randNewKey(), w.randValue() return exec, kvWriteSql, []interface{}{k, v} case kvReaderWriter: read := rand.Intn(2) == 0 if read && len(w.kvKeys) != 0 { k, _ := w.randExistingKey() return query, kvReadSql, []interface{}{k} } k, v := w.randNewKey(), w.randValue() return exec, kvWriteSql, []interface{}{k, v} default: return none, "", []interface{}{} } } // Retrieve a query and execute it against the database func (w *worker) doWork(ctx context.Context, db *sql.DB) { var err error var str string work, q, args := w.getWork() w.lastWork = work w.lastArgs = args switch work { case exec: w.kvKeys = append(w.kvKeys, fmt.Sprintf("%v", (args[0]))) defer w.tracker.measure(time.Now(), work, &err) _, err = db.ExecContext(ctx, q, args...) if err != nil { w.kvKeys = w.kvKeys[:len(w.kvKeys)-1] } case query: defer w.tracker.measure(time.Now(), work, &err) err = db.QueryRowContext(ctx, q, args...).Scan(&str) default: return } } func (w *worker) run(ctx context.Context, db *sql.DB) { for { if ctx.Err() != nil { return } w.doWork(ctx, db) } } func (w *worker) report() map[work]report { return w.tracker.report() } func newWorker(workerType workerType, o *options) *worker { return &worker{ workerType: workerType, kvKeySizeB: o.kvKeySizeB, kvValueSizeB: o.kvValueSizeB, tracker: newTracker(), } } golang-github-cowsql-go-cowsql-1.22.0/client/000077500000000000000000000000001447672437700210455ustar00rootroot00000000000000golang-github-cowsql-go-cowsql-1.22.0/client/client.go000066400000000000000000000167451447672437700226670ustar00rootroot00000000000000package client import ( "context" "github.com/cowsql/go-cowsql/internal/protocol" "github.com/pkg/errors" ) // DialFunc is a function that can be used to establish a network connection. type DialFunc = protocol.DialFunc // Client speaks the cowsql wire protocol. 
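// A typical short session looks like this (a sketch; the address is
// illustrative and error handling is elided):
//
//	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
//	defer cancel()
//	cli, err := client.New(ctx, "127.0.0.1:9001")
//	// handle err ...
//	defer cli.Close()
//	leader, err := cli.Leader(ctx)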
type Client struct { protocol *protocol.Protocol } // Option that can be used to tweak client parameters. type Option func(*options) type options struct { DialFunc DialFunc LogFunc LogFunc } // WithDialFunc sets a custom dial function for creating the client network // connection. func WithDialFunc(dial DialFunc) Option { return func(options *options) { options.DialFunc = dial } } // WithLogFunc sets a custom log function. // connection. func WithLogFunc(log LogFunc) Option { return func(options *options) { options.LogFunc = log } } // New creates a new client connected to the cowsql node with the given // address. func New(ctx context.Context, address string, options ...Option) (*Client, error) { o := defaultOptions() for _, option := range options { option(o) } // Establish the connection. conn, err := o.DialFunc(ctx, address) if err != nil { return nil, errors.Wrap(err, "failed to establish network connection") } protocol, err := protocol.Handshake(ctx, conn, protocol.VersionOne) if err != nil { conn.Close() return nil, err } client := &Client{protocol: protocol} return client, nil } // Leader returns information about the current leader, if any. func (c *Client) Leader(ctx context.Context) (*NodeInfo, error) { request := protocol.Message{} request.Init(16) response := protocol.Message{} response.Init(512) protocol.EncodeLeader(&request) if err := c.protocol.Call(ctx, &request, &response); err != nil { return nil, errors.Wrap(err, "failed to send Leader request") } id, address, err := protocol.DecodeNode(&response) if err != nil { return nil, errors.Wrap(err, "failed to parse Node response") } info := &NodeInfo{ID: id, Address: address} return info, nil } // Cluster returns information about all nodes in the cluster. func (c *Client) Cluster(ctx context.Context) ([]NodeInfo, error) { request := protocol.Message{} request.Init(16) response := protocol.Message{} response.Init(512) protocol.EncodeCluster(&request, protocol.ClusterFormatV1) if err := c.protocol.Call(ctx, &request, &response); err != nil { return nil, errors.Wrap(err, "failed to send Cluster request") } servers, err := protocol.DecodeNodes(&response) if err != nil { return nil, errors.Wrap(err, "failed to parse Node response") } return servers, nil } // File holds the content of a single database file. type File struct { Name string Data []byte } // Dump the content of the database with the given name. Two files will be // returned, the first is the main database file (which has the same name as // the database), the second is the WAL file (which has the same name as the // database plus the suffix "-wal"). func (c *Client) Dump(ctx context.Context, dbname string) ([]File, error) { request := protocol.Message{} request.Init(16) response := protocol.Message{} response.Init(512) protocol.EncodeDump(&request, dbname) if err := c.protocol.Call(ctx, &request, &response); err != nil { return nil, errors.Wrap(err, "failed to send dump request") } files, err := protocol.DecodeFiles(&response) if err != nil { return nil, errors.Wrap(err, "failed to parse files response") } defer files.Close() dump := make([]File, 0) for { name, data := files.Next() if name == "" { break } dump = append(dump, File{Name: name, Data: data}) } return dump, nil } // Add a node to a cluster. // // The new node will have the role specified in node.Role. Note that if the // desired role is Voter, the node being added must be online, since it will be // granted voting rights only once it catches up with the leader's log. 
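// For example, to add a second node and promote it once it has caught up
// (a sketch; the ID and address are illustrative and assume a node is
// already running there):
//
//	info := client.NodeInfo{ID: 2, Address: "127.0.0.1:9002", Role: client.Spare}
//	if err := cli.Add(ctx, info); err != nil {
//		// handle error
//	}
//	if err := cli.Assign(ctx, 2, client.Voter); err != nil {
//		// handle error
//	}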
func (c *Client) Add(ctx context.Context, node NodeInfo) error {
	request := protocol.Message{}
	response := protocol.Message{}

	request.Init(4096)
	response.Init(4096)

	protocol.EncodeAdd(&request, node.ID, node.Address)

	if err := c.protocol.Call(ctx, &request, &response); err != nil {
		return err
	}

	if err := protocol.DecodeEmpty(&response); err != nil {
		return err
	}

	// If the desired role is spare, there's nothing to do, since all newly
	// added nodes have the spare role.
	if node.Role == Spare {
		return nil
	}

	return c.Assign(ctx, node.ID, node.Role)
}

// Assign a role to a node.
//
// Possible roles are:
//
// - Voter: the node will replicate data and participate in quorum.
// - StandBy: the node will replicate data but won't participate in quorum.
// - Spare: the node won't replicate data and won't participate in quorum.
//
// If the target node does not exist or already has the desired role, an error
// is returned.
func (c *Client) Assign(ctx context.Context, id uint64, role NodeRole) error {
	request := protocol.Message{}
	response := protocol.Message{}

	request.Init(4096)
	response.Init(4096)

	protocol.EncodeAssign(&request, id, uint64(role))

	if err := c.protocol.Call(ctx, &request, &response); err != nil {
		return err
	}

	if err := protocol.DecodeEmpty(&response); err != nil {
		return err
	}

	return nil
}

// Transfer leadership from the current leader to another node.
//
// This must be invoked on a client connected to the current leader.
func (c *Client) Transfer(ctx context.Context, id uint64) error {
	request := protocol.Message{}
	response := protocol.Message{}

	request.Init(4096)
	response.Init(4096)

	protocol.EncodeTransfer(&request, id)

	if err := c.protocol.Call(ctx, &request, &response); err != nil {
		return err
	}

	if err := protocol.DecodeEmpty(&response); err != nil {
		return err
	}

	return nil
}

// Remove a node from the cluster.
func (c *Client) Remove(ctx context.Context, id uint64) error {
	request := protocol.Message{}
	request.Init(4096)
	response := protocol.Message{}
	response.Init(4096)

	protocol.EncodeRemove(&request, id)

	if err := c.protocol.Call(ctx, &request, &response); err != nil {
		return err
	}

	if err := protocol.DecodeEmpty(&response); err != nil {
		return err
	}

	return nil
}

// NodeMetadata holds user-defined node-level metadata.
type NodeMetadata struct {
	FailureDomain uint64
	Weight        uint64
}

// Describe returns metadata about the node we're connected with.
func (c *Client) Describe(ctx context.Context) (*NodeMetadata, error) {
	request := protocol.Message{}
	request.Init(4096)
	response := protocol.Message{}
	response.Init(4096)

	protocol.EncodeDescribe(&request, protocol.RequestDescribeFormatV0)

	if err := c.protocol.Call(ctx, &request, &response); err != nil {
		return nil, err
	}

	domain, weight, err := protocol.DecodeMetadata(&response)
	if err != nil {
		return nil, err
	}

	metadata := &NodeMetadata{
		FailureDomain: domain,
		Weight:        weight,
	}

	return metadata, nil
}

// Weight updates the weight associated with the node we're connected with.
func (c *Client) Weight(ctx context.Context, weight uint64) error {
	request := protocol.Message{}
	request.Init(4096)
	response := protocol.Message{}
	response.Init(4096)

	protocol.EncodeWeight(&request, weight)

	if err := c.protocol.Call(ctx, &request, &response); err != nil {
		return err
	}

	if err := protocol.DecodeEmpty(&response); err != nil {
		return err
	}

	return nil
}

// Close the client.
func (c *Client) Close() error {
	return c.protocol.Close()
}

// Create a client options object with sane defaults.
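// Callers can override these defaults through Option values. For example, to
// talk to a TLS-enabled node the default dial function can be wrapped (a
// sketch; cert and pool must be loaded by the caller, as the demo programs in
// this repository do):
//
//	dial := client.DialFuncWithTLS(client.DefaultDialFunc,
//		app.SimpleDialTLSConfig(cert, pool))
//	cli, err := client.New(ctx, address, client.WithDialFunc(dial))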
func defaultOptions() *options { return &options{ DialFunc: DefaultDialFunc, LogFunc: DefaultLogFunc, } } golang-github-cowsql-go-cowsql-1.22.0/client/client_export_test.go000066400000000000000000000002231447672437700253070ustar00rootroot00000000000000package client import ( "github.com/cowsql/go-cowsql/internal/protocol" ) func (c *Client) Protocol() *protocol.Protocol { return c.protocol } golang-github-cowsql-go-cowsql-1.22.0/client/client_test.go000066400000000000000000000121341447672437700237120ustar00rootroot00000000000000package client_test import ( "context" "fmt" "io/ioutil" "os" "testing" "time" cowsql "github.com/cowsql/go-cowsql" "github.com/cowsql/go-cowsql/client" "github.com/cowsql/go-cowsql/internal/protocol" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestClient_Leader(t *testing.T) { node, cleanup := newNode(t) defer cleanup() ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() client, err := client.New(ctx, node.BindAddress()) require.NoError(t, err) defer client.Close() leader, err := client.Leader(context.Background()) require.NoError(t, err) assert.Equal(t, leader.ID, uint64(1)) assert.Equal(t, leader.Address, "@1001") } func TestClient_Dump(t *testing.T) { node, cleanup := newNode(t) defer cleanup() ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() client, err := client.New(ctx, node.BindAddress()) require.NoError(t, err) defer client.Close() // Open a database and create a test table. request := protocol.Message{} request.Init(4096) response := protocol.Message{} response.Init(4096) protocol.EncodeOpen(&request, "test.db", 0, "volatile") p := client.Protocol() err = p.Call(ctx, &request, &response) require.NoError(t, err) db, err := protocol.DecodeDb(&response) require.NoError(t, err) protocol.EncodeExecSQLV0(&request, uint64(db), "CREATE TABLE foo (n INT)", nil) err = p.Call(ctx, &request, &response) require.NoError(t, err) files, err := client.Dump(ctx, "test.db") require.NoError(t, err) require.Len(t, files, 2) assert.Equal(t, "test.db", files[0].Name) assert.Equal(t, 4096, len(files[0].Data)) assert.Equal(t, "test.db-wal", files[1].Name) assert.Equal(t, 8272, len(files[1].Data)) } func TestClient_Cluster(t *testing.T) { node, cleanup := newNode(t) defer cleanup() ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() cli, err := client.New(ctx, node.BindAddress()) require.NoError(t, err) defer cli.Close() servers, err := cli.Cluster(context.Background()) require.NoError(t, err) assert.Len(t, servers, 1) assert.Equal(t, servers[0].ID, uint64(1)) assert.Equal(t, servers[0].Address, "@1001") assert.Equal(t, servers[0].Role, client.Voter) } func TestClient_Transfer(t *testing.T) { node1, cleanup := newNode(t) defer cleanup() ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() cli, err := client.New(ctx, node1.BindAddress()) require.NoError(t, err) defer cli.Close() node2, cleanup := addNode(t, cli, 2) defer cleanup() err = cli.Assign(context.Background(), 2, client.Voter) require.NoError(t, err) err = cli.Transfer(context.Background(), 2) require.NoError(t, err) leader, err := cli.Leader(context.Background()) require.NoError(t, err) assert.Equal(t, leader.ID, uint64(2)) cli, err = client.New(ctx, node2.BindAddress()) require.NoError(t, err) defer cli.Close() leader, err = cli.Leader(context.Background()) require.NoError(t, err) assert.Equal(t, leader.ID, uint64(2)) } func TestClient_Describe(t 
*testing.T) { node, cleanup := newNode(t) defer cleanup() ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() cli, err := client.New(ctx, node.BindAddress()) require.NoError(t, err) defer cli.Close() metadata, err := cli.Describe(context.Background()) require.NoError(t, err) assert.Equal(t, uint64(0), metadata.FailureDomain) assert.Equal(t, uint64(0), metadata.Weight) require.NoError(t, cli.Weight(context.Background(), 123)) metadata, err = cli.Describe(context.Background()) require.NoError(t, err) assert.Equal(t, uint64(0), metadata.FailureDomain) assert.Equal(t, uint64(123), metadata.Weight) } func newNode(t *testing.T) (*cowsql.Node, func()) { t.Helper() dir, dirCleanup := newDir(t) id := uint64(1) address := fmt.Sprintf("@%d", id+1000) node, err := cowsql.New(uint64(1), address, dir, cowsql.WithBindAddress(address)) require.NoError(t, err) err = node.Start() require.NoError(t, err) cleanup := func() { require.NoError(t, node.Close()) dirCleanup() } return node, cleanup } func addNode(t *testing.T, cli *client.Client, id uint64) (*cowsql.Node, func()) { t.Helper() dir, dirCleanup := newDir(t) ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() address := fmt.Sprintf("@%d", id+1000) node, err := cowsql.New(id, address, dir, cowsql.WithBindAddress(address)) require.NoError(t, err) err = node.Start() require.NoError(t, err) info := client.NodeInfo{ ID: id, Address: address, Role: client.Spare, } err = cli.Add(ctx, info) require.NoError(t, err) cleanup := func() { require.NoError(t, node.Close()) dirCleanup() } return node, cleanup } // Return a new temporary directory. func newDir(t *testing.T) (string, func()) { t.Helper() dir, err := ioutil.TempDir("", "cowsql-replication-test-") assert.NoError(t, err) cleanup := func() { _, err := os.Stat(dir) if err != nil { assert.True(t, os.IsNotExist(err)) } else { assert.NoError(t, os.RemoveAll(dir)) } } return dir, cleanup } golang-github-cowsql-go-cowsql-1.22.0/client/constants.go000066400000000000000000000002651447672437700234130ustar00rootroot00000000000000package client import ( "github.com/cowsql/go-cowsql/internal/protocol" ) // Node roles const ( Voter = protocol.Voter StandBy = protocol.StandBy Spare = protocol.Spare ) golang-github-cowsql-go-cowsql-1.22.0/client/database_store.go000066400000000000000000000103441447672437700243560ustar00rootroot00000000000000// +build !nosqlite3 package client import ( "context" "database/sql" "fmt" "strings" "github.com/pkg/errors" _ "github.com/mattn/go-sqlite3" // Go SQLite bindings ) // Option that can be used to tweak node store parameters. type NodeStoreOption func(*nodeStoreOptions) type nodeStoreOptions struct { Where string } // DatabaseNodeStore persists a list addresses of cowsql nodes in a SQL table. type DatabaseNodeStore struct { db *sql.DB // Database handle to use. schema string // Name of the schema holding the servers table. table string // Name of the servers table. column string // Column name in the servers table holding the server address. where string // Optional WHERE filter } // DefaultNodeStore creates a new NodeStore using the given filename. // // If the filename ends with ".yaml" then the YamlNodeStore implementation will // be used. Otherwise the SQLite-based one will be picked, with default names // for the schema, table and column parameters. // // It also creates the table if it doesn't exist yet. 
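// For example (a sketch; the file names are illustrative):
//
//	yamlStore, err := client.DefaultNodeStore("servers.yaml") // YAML-backed store
//	// handle err ...
//	sqlStore, err := client.DefaultNodeStore("servers.db") // SQLite-backed store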
func DefaultNodeStore(filename string) (NodeStore, error) { if strings.HasSuffix(filename, ".yaml") { return NewYamlNodeStore(filename) } // Open the database. db, err := sql.Open("sqlite3", filename) if err != nil { return nil, errors.Wrap(err, "failed to open database") } // Since we're setting SQLite single-thread mode, we need to have one // connection at most. db.SetMaxOpenConns(1) // Create the servers table if it does not exist yet. _, err = db.Exec("CREATE TABLE IF NOT EXISTS servers (address TEXT, UNIQUE(address))") if err != nil { return nil, errors.Wrap(err, "failed to create servers table") } store := NewNodeStore(db, "main", "servers", "address") return store, nil } // NewNodeStore creates a new NodeStore. func NewNodeStore(db *sql.DB, schema, table, column string, options ...NodeStoreOption) *DatabaseNodeStore { o := &nodeStoreOptions{} for _, option := range options { option(o) } return &DatabaseNodeStore{ db: db, schema: schema, table: table, column: column, where: o.Where, } } // WithNodeStoreWhereClause configures the node store to append the given // hard-coded where clause to the SELECT query used to fetch nodes. Only the // clause itself must be given, without the "WHERE" prefix. func WithNodeStoreWhereClause(where string) NodeStoreOption { return func(options *nodeStoreOptions) { options.Where = where } } // Get the current servers. func (d *DatabaseNodeStore) Get(ctx context.Context) ([]NodeInfo, error) { tx, err := d.db.Begin() if err != nil { return nil, errors.Wrap(err, "failed to begin transaction") } defer tx.Rollback() query := fmt.Sprintf("SELECT %s FROM %s.%s", d.column, d.schema, d.table) if d.where != "" { query += " WHERE " + d.where } rows, err := tx.QueryContext(ctx, query) if err != nil { return nil, errors.Wrap(err, "failed to query servers table") } defer rows.Close() servers := make([]NodeInfo, 0) for rows.Next() { var address string err := rows.Scan(&address) if err != nil { return nil, errors.Wrap(err, "failed to fetch server address") } servers = append(servers, NodeInfo{ID: 1, Address: address}) } if err := rows.Err(); err != nil { return nil, errors.Wrap(err, "result set failure") } return servers, nil } // Set the servers addresses. func (d *DatabaseNodeStore) Set(ctx context.Context, servers []NodeInfo) error { tx, err := d.db.Begin() if err != nil { return errors.Wrap(err, "failed to begin transaction") } query := fmt.Sprintf("DELETE FROM %s.%s", d.schema, d.table) if _, err := tx.ExecContext(ctx, query); err != nil { tx.Rollback() return errors.Wrap(err, "failed to delete existing servers rows") } query = fmt.Sprintf("INSERT INTO %s.%s(%s) VALUES (?)", d.schema, d.table, d.column) stmt, err := tx.PrepareContext(ctx, query) if err != nil { tx.Rollback() return errors.Wrap(err, "failed to prepare insert statement") } defer stmt.Close() for _, server := range servers { if _, err := stmt.ExecContext(ctx, server.Address); err != nil { tx.Rollback() return errors.Wrapf(err, "failed to insert server %s", server.Address) } } if err := tx.Commit(); err != nil { return errors.Wrap(err, "failed to commit transaction") } return nil } golang-github-cowsql-go-cowsql-1.22.0/client/dial.go000066400000000000000000000020231447672437700223020ustar00rootroot00000000000000package client import ( "context" "crypto/tls" "net" "github.com/cowsql/go-cowsql/internal/protocol" ) // DefaultDialFunc is the default dial function, which can handle plain TCP and // Unix socket endpoints. 
// You can customize it with WithDialFunc().
func DefaultDialFunc(ctx context.Context, address string) (net.Conn, error) {
	return protocol.Dial(ctx, address)
}

// DialFuncWithTLS returns a dial function that uses TLS encryption.
//
// The given dial function will be used to establish the network connection,
// and the given TLS config will be used for encryption.
func DialFuncWithTLS(dial DialFunc, config *tls.Config) DialFunc {
	return func(ctx context.Context, addr string) (net.Conn, error) {
		clonedConfig := config.Clone()
		if len(clonedConfig.ServerName) == 0 {
			remoteIP, _, err := net.SplitHostPort(addr)
			if err != nil {
				return nil, err
			}
			clonedConfig.ServerName = remoteIP
		}
		conn, err := dial(ctx, addr)
		if err != nil {
			return nil, err
		}
		return tls.Client(conn, clonedConfig), nil
	}
}
golang-github-cowsql-go-cowsql-1.22.0/client/leader.go000066400000000000000000000015221447672437700226300ustar00rootroot00000000000000package client

import (
	"context"

	"github.com/cowsql/go-cowsql/internal/protocol"
)

// FindLeader returns a Client connected to the current cluster leader.
//
// The function will iterate through all nodes in the given store, and for
// each of them check if it's the current leader. If no leader is found, the
// function will keep retrying (with a capped exponential backoff) until the
// given context is canceled.
func FindLeader(ctx context.Context, store NodeStore, options ...Option) (*Client, error) {
	o := defaultOptions()
	for _, option := range options {
		option(o)
	}
	config := protocol.Config{
		Dial: o.DialFunc,
	}
	connector := protocol.NewConnector(0, store, config, o.LogFunc)
	protocol, err := connector.Connect(ctx)
	if err != nil {
		return nil, err
	}
	client := &Client{protocol: protocol}
	return client, nil
}
golang-github-cowsql-go-cowsql-1.22.0/client/leader_test.go000066400000000000000000000017721447672437700236740ustar00rootroot00000000000000package client_test

import (
	"context"
	"fmt"
	"testing"
	"time"

	cowsql "github.com/cowsql/go-cowsql"
	"github.com/cowsql/go-cowsql/client"
	"github.com/stretchr/testify/require"
)

func TestMembership(t *testing.T) {
	n := 3
	nodes := make([]*cowsql.Node, n)
	infos := make([]client.NodeInfo, n)

	for i := range nodes {
		id := uint64(i + 1)
		address := fmt.Sprintf("@test-%d", id)
		dir, cleanup := newDir(t)
		defer cleanup()
		node, err := cowsql.New(id, address, dir, cowsql.WithBindAddress(address))
		require.NoError(t, err)
		nodes[i] = node
		infos[i].ID = id
		infos[i].Address = address
		err = node.Start()
		require.NoError(t, err)
		defer node.Close()
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	store := client.NewInmemNodeStore()
	store.Set(context.Background(), []client.NodeInfo{infos[0]})

	client, err := client.FindLeader(ctx, store)
	require.NoError(t, err)
	defer client.Close()

	err = client.Add(ctx, infos[1])
	require.NoError(t, err)
}
golang-github-cowsql-go-cowsql-1.22.0/client/log.go000066400000000000000000000007641447672437700221630ustar00rootroot00000000000000package client

import (
	"github.com/cowsql/go-cowsql/logging"
)

// LogFunc is a function that can be used for logging.
type LogFunc = logging.Func

// LogLevel defines the logging level.
type LogLevel = logging.Level

// Available logging levels.
const (
	LogNone  = logging.None
	LogDebug = logging.Debug
	LogInfo  = logging.Info
	LogWarn  = logging.Warn
	LogError = logging.Error
)

// DefaultLogFunc doesn't emit any message.
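// To see client diagnostics instead, pass a custom function, e.g. one that
// forwards to the standard library logger (a sketch; the prefix format is
// illustrative):
//
//	logFunc := func(l client.LogLevel, format string, a ...interface{}) {
//		log.Printf("cowsql "+l.String()+": "+format, a...)
//	}
//	cli, err := client.FindLeader(ctx, store, client.WithLogFunc(logFunc))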
func DefaultLogFunc(l LogLevel, format string, a ...interface{}) {} golang-github-cowsql-go-cowsql-1.22.0/client/no_database_store.go000066400000000000000000000006361447672437700250550ustar00rootroot00000000000000// +build nosqlite3 package client import ( "strings" "github.com/pkg/errors" ) // DefaultNodeStore creates a new NodeStore using the given filename. // // The filename must end with ".yaml". func DefaultNodeStore(filename string) (NodeStore, error) { if strings.HasSuffix(filename, ".yaml") { return NewYamlNodeStore(filename) } return nil, errors.New("built without support for DatabaseNodeStore") } golang-github-cowsql-go-cowsql-1.22.0/client/store.go000066400000000000000000000036741447672437700225420ustar00rootroot00000000000000package client import ( "context" "io/ioutil" "os" "sync" "github.com/google/renameio" "gopkg.in/yaml.v2" "github.com/cowsql/go-cowsql/internal/protocol" ) // NodeStore is used by a cowsql client to get an initial list of candidate // cowsql nodes that it can dial in order to find a leader cowsql node to use. type NodeStore = protocol.NodeStore // NodeRole identifies the role of a node. type NodeRole = protocol.NodeRole // NodeInfo holds information about a single server. type NodeInfo = protocol.NodeInfo // InmemNodeStore keeps the list of target cowsql nodes in memory. type InmemNodeStore = protocol.InmemNodeStore // NewInmemNodeStore creates NodeStore which stores its data in-memory. var NewInmemNodeStore = protocol.NewInmemNodeStore // Persists a list addresses of cowsql nodes in a YAML file. type YamlNodeStore struct { path string servers []NodeInfo mu sync.RWMutex } // NewYamlNodeStore creates a new YamlNodeStore backed by the given YAML file. func NewYamlNodeStore(path string) (*YamlNodeStore, error) { servers := []NodeInfo{} _, err := os.Stat(path) if err != nil { if !os.IsNotExist(err) { return nil, err } } else { data, err := ioutil.ReadFile(path) if err != nil { return nil, err } if err := yaml.Unmarshal(data, &servers); err != nil { return nil, err } } store := &YamlNodeStore{ path: path, servers: servers, } return store, nil } // Get the current servers. func (s *YamlNodeStore) Get(ctx context.Context) ([]NodeInfo, error) { s.mu.RLock() defer s.mu.RUnlock() ret := make([]NodeInfo, len(s.servers)) copy(ret, s.servers) return ret, nil } // Set the servers addresses. func (s *YamlNodeStore) Set(ctx context.Context, servers []NodeInfo) error { s.mu.Lock() defer s.mu.Unlock() data, err := yaml.Marshal(servers) if err != nil { return err } if err := renameio.WriteFile(s.path, data, 0600); err != nil { return err } s.servers = servers return nil } golang-github-cowsql-go-cowsql-1.22.0/client/store_test.go000066400000000000000000000042771447672437700236010ustar00rootroot00000000000000// +build !nosqlite3 package client_test import ( "context" "database/sql" "testing" cowsql "github.com/cowsql/go-cowsql" "github.com/cowsql/go-cowsql/client" "github.com/cowsql/go-cowsql/driver" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // Exercise setting and getting servers in a DatabaseNodeStore created with // DefaultNodeStore. func TestDefaultNodeStore(t *testing.T) { // Create a new default store. store, err := client.DefaultNodeStore(":memory:") require.NoError(t, err) // Set and get some targets. 
err = store.Set(context.Background(), []client.NodeInfo{ {Address: "1.2.3.4:666"}, {Address: "5.6.7.8:666"}}, ) require.NoError(t, err) servers, err := store.Get(context.Background()) assert.Equal(t, []client.NodeInfo{ {ID: uint64(1), Address: "1.2.3.4:666"}, {ID: uint64(1), Address: "5.6.7.8:666"}}, servers) // Set and get some new targets. err = store.Set(context.Background(), []client.NodeInfo{ {Address: "1.2.3.4:666"}, {Address: "9.9.9.9:666"}, }) require.NoError(t, err) servers, err = store.Get(context.Background()) assert.Equal(t, []client.NodeInfo{ {ID: uint64(1), Address: "1.2.3.4:666"}, {ID: uint64(1), Address: "9.9.9.9:666"}}, servers) // Setting duplicate targets returns an error and the change is not // persisted. err = store.Set(context.Background(), []client.NodeInfo{ {Address: "1.2.3.4:666"}, {Address: "1.2.3.4:666"}, }) assert.EqualError(t, err, "failed to insert server 1.2.3.4:666: UNIQUE constraint failed: servers.address") servers, err = store.Get(context.Background()) assert.Equal(t, []client.NodeInfo{ {ID: uint64(1), Address: "1.2.3.4:666"}, {ID: uint64(1), Address: "9.9.9.9:666"}}, servers) } func TestConfigMultiThread(t *testing.T) { cleanup := dummyDBSetup(t) defer cleanup() err := cowsql.ConfigMultiThread() assert.EqualError(t, err, "SQLite is already initialized") } func dummyDBSetup(t *testing.T) func() { store := client.NewInmemNodeStore() driver, err := driver.New(store) require.NoError(t, err) sql.Register("dummy", driver) db, err := sql.Open("dummy", "test.db") require.NoError(t, err) cleanup := func() { require.NoError(t, db.Close()) } return cleanup } golang-github-cowsql-go-cowsql-1.22.0/cmd/000077500000000000000000000000001447672437700203325ustar00rootroot00000000000000golang-github-cowsql-go-cowsql-1.22.0/cmd/cowsql-benchmark/000077500000000000000000000000001447672437700235725ustar00rootroot00000000000000golang-github-cowsql-go-cowsql-1.22.0/cmd/cowsql-benchmark/cowsql-benchmark.go000066400000000000000000000121041447672437700273570ustar00rootroot00000000000000package main import ( "context" "fmt" "os" "os/signal" "path/filepath" "time" "github.com/cowsql/go-cowsql/app" "github.com/cowsql/go-cowsql/benchmark" "github.com/pkg/errors" "github.com/spf13/cobra" "golang.org/x/sys/unix" ) const ( defaultClusterTimeout = 120 defaultDir = "/tmp/cowsql-benchmark" defaultDriver = false defaultDurationS = 60 defaultKvKeySize = 32 defaultKvValueSize = 1024 defaultWorkers = 1 defaultWorkload = "kvwrite" docString = "For benchmarking cowsql.\n\n" + "Run a 1 node benchmark:\n" + "cowsql-benchmark -d 127.0.0.1:9001 --driver --cluster 127.0.0.1:9001\n\n" + "Run a multi-node benchmark, the first node will self-elect and become leader,\n" + "the driver flag results in the workload being run from the first, leader node.\n" + "cowsql-benchmark --db 127.0.0.1:9001 --driver --cluster 127.0.0.1:9001,127.0.0.1:9002,127.0.0.1:9003 &\n" + "cowsql-benchmark --db 127.0.0.1:9002 --join 127.0.0.1:9001 &\n" + "cowsql-benchmark --db 127.0.0.1:9003 --join 127.0.0.1:9001 &\n\n" + "Run a multi-node benchmark, the first node will self-elect and become leader,\n" + "the driver flag results in the workload being run from the third, non-leader node.\n" + "cowsql-benchmark --db 127.0.0.1:9001 &\n" + "cowsql-benchmark --db 127.0.0.1:9002 --join 127.0.0.1:9001 &\n" + "cowsql-benchmark --db 127.0.0.1:9003 --join 127.0.0.1:9001 --driver --cluster 127.0.0.1:9001,127.0.0.1:9002,127.0.0.1:9003 &\n\n" + "The results can be found on the `driver` node in " + defaultDir + "/results or in the directory 
provided to the tool.\n" + "Benchmark results are files named `n-q-timestamp` where `n` is the number of the worker,\n" + "`q` is the type of query that was tracked. All results in the file are in milliseconds.\n" ) func signalChannel() chan os.Signal { ch := make(chan os.Signal, 32) signal.Notify(ch, unix.SIGPWR) signal.Notify(ch, unix.SIGINT) signal.Notify(ch, unix.SIGQUIT) signal.Notify(ch, unix.SIGTERM) return ch } func main() { var cluster *[]string var clusterTimeout int var db string var dir string var driver bool var duration int var join *[]string var kvKeySize int var kvValueSize int var workers int var workload string cmd := &cobra.Command{ Use: "cowsql-benchmark", Short: "For benchmarking cowsql", Long: docString, RunE: func(cmd *cobra.Command, args []string) error { dir := filepath.Join(dir, db) if err := os.MkdirAll(dir, 0755); err != nil { return errors.Wrapf(err, "can't create %s", dir) } app, err := app.New(dir, app.WithAddress(db), app.WithCluster(*join)) if err != nil { return err } readyCtx, cancel := context.WithTimeout(context.Background(), time.Duration(clusterTimeout)*time.Second) defer cancel() if err := app.Ready(readyCtx); err != nil { return errors.Wrap(err, "App not ready in time") } ch := signalChannel() if !driver { fmt.Println("Benchmark client ready. Send signal to abort or when done.") select { case <-ch: return nil } } if len(*cluster) == 0 { return fmt.Errorf("driver node, `--cluster` flag must be provided") } db, err := app.Open(context.Background(), "benchmark") if err != nil { return err } db.SetMaxOpenConns(500) db.SetMaxIdleConns(500) bm, err := benchmark.New( app, db, dir, benchmark.WithWorkload(workload), benchmark.WithDuration(duration), benchmark.WithWorkers(workers), benchmark.WithKvKeySize(kvKeySize), benchmark.WithKvValueSize(kvValueSize), benchmark.WithCluster(*cluster), benchmark.WithClusterTimeout(clusterTimeout), ) if err != nil { return err } if err := bm.Run(ch); err != nil { return err } db.Close() app.Close() return nil }, } flags := cmd.Flags() flags.StringVarP(&db, "db", "d", "", "Address used for internal database replication.") join = flags.StringSliceP("join", "j", nil, "Database addresses of existing nodes.") cluster = flags.StringSliceP("cluster", "c", nil, "Database addresses of all nodes taking part in the benchmark.\n"+ "The driver will wait for all nodes to be online before running the benchmark.") flags.IntVar(&clusterTimeout, "cluster-timeout", defaultClusterTimeout, "How long the benchmark should wait in seconds for the whole cluster to be online.") flags.StringVarP(&dir, "dir", "D", defaultDir, "Data directory.") flags.StringVarP(&workload, "workload", "w", defaultWorkload, "The workload to run: \"kvwrite\" or \"kvreadwrite\".") flags.BoolVar(&driver, "driver", defaultDriver, "Set this flag to run the benchmark from this instance. 
Must be set on 1 node.") flags.IntVar(&duration, "duration", defaultDurationS, "Run duration in seconds.") flags.IntVar(&workers, "workers", defaultWorkers, "Number of workers executing the workload.") flags.IntVar(&kvKeySize, "key-size", defaultKvKeySize, "Size of the KV keys in bytes.") flags.IntVar(&kvValueSize, "value-size", defaultKvValueSize, "Size of the KV values in bytes.") cmd.MarkFlagRequired("db") if err := cmd.Execute(); err != nil { os.Exit(1) } } golang-github-cowsql-go-cowsql-1.22.0/cmd/cowsql-demo/000077500000000000000000000000001447672437700225645ustar00rootroot00000000000000golang-github-cowsql-go-cowsql-1.22.0/cmd/cowsql-demo/cowsql-demo.go000066400000000000000000000075731447672437700253610ustar00rootroot00000000000000package main import ( "context" "crypto/tls" "crypto/x509" "fmt" "io/ioutil" "log" "net" "net/http" "os" "os/signal" "path/filepath" "strings" "github.com/cowsql/go-cowsql/app" "github.com/cowsql/go-cowsql/client" "github.com/pkg/errors" "github.com/spf13/cobra" "golang.org/x/sys/unix" ) func main() { var api string var db string var join *[]string var dir string var verbose bool var crt string var key string cmd := &cobra.Command{ Use: "cowsql-demo", Short: "Demo application using cowsql", Long: `This demo shows how to integrate a Go application with cowsql. Complete documentation is available at https://github.com/cowsql/go-cowsql`, RunE: func(cmd *cobra.Command, args []string) error { dir := filepath.Join(dir, db) if err := os.MkdirAll(dir, 0755); err != nil { return errors.Wrapf(err, "can't create %s", dir) } logFunc := func(l client.LogLevel, format string, a ...interface{}) { if !verbose { return } log.Printf(fmt.Sprintf("%s: %s: %s\n", api, l.String(), format), a...) } options := []app.Option{app.WithAddress(db), app.WithCluster(*join), app.WithLogFunc(logFunc)} // Set TLS options if (crt != "" && key == "") || (key != "" && crt == "") { return fmt.Errorf("both TLS certificate and key must be given") } if crt != "" { cert, err := tls.LoadX509KeyPair(crt, key) if err != nil { return err } data, err := ioutil.ReadFile(crt) if err != nil { return err } pool := x509.NewCertPool() if !pool.AppendCertsFromPEM(data) { return fmt.Errorf("bad certificate") } options = append(options, app.WithTLS(app.SimpleTLSConfig(cert, pool))) } app, err := app.New(dir, options...) 
if err != nil { return err } if err := app.Ready(context.Background()); err != nil { return err } db, err := app.Open(context.Background(), "demo") if err != nil { return err } if _, err := db.Exec(schema); err != nil { return err } http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { key := strings.TrimLeft(r.URL.Path, "/") result := "" switch r.Method { case "GET": row := db.QueryRow(query, key) if err := row.Scan(&result); err != nil { result = fmt.Sprintf("Error: %s", err.Error()) } break case "PUT": result = "done" value, _ := ioutil.ReadAll(r.Body) if _, err := db.Exec(update, key, string(value[:])); err != nil { result = fmt.Sprintf("Error: %s", err.Error()) } default: result = fmt.Sprintf("Error: unsupported method %q", r.Method) } fmt.Fprintf(w, "%s\n", result) }) listener, err := net.Listen("tcp", api) if err != nil { return err } go http.Serve(listener, nil) ch := make(chan os.Signal, 32) signal.Notify(ch, unix.SIGPWR) signal.Notify(ch, unix.SIGINT) signal.Notify(ch, unix.SIGQUIT) signal.Notify(ch, unix.SIGTERM) <-ch listener.Close() db.Close() app.Handover(context.Background()) app.Close() return nil }, } flags := cmd.Flags() flags.StringVarP(&api, "api", "a", "", "address used to expose the demo API") flags.StringVarP(&db, "db", "d", "", "address used for internal database replication") join = flags.StringSliceP("join", "j", nil, "database addresses of existing nodes") flags.StringVarP(&dir, "dir", "D", "/tmp/cowsql-demo", "data directory") flags.BoolVarP(&verbose, "verbose", "v", false, "verbose logging") flags.StringVarP(&crt, "cert", "c", "", "public TLS cert") flags.StringVarP(&key, "key", "k", "", "private TLS key") cmd.MarkFlagRequired("api") cmd.MarkFlagRequired("db") if err := cmd.Execute(); err != nil { os.Exit(1) } } const ( schema = "CREATE TABLE IF NOT EXISTS model (key TEXT, value TEXT, UNIQUE(key))" query = "SELECT value FROM model WHERE key = ?" 
update = "INSERT OR REPLACE INTO model(key, value) VALUES(?, ?)" ) golang-github-cowsql-go-cowsql-1.22.0/cmd/cowsql/000077500000000000000000000000001447672437700216425ustar00rootroot00000000000000golang-github-cowsql-go-cowsql-1.22.0/cmd/cowsql/cowsql.go000066400000000000000000000062061447672437700235050ustar00rootroot00000000000000package main import ( "context" "crypto/tls" "crypto/x509" "fmt" "io" "io/ioutil" "os" "strings" "github.com/cowsql/go-cowsql/app" "github.com/cowsql/go-cowsql/client" "github.com/cowsql/go-cowsql/internal/shell" "github.com/peterh/liner" "github.com/spf13/cobra" ) func main() { var crt string var key string var servers *[]string var format string cmd := &cobra.Command{ Use: "cowsql -s [command]", Short: "Standard cowsql shell", Args: cobra.RangeArgs(1, 2), RunE: func(cmd *cobra.Command, args []string) error { if len(*servers) == 0 { return fmt.Errorf("no servers provided") } var store client.NodeStore var err error first := (*servers)[0] if strings.HasPrefix(first, "file://") { if len(*servers) > 1 { return fmt.Errorf("can't mix server store and explicit list") } path := first[len("file://"):] if _, err := os.Stat(path); err != nil { return fmt.Errorf("open servers store: %w", err) } store, err = client.DefaultNodeStore(path) if err != nil { return fmt.Errorf("open servers store: %w", err) } } else { infos := make([]client.NodeInfo, len(*servers)) for i, address := range *servers { infos[i].Address = address } store = client.NewInmemNodeStore() store.Set(context.Background(), infos) } if (crt != "" && key == "") || (key != "" && crt == "") { return fmt.Errorf("both TLS certificate and key must be given") } dial := client.DefaultDialFunc if crt != "" { cert, err := tls.LoadX509KeyPair(crt, key) if err != nil { return err } data, err := ioutil.ReadFile(crt) if err != nil { return err } pool := x509.NewCertPool() if !pool.AppendCertsFromPEM(data) { return fmt.Errorf("bad certificate") } config := app.SimpleDialTLSConfig(cert, pool) dial = client.DialFuncWithTLS(dial, config) } sh, err := shell.New(args[0], store, shell.WithDialFunc(dial), shell.WithFormat(format)) if err != nil { return err } if len(args) > 1 { for _, input := range strings.Split(args[1], ";") { result, err := sh.Process(context.Background(), input) if err != nil { return err } else if result != "" { fmt.Println(result) } } return nil } line := liner.NewLiner() defer line.Close() for { input, err := line.Prompt("cowsql> ") if err != nil { if err == io.EOF { break } return err } result, err := sh.Process(context.Background(), input) if err != nil { fmt.Println("Error: ", err) } else { line.AppendHistory(input) if result != "" { fmt.Println(result) } } } return nil }, } flags := cmd.Flags() servers = flags.StringSliceP("servers", "s", nil, "comma-separated list of db servers, or file://") flags.StringVarP(&crt, "cert", "c", "", "public TLS cert") flags.StringVarP(&key, "key", "k", "", "private TLS key") flags.StringVarP(&format, "format", "f", "tabular", "output format (tabular, json)") cmd.MarkFlagRequired("servers") if err := cmd.Execute(); err != nil { os.Exit(1) } } golang-github-cowsql-go-cowsql-1.22.0/config.go000066400000000000000000000031651447672437700213700ustar00rootroot00000000000000// +build !nosqlite3 package cowsql import ( "fmt" "os" "github.com/cowsql/go-cowsql/internal/bindings" "github.com/cowsql/go-cowsql/internal/protocol" "github.com/pkg/errors" ) // ConfigMultiThread sets the threading mode of SQLite to Multi-thread. 
//
// By default go-cowsql configures SQLite to Single-thread mode, because the
// cowsql engine itself is single-threaded, and enabling Multi-thread or
// Serialized modes would incur a performance penalty.
//
// If your Go process also uses SQLite directly (e.g. using the
// github.com/mattn/go-sqlite3 bindings) you might need to switch to
// Multi-thread mode in order to be thread-safe.
//
// IMPORTANT: It's possible to successfully change SQLite's threading mode only
// if no SQLite APIs have been invoked yet (e.g. no database has been opened
// yet). Therefore you'll typically want to call ConfigMultiThread() very early
// in your process setup. Alternatively you can set the GO_COWSQL_MULTITHREAD
// environment variable to 1 at process startup, in order to prevent go-cowsql
// from setting Single-thread mode at all.
func ConfigMultiThread() error {
	if err := bindings.ConfigMultiThread(); err != nil {
		if err, ok := err.(protocol.Error); ok && err.Code == 21 /* SQLITE_MISUSE */ {
			return fmt.Errorf("SQLite is already initialized")
		}
		return errors.Wrap(err, "unknown error")
	}
	return nil
}

func init() {
	// Don't enable single thread mode by default if GO_COWSQL_MULTITHREAD
	// is set.
	if os.Getenv("GO_COWSQL_MULTITHREAD") == "1" {
		return
	}
	err := bindings.ConfigSingleThread()
	if err != nil {
		panic(errors.Wrap(err, "set single thread mode"))
	}
}
golang-github-cowsql-go-cowsql-1.22.0/docs/000077500000000000000000000000001447672437700205175ustar00rootroot00000000000000golang-github-cowsql-go-cowsql-1.22.0/docs/restore-db.md000066400000000000000000000154371447672437700231170ustar00rootroot00000000000000Note: this document is incomplete and a work in progress.

# A. INFO

**Always back up your database folders before performing any of the steps described below and make sure no cowsql nodes are running!**

## A.1 cluster.yaml

File containing the node configuration of your installation.

### Contents

```
- Address: 127.0.0.1:9001
  ID: 1
  Role: 0
- Address: 127.0.0.1:9002
  ID: 2
  Role: 1
- Address: 127.0.0.1:9003
  ID: 3
  Role: 0
```

*Address* : The `host:port` that will be used in database replication; we will refer to this as `DbAddress`.

*ID* : Raft ID of the node.

*Role* :
- 0: Voter, takes part in quorum, replicates DB.
- 1: Standby, doesn't take part in quorum, replicates DB.
- 2: Backup, doesn't take part in quorum, doesn't replicate DB.

## A.2 info.yaml

File containing the node-specific information.

### Contents

```
Address: 127.0.0.1:9001
ID: 1
Role: 0
```

## A.3 Finding the node with the most up-to-date data.

1. For every known node, make a new directory, `NodeDirectory`, and copy the data directory of the node into it.
2. Make a `cluster.yaml` conforming to the structure laid out above, with the desired node configuration, and save it at `TargetClusterYamlPath`. e.g. you can perform:
```
cat <<EOF > "cluster.yaml"
- Address: 127.0.0.1:9001
  ID: 1
  Role: 0
- Address: 127.0.0.1:9002
  ID: 2
  Role: 1
- Address: 127.0.0.1:9003
  ID: 3
  Role: 0
EOF
```
3. For every node, run `cowsql -s <DbAddress> <DbName> ".reconfigure <NodeDirectory> <TargetClusterYamlPath>"`. The `DbAddress` and `DbName` aren't really important, just use something syntactically correct; we are more interested in the side effects of this command on the `NodeDirectory`. The command should return `OK`.
4. Look in the `NodeDirectory` of every node; there should be at least 1 new segment file, e.g. `0000000057688811-0000000057688811`, with the start index (the number before `-`) equal to the end index (the number after `-`). This will be the most recently created segment file. Remember this index.
5. The node with the highest index from the previous step has the most up-to-date data. If there is an ex aequo, pick one.
Note: a new command that doesn't rely on the side effects of the `.reconfigure` command will be added in the future.

# B. Restoring Data

## B.1 Loading existing data and existing network/node configuration in `cowsql-demo`

*Use this when you have access to the machines where the database lives and want to start the database with the unaltered data of every node.*

0. Stop all database nodes & back up all the database folders.
1. Make a base directory for your data, e.g. `data`; we will refer to this as the `DataDirectory`.
2. For every node in `cluster.yaml`, create a directory with name equal to `DbAddress` under the `DataDirectory`, unique to the node. This `host:port` will be needed later on for the `--db` argument when you start the `cowsql-demo` application, e.g. for node 1 you now have a directory `data/127.0.0.1:9001`. We will refer to this as the `NodeDirectory`.
3. For every node in `cluster.yaml`, copy all the data for that node to its `NodeDirectory`.
4. For every node in `cluster.yaml`, make sure there exists an `info.yaml` in `NodeDirectory` that contains the information as found in `cluster.yaml`.
5. For every node in `cluster.yaml`, run: `cowsql-demo --dir <DataDirectory> --api <ApiAddress> --db <DbAddress>`, where `ApiAddress` is a `host:port`, e.g. `cowsql-demo --dir data --api 127.0.0.1:8001 --db 127.0.0.1:9001`. Note that it is important that `--dir` is a path to the newly created `DataDirectory`, otherwise the demo will create a new directory without the existing data.
6. You should have an operational cluster; access it through e.g. the `cowsql` cli tool.

## B.2 Restore existing data and new network/node configuration in `cowsql-demo`

*Use this when you don't have access to the machines where the database lives and want to start the database with data from a specific node, or when you have access to the machines but the cluster has to be reconfigured or repaired.*

0. Stop all database nodes & back up all the database folders.
1. Create a `cluster.yaml` containing your desired node configuration. We will refer to this file by `TargetClusterYaml` and to its location by `TargetClusterYamlPath`.
2. Follow steps 1 and 2 of part `B.1`, where `cluster.yaml` should be interpreted as `TargetClusterYaml`.
3. Find the node with the most up-to-date data following the steps in `A.3`, but use the directories and `cluster.yaml` created in the previous steps.
4. For every non up-to-date node, remove the data files and metadata files from the `NodeDirectory`.
5. For every non up-to-date node, copy the data files of the node with the most up-to-date data to the `NodeDirectory`; don't copy the metadata1 & metadata2 files over.
6. For every node, copy `TargetClusterYaml` to `NodeDirectory`, overwriting the `cluster.yaml` that's already there.
7. For every node, make sure there is an `info.yaml` in `NodeDirectory` that is in line with `cluster.yaml` and correct for that node.
8. For every node, run: `cowsql-demo --dir <DataDirectory> --api <ApiAddress> --db <DbAddress>`.
9. You should have an operational cluster; access it through e.g. the `cowsql` cli tool.

## Terminology

- ApiAddress: `host:port` where the `cowsql-demo` REST API is available.
- DataDirectory: Base directory under which the NodeDirectories are saved.
- data file: segment file, snapshot file or snapshot.meta file.
- DbAddress: `host:port` used for database replication.
- DbName: name of the SQLite database.
- metadata file: file named `metadata1` or `metadata2`.
- NodeDirectory: Directory where node specific data is saved, for `cowsql-demo` it should be named `DbAddress` and exist under `DataDirectory`. - segment file: file named like `0000000057685378-0000000057685875`, meaning `startindex-endindex`, these contain raft log entries. - snapshot file: file named like `snapshot-2818-57687002-3645852168`, meaning `snapshot-term-index-timestamp`. - snapshot.meta file: file named like `snapshot-2818-57687002-3645852168.meta`, contains metadata about the matching snapshot file. - TargetClusterYaml: `cluster.yaml` file containing the desired cluster configuration. - TargetClusterYamlPath: location of `TargetClusterYaml`. # C. Startup Errors ## C.1 raft_start(): io: closed segment 0000xxxx-0000xxxx is past last snapshot-x-xxxx-xxxxxx ### C.1.1 Method with data loss This situation can happen when you only have 1 node for example. 1. Backup your data folder and stop the database. 2. Remove the offending segment and try to start again. 3. Repeat step 2 if another segment is preventing you from starting. ### C.1.2 Method preventing data loss 1. Backup your data folders and stop the database. 2. TODO [Variation of the restoring data process] golang-github-cowsql-go-cowsql-1.22.0/driver/000077500000000000000000000000001447672437700210625ustar00rootroot00000000000000golang-github-cowsql-go-cowsql-1.22.0/driver/driver.go000066400000000000000000000612471447672437700227160ustar00rootroot00000000000000// Copyright 2017 Canonical Ltd. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package driver import ( "context" "database/sql/driver" "fmt" "io" "math" "net" "reflect" "syscall" "time" "github.com/pkg/errors" "github.com/cowsql/go-cowsql/client" "github.com/cowsql/go-cowsql/internal/protocol" ) // Driver perform queries against a cowsql server. type Driver struct { log client.LogFunc // Log function to use store client.NodeStore // Holds addresses of cowsql servers context context.Context // Global cancellation context connectionTimeout time.Duration // Max time to wait for a new connection contextTimeout time.Duration // Default client context timeout. clientConfig protocol.Config // Configuration for cowsql client instances tracing client.LogLevel // Whether to trace statements } // Error is returned in case of database errors. type Error = protocol.Error // Error codes. Values here mostly overlap with native SQLite codes. const ( ErrBusy = 5 ErrBusyRecovery = 5 | (1 << 8) ErrBusySnapshot = 5 | (2 << 8) errIoErr = 10 errIoErrNotLeader = errIoErr | 40<<8 errIoErrLeadershipLost = errIoErr | (41 << 8) errNotFound = 12 // Legacy error codes before version-3.32.1+replication4. Kept here // for backward compatibility, but should eventually be dropped. errIoErrNotLeaderLegacy = errIoErr | 32<<8 errIoErrLeadershipLostLegacy = errIoErr | (33 << 8) ) // Option can be used to tweak driver parameters. type Option func(*options) // NodeStore is a convenience alias of client.NodeStore. type NodeStore = client.NodeStore // NodeInfo is a convenience alias of client.NodeInfo. 
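// For example, a driver's store can be seeded with a static set of node
// addresses, as the tests in this repository do (a sketch; the addresses are
// illustrative):
//
//	store := client.NewInmemNodeStore()
//	store.Set(context.Background(), []client.NodeInfo{
//		{Address: "127.0.0.1:9001"},
//		{Address: "127.0.0.1:9002"},
//	})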
type NodeInfo = client.NodeInfo

// DefaultNodeStore is a convenience alias of client.DefaultNodeStore.
var DefaultNodeStore = client.DefaultNodeStore

// WithLogFunc sets a custom logging function.
func WithLogFunc(log client.LogFunc) Option {
	return func(options *options) {
		options.Log = log
	}
}

// DialFunc is a function that can be used to establish a network connection
// with a cowsql node.
type DialFunc = protocol.DialFunc

// WithDialFunc sets a custom dial function.
func WithDialFunc(dial DialFunc) Option {
	return func(options *options) {
		options.Dial = protocol.DialFunc(dial)
	}
}

// WithConnectionTimeout sets the connection timeout.
//
// If not used, the default is 5 seconds.
//
// DEPRECATED: Connection cancellation is supported via the driver.Connector
// interface, which is used internally by the stdlib sql package.
func WithConnectionTimeout(timeout time.Duration) Option {
	return func(options *options) {
		options.ConnectionTimeout = timeout
	}
}

// WithConnectionBackoffFactor sets the exponential backoff factor for retrying
// failed connection attempts.
//
// If not used, the default is 100 milliseconds.
func WithConnectionBackoffFactor(factor time.Duration) Option {
	return func(options *options) {
		options.ConnectionBackoffFactor = factor
	}
}

// WithConnectionBackoffCap sets the maximum connection retry backoff value
// (regardless of the backoff factor) for retrying failed connection attempts.
//
// If not used, the default is 1 second.
func WithConnectionBackoffCap(cap time.Duration) Option {
	return func(options *options) {
		options.ConnectionBackoffCap = cap
	}
}

// WithAttemptTimeout sets the timeout for each individual connection attempt.
//
// The Connector.Connect() and Driver.Open() methods try to find the current
// leader among the servers in the store that was passed to New(). Each time
// they attempt to probe an individual server for leadership this timeout will
// apply, so a server which accepts the connection but is then unresponsive
// won't block the line.
//
// If not used, the default is 15 seconds.
func WithAttemptTimeout(timeout time.Duration) Option {
	return func(options *options) {
		options.AttemptTimeout = timeout
	}
}

// WithRetryLimit sets the maximum number of connection retries.
//
// If not used, the default is 0 (unlimited retries).
func WithRetryLimit(limit uint) Option {
	return func(options *options) {
		options.RetryLimit = limit
	}
}

// WithContext sets a global cancellation context.
//
// DEPRECATED: This API is now a no-op. Users should explicitly pass a context
// if they wish to cancel their requests.
func WithContext(context context.Context) Option {
	return func(options *options) {
		options.Context = context
	}
}

// WithContextTimeout sets the default client context timeout for DB.Begin()
// when no context deadline is provided.
//
// DEPRECATED: Users should use db APIs that support contexts if they wish to
// cancel their requests.
func WithContextTimeout(timeout time.Duration) Option {
	return func(options *options) {
		options.ContextTimeout = timeout
	}
}

// WithTracing will emit a log message at the given level every time a
// statement gets executed.
func WithTracing(level client.LogLevel) Option {
	return func(options *options) {
		options.Tracing = level
	}
}

// New creates a new cowsql driver, which also implements the
// driver.Driver interface.
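// For example, hooking the driver into database/sql (a sketch; the driver
// name "cowsql" and the file names are illustrative, and a driver name may be
// registered only once per process):
//
//	store, _ := client.DefaultNodeStore("servers.yaml")
//	drv, _ := driver.New(store,
//		driver.WithAttemptTimeout(5*time.Second),
//		driver.WithTracing(client.LogDebug))
//	sql.Register("cowsql", drv)
//	db, _ := sql.Open("cowsql", "app.db")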
func New(store client.NodeStore, options ...Option) (*Driver, error) {
	o := defaultOptions()

	for _, option := range options {
		option(o)
	}

	driver := &Driver{
		log:               o.Log,
		store:             store,
		context:           o.Context,
		connectionTimeout: o.ConnectionTimeout,
		contextTimeout:    o.ContextTimeout,
		tracing:           o.Tracing,
		clientConfig: protocol.Config{
			Dial:           o.Dial,
			AttemptTimeout: o.AttemptTimeout,
			BackoffFactor:  o.ConnectionBackoffFactor,
			BackoffCap:     o.ConnectionBackoffCap,
			RetryLimit:     o.RetryLimit,
		},
	}

	return driver, nil
}

// Holds configuration options for a cowsql driver.
type options struct {
	Log                     client.LogFunc
	Dial                    protocol.DialFunc
	AttemptTimeout          time.Duration
	ConnectionTimeout       time.Duration
	ContextTimeout          time.Duration
	ConnectionBackoffFactor time.Duration
	ConnectionBackoffCap    time.Duration
	RetryLimit              uint
	Context                 context.Context
	Tracing                 client.LogLevel
}

// Create an options object with sane defaults.
func defaultOptions() *options {
	return &options{
		Log:     client.DefaultLogFunc,
		Dial:    client.DefaultDialFunc,
		Tracing: client.LogNone,
	}
}

// A Connector represents a driver in a fixed configuration and can create any
// number of equivalent Conns for use by multiple goroutines.
type Connector struct {
	uri    string
	driver *Driver
}

// Connect returns a connection to the database.
func (c *Connector) Connect(ctx context.Context) (driver.Conn, error) {
	if c.driver.context != nil {
		ctx = c.driver.context
	}

	if c.driver.connectionTimeout != 0 {
		var cancel func()
		ctx, cancel = context.WithTimeout(ctx, c.driver.connectionTimeout)
		defer cancel()
	}

	// TODO: generate a client ID.
	connector := protocol.NewConnector(0, c.driver.store, c.driver.clientConfig, c.driver.log)

	conn := &Conn{
		log:            c.driver.log,
		contextTimeout: c.driver.contextTimeout,
		tracing:        c.driver.tracing,
	}

	var err error
	conn.protocol, err = connector.Connect(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "failed to create cowsql connection")
	}

	conn.request.Init(4096)
	conn.response.Init(4096)

	protocol.EncodeOpen(&conn.request, c.uri, 0, "volatile")

	if err := conn.protocol.Call(ctx, &conn.request, &conn.response); err != nil {
		conn.protocol.Close()
		return nil, errors.Wrap(err, "failed to open database")
	}

	conn.id, err = protocol.DecodeDb(&conn.response)
	if err != nil {
		conn.protocol.Close()
		return nil, errors.Wrap(err, "failed to open database")
	}

	return conn, nil
}

// Driver returns the underlying Driver of the Connector.
func (c *Connector) Driver() driver.Driver {
	return c.driver
}

// OpenConnector must parse the name in the same format that Driver.Open
// parses the name parameter.
func (d *Driver) OpenConnector(name string) (driver.Connector, error) {
	connector := &Connector{
		uri:    name,
		driver: d,
	}
	return connector, nil
}

// Open establishes a new connection to a SQLite database on the cowsql server.
//
// The given name must be a pure file name without any directory segment;
// cowsql will connect to a database with that name in its data directory.
//
// Query parameters are always valid except for "mode=memory".
//
// If this node is not the leader, or the leader is unknown, an ErrNotLeader
// error is returned.
func (d *Driver) Open(uri string) (driver.Conn, error) {
	connector, err := d.OpenConnector(uri)
	if err != nil {
		return nil, err
	}

	return connector.Connect(context.Background())
}

// SetContextTimeout sets the default client timeout when no context deadline
// is provided.
//
// DEPRECATED: This API is now a no-op. Users should explicitly pass a context
// if they wish to cancel their requests, or use the WithContextTimeout option.
func (d *Driver) SetContextTimeout(timeout time.Duration) {} // ErrNoAvailableLeader is returned as root cause of Open() if there's no // leader available in the cluster. var ErrNoAvailableLeader = protocol.ErrNoAvailableLeader // Conn implements the sql.Conn interface. type Conn struct { log client.LogFunc protocol *protocol.Protocol request protocol.Message response protocol.Message id uint32 // Database ID. contextTimeout time.Duration tracing client.LogLevel } // PrepareContext returns a prepared statement, bound to this connection. // context is for the preparation of the statement, it must not store the // context within the statement itself. func (c *Conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) { stmt := &Stmt{ protocol: c.protocol, request: &c.request, response: &c.response, log: c.log, tracing: c.tracing, } protocol.EncodePrepare(&c.request, uint64(c.id), query) var start time.Time if c.tracing != client.LogNone { start = time.Now() } err := c.protocol.Call(ctx, &c.request, &c.response) if c.tracing != client.LogNone { c.log(c.tracing, "%.3fs request prepared: %q", time.Since(start).Seconds(), query) } if err != nil { return nil, driverError(c.log, err) } stmt.db, stmt.id, stmt.params, err = protocol.DecodeStmt(&c.response) if err != nil { return nil, driverError(c.log, err) } if c.tracing != client.LogNone { stmt.sql = query } return stmt, nil } // Prepare returns a prepared statement, bound to this connection. func (c *Conn) Prepare(query string) (driver.Stmt, error) { return c.PrepareContext(context.Background(), query) } // ExecContext is an optional interface that may be implemented by a Conn. func (c *Conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { if int64(len(args)) > math.MaxUint32 { return nil, driverError(c.log, fmt.Errorf("too many parameters (%d)", len(args))) } else if len(args) > math.MaxUint8 { protocol.EncodeExecSQLV1(&c.request, uint64(c.id), query, args) } else { protocol.EncodeExecSQLV0(&c.request, uint64(c.id), query, args) } var start time.Time if c.tracing != client.LogNone { start = time.Now() } err := c.protocol.Call(ctx, &c.request, &c.response) if c.tracing != client.LogNone { c.log(c.tracing, "%.3fs request exec: %q", time.Since(start).Seconds(), query) } if err != nil { return nil, driverError(c.log, err) } var result protocol.Result result, err = protocol.DecodeResult(&c.response) if err != nil { return nil, driverError(c.log, err) } return &Result{result: result}, nil } // Query is an optional interface that may be implemented by a Conn. func (c *Conn) Query(query string, args []driver.Value) (driver.Rows, error) { return c.QueryContext(context.Background(), query, valuesToNamedValues(args)) } // QueryContext is an optional interface that may be implemented by a Conn. 
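// When a driver's Conn implements it, database/sql invokes it directly for
// queries instead of falling back to an implicit prepare/query/close
// round-trip.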
func (c *Conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { if int64(len(args)) > math.MaxUint32 { return nil, driverError(c.log, fmt.Errorf("too many parameters (%d)", len(args))) } else if len(args) > math.MaxUint8 { protocol.EncodeQuerySQLV1(&c.request, uint64(c.id), query, args) } else { protocol.EncodeQuerySQLV0(&c.request, uint64(c.id), query, args) } var start time.Time if c.tracing != client.LogNone { start = time.Now() } err := c.protocol.Call(ctx, &c.request, &c.response) if c.tracing != client.LogNone { c.log(c.tracing, "%.3fs request query: %q", time.Since(start).Seconds(), query) } if err != nil { return nil, driverError(c.log, err) } var rows protocol.Rows rows, err = protocol.DecodeRows(&c.response) if err != nil { return nil, driverError(c.log, err) } return &Rows{ ctx: ctx, request: &c.request, response: &c.response, protocol: c.protocol, rows: rows, log: c.log, }, nil } // Exec is an optional interface that may be implemented by a Conn. func (c *Conn) Exec(query string, args []driver.Value) (driver.Result, error) { return c.ExecContext(context.Background(), query, valuesToNamedValues(args)) } // Close invalidates and potentially stops any current prepared statements and // transactions, marking this connection as no longer in use. // // Because the sql package maintains a free pool of connections and only calls // Close when there's a surplus of idle connections, it shouldn't be necessary // for drivers to do their own connection caching. func (c *Conn) Close() error { return c.protocol.Close() } // BeginTx starts and returns a new transaction. If the context is canceled by // the user the sql package will call Tx.Rollback before discarding and closing // the connection. // // This must check opts.Isolation to determine if there is a set isolation // level. If the driver does not support a non-default level and one is set or // if there is a non-default isolation level that is not supported, an error // must be returned. // // This must also check opts.ReadOnly to determine if the read-only value is // true to either set the read-only transaction property if supported or return // an error if it is not supported. func (c *Conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { if _, err := c.ExecContext(ctx, "BEGIN", nil); err != nil { return nil, err } tx := &Tx{ conn: c, log: c.log, } return tx, nil } // Begin starts and returns a new transaction. // // Deprecated: Drivers should implement ConnBeginTx instead (or additionally). func (c *Conn) Begin() (driver.Tx, error) { ctx := context.Background() if c.contextTimeout > 0 { var cancel func() ctx, cancel = context.WithTimeout(context.Background(), c.contextTimeout) defer cancel() } return c.BeginTx(ctx, driver.TxOptions{}) } // Tx is a transaction. type Tx struct { conn *Conn log client.LogFunc } // Commit the transaction. func (tx *Tx) Commit() error { ctx := context.Background() if _, err := tx.conn.ExecContext(ctx, "COMMIT", nil); err != nil { return driverError(tx.log, err) } return nil } // Rollback the transaction. func (tx *Tx) Rollback() error { ctx := context.Background() if _, err := tx.conn.ExecContext(ctx, "ROLLBACK", nil); err != nil { return driverError(tx.log, err) } return nil } // Stmt is a prepared statement. It is bound to a Conn and not // used by multiple goroutines concurrently. 
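//
// A typical round-trip goes through database/sql, e.g. (a sketch; the table
// and value are just examples, error handling elided):
//
//	stmt, _ := db.Prepare("INSERT INTO test(n) VALUES(?)")
//	defer stmt.Close()
//	stmt.Exec(int64(123))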
type Stmt struct {
	protocol *protocol.Protocol
	request  *protocol.Message
	response *protocol.Message
	db       uint32
	id       uint32
	params   uint64
	log      client.LogFunc
	sql      string // Prepared SQL, only set when tracing
	tracing  client.LogLevel
}

// Close closes the statement.
func (s *Stmt) Close() error {
	protocol.EncodeFinalize(s.request, s.db, s.id)

	ctx := context.Background()

	if err := s.protocol.Call(ctx, s.request, s.response); err != nil {
		return driverError(s.log, err)
	}

	if err := protocol.DecodeEmpty(s.response); err != nil {
		return driverError(s.log, err)
	}

	return nil
}

// NumInput returns the number of placeholder parameters.
func (s *Stmt) NumInput() int {
	return int(s.params)
}

// ExecContext executes a query that doesn't return rows, such
// as an INSERT or UPDATE.
//
// ExecContext must honor the context timeout and return when it is canceled.
func (s *Stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
	if int64(len(args)) > math.MaxUint32 {
		return nil, driverError(s.log, fmt.Errorf("too many parameters (%d)", len(args)))
	} else if len(args) > math.MaxUint8 {
		protocol.EncodeExecV1(s.request, s.db, s.id, args)
	} else {
		protocol.EncodeExecV0(s.request, s.db, s.id, args)
	}

	var start time.Time
	if s.tracing != client.LogNone {
		start = time.Now()
	}
	err := s.protocol.Call(ctx, s.request, s.response)
	if s.tracing != client.LogNone {
		s.log(s.tracing, "%.3fs request exec: %q", time.Since(start).Seconds(), s.sql)
	}
	if err != nil {
		return nil, driverError(s.log, err)
	}

	var result protocol.Result
	result, err = protocol.DecodeResult(s.response)
	if err != nil {
		return nil, driverError(s.log, err)
	}

	return &Result{result: result}, nil
}

// Exec executes a query that doesn't return rows, such
// as an INSERT or UPDATE.
func (s *Stmt) Exec(args []driver.Value) (driver.Result, error) {
	return s.ExecContext(context.Background(), valuesToNamedValues(args))
}

// QueryContext executes a query that may return rows, such as a
// SELECT.
//
// QueryContext must honor the context timeout and return when it is canceled.
func (s *Stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
	if int64(len(args)) > math.MaxUint32 {
		return nil, driverError(s.log, fmt.Errorf("too many parameters (%d)", len(args)))
	} else if len(args) > math.MaxUint8 {
		protocol.EncodeQueryV1(s.request, s.db, s.id, args)
	} else {
		protocol.EncodeQueryV0(s.request, s.db, s.id, args)
	}

	var start time.Time
	if s.tracing != client.LogNone {
		start = time.Now()
	}
	err := s.protocol.Call(ctx, s.request, s.response)
	if s.tracing != client.LogNone {
		s.log(s.tracing, "%.3fs request query: %q", time.Since(start).Seconds(), s.sql)
	}
	if err != nil {
		return nil, driverError(s.log, err)
	}

	var rows protocol.Rows
	rows, err = protocol.DecodeRows(s.response)
	if err != nil {
		return nil, driverError(s.log, err)
	}

	return &Rows{ctx: ctx, request: s.request, response: s.response, protocol: s.protocol, rows: rows}, nil
}

// Query executes a query that may return rows, such as a
// SELECT.
func (s *Stmt) Query(args []driver.Value) (driver.Rows, error) {
	return s.QueryContext(context.Background(), valuesToNamedValues(args))
}

// Result is the result of a query execution.
type Result struct {
	result protocol.Result
}

// LastInsertId returns the database's auto-generated ID
// after, for example, an INSERT into a table with primary
// key.
func (r *Result) LastInsertId() (int64, error) {
	return int64(r.result.LastInsertID), nil
}

// RowsAffected returns the number of rows affected by the
// query.
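// As with SQLite itself, this is the count of rows changed by the most
// recent INSERT, UPDATE or DELETE statement.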
func (r *Result) RowsAffected() (int64, error) {
	return int64(r.result.RowsAffected), nil
}

// Rows is an iterator over an executed query's results.
type Rows struct {
	ctx      context.Context
	protocol *protocol.Protocol
	request  *protocol.Message
	response *protocol.Message
	rows     protocol.Rows
	consumed bool
	types    []string
	log      client.LogFunc
}

// Columns returns the names of the columns. The number of
// columns of the result is inferred from the length of the
// slice. If a particular column name isn't known, an empty
// string should be returned for that entry.
func (r *Rows) Columns() []string {
	return r.rows.Columns
}

// Close closes the rows iterator.
func (r *Rows) Close() error {
	err := r.rows.Close()

	// If we consumed the whole result set, there's nothing to do as
	// there's no pending response from the server.
	if r.consumed {
		return nil
	}

	// If this was a single-response result set, we're done.
	if err == io.EOF {
		return nil
	}

	// Let's issue an interrupt request and wait until we get an empty
	// response, signalling that the query was interrupted.
	if err := r.protocol.Interrupt(r.ctx, r.request, r.response); err != nil {
		return driverError(r.log, err)
	}

	return nil
}

// Next is called to populate the next row of data into
// the provided slice. The provided slice will be the same
// size as the Columns() are wide.
//
// Next should return io.EOF when there are no more rows.
func (r *Rows) Next(dest []driver.Value) error {
	err := r.rows.Next(dest)

	if err == protocol.ErrRowsPart {
		r.rows.Close()
		if err := r.protocol.More(r.ctx, r.response); err != nil {
			return driverError(r.log, err)
		}
		rows, err := protocol.DecodeRows(r.response)
		if err != nil {
			return driverError(r.log, err)
		}
		r.rows = rows
		return r.rows.Next(dest)
	}

	if err == io.EOF {
		r.consumed = true
	}

	return err
}

// ColumnTypeScanType implements RowsColumnTypeScanType.
func (r *Rows) ColumnTypeScanType(i int) reflect.Type {
	// column := sql.NewColumn(r.rows, i)

	// typ, err := r.protocol.ColumnTypeScanType(context.Background(), column)
	// if err != nil {
	// 	return nil
	// }

	// return typ.DriverType()
	return nil
}

// ColumnTypeDatabaseTypeName implements RowsColumnTypeDatabaseTypeName.
// warning: not thread safe
func (r *Rows) ColumnTypeDatabaseTypeName(i int) string {
	if r.types == nil {
		var err error
		r.types, err = r.rows.ColumnTypes()
		// an error might not matter if we get our types
		if err != nil && i >= len(r.types) {
			// a panic here doesn't really help,
			// as an empty column type is not the end of the world
			// but we should still inform the user of the failure
			const msg = "row (%p) error returning column #%d type: %v\n"
			r.log(client.LogWarn, msg, r, i, err)
			return ""
		}
	}
	return r.types[i]
}

// Convert a driver.Value slice into a driver.NamedValue slice.
func valuesToNamedValues(args []driver.Value) []driver.NamedValue {
	namedValues := make([]driver.NamedValue, len(args))

	for i, value := range args {
		namedValues[i] = driver.NamedValue{
			Ordinal: i + 1,
			Value:   value,
		}
	}

	return namedValues
}

type unwrappable interface {
	Unwrap() error
}

// TODO driver.ErrBadConn should not be returned when there's a possibility that
// the query has been executed. In our case there is a window in protocol.Call
// between `send` and `recv` where the send has succeeded but the recv has
// failed. In those cases we call driverError on the result of protocol.Call,
// possibly returning ErrBadConn.
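// The practical consequence: database/sql transparently retries a statement
// on a fresh connection when it sees ErrBadConn, which is only safe if the
// request never reached the server.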
// https://cs.opensource.google/go/go/+/refs/tags/go1.20.4:src/database/sql/driver/driver.go;drc=a32a592c8c14927c20ac42808e1fb2e55b2e9470;l=162
func driverError(log client.LogFunc, err error) error {
	switch err := errors.Cause(err).(type) {
	case syscall.Errno:
		log(client.LogDebug, "network connection lost: %v", err)
		return driver.ErrBadConn
	case *net.OpError:
		log(client.LogDebug, "network connection lost: %v", err)
		return driver.ErrBadConn
	case protocol.ErrRequest:
		switch err.Code {
		case errIoErrNotLeaderLegacy:
			fallthrough
		case errIoErrLeadershipLostLegacy:
			fallthrough
		case errIoErrNotLeader:
			fallthrough
		case errIoErrLeadershipLost:
			log(client.LogDebug, "leadership lost (%d - %s)", err.Code, err.Description)
			return driver.ErrBadConn
		case errNotFound:
			log(client.LogDebug, "not found - potentially after leadership loss (%d - %s)", err.Code, err.Description)
			return driver.ErrBadConn
		default:
			// FIXME: the server side sometimes returns SQLITE_OK
			// even in case of errors. This issue is still being
			// investigated, but for now let's just mark this
			// connection as bad so the client will retry.
			if err.Code == 0 {
				log(client.LogWarn, "unexpected error code (%d - %s)", err.Code, err.Description)
				return driver.ErrBadConn
			}
			return Error{
				Code:    int(err.Code),
				Message: err.Description,
			}
		}
	default:
		// When using a TLS connection, the underlying error might get
		// wrapped by the stdlib itself with the new errors wrapping
		// conventions available since go 1.13. In that case we check
		// the underlying error with Unwrap() instead of Cause().
		if root, ok := err.(unwrappable); ok {
			err = root.Unwrap()
		}
		switch err.(type) {
		case *net.OpError:
			log(client.LogDebug, "network connection lost: %v", err)
			return driver.ErrBadConn
		}
	}
	if errors.Is(err, io.EOF) {
		log(client.LogDebug, "EOF detected: %v", err)
		return driver.ErrBadConn
	}
	return err
}
golang-github-cowsql-go-cowsql-1.22.0/driver/driver_test.go000066400000000000000000000340151447672437700237460ustar00rootroot00000000000000// Copyright 2017 Canonical Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package driver_test import ( "context" "database/sql/driver" "io" "io/ioutil" "os" "strings" "testing" cowsql "github.com/cowsql/go-cowsql" "github.com/cowsql/go-cowsql/client" cowsqldriver "github.com/cowsql/go-cowsql/driver" "github.com/cowsql/go-cowsql/logging" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestDriver_Open(t *testing.T) { driver, cleanup := newDriver(t) defer cleanup() conn, err := driver.Open("test.db") require.NoError(t, err) assert.NoError(t, conn.Close()) } func TestDriver_Prepare(t *testing.T) { driver, cleanup := newDriver(t) defer cleanup() conn, err := driver.Open("test.db") require.NoError(t, err) stmt, err := conn.Prepare("CREATE TABLE test (n INT)") require.NoError(t, err) assert.Equal(t, 0, stmt.NumInput()) assert.NoError(t, conn.Close()) } func TestConn_Exec(t *testing.T) { drv, cleanup := newDriver(t) defer cleanup() conn, err := drv.Open("test.db") require.NoError(t, err) _, err = conn.Begin() require.NoError(t, err) execer := conn.(driver.Execer) _, err = execer.Exec("CREATE TABLE test (n INT)", nil) require.NoError(t, err) result, err := execer.Exec("INSERT INTO test(n) VALUES(1)", nil) require.NoError(t, err) lastInsertID, err := result.LastInsertId() require.NoError(t, err) assert.Equal(t, lastInsertID, int64(1)) rowsAffected, err := result.RowsAffected() require.NoError(t, err) assert.Equal(t, rowsAffected, int64(1)) assert.NoError(t, conn.Close()) } func TestConn_Query(t *testing.T) { drv, cleanup := newDriver(t) defer cleanup() conn, err := drv.Open("test.db") require.NoError(t, err) _, err = conn.Begin() require.NoError(t, err) execer := conn.(driver.Execer) _, err = execer.Exec("CREATE TABLE test (n INT)", nil) require.NoError(t, err) _, err = execer.Exec("INSERT INTO test(n) VALUES(1)", nil) require.NoError(t, err) queryer := conn.(driver.Queryer) _, err = queryer.Query("SELECT n FROM test", nil) require.NoError(t, err) assert.NoError(t, conn.Close()) } func TestConn_QueryRow(t *testing.T) { drv, cleanup := newDriver(t) defer cleanup() conn, err := drv.Open("test.db") require.NoError(t, err) _, err = conn.Begin() require.NoError(t, err) execer := conn.(driver.Execer) _, err = execer.Exec("CREATE TABLE test (n INT)", nil) require.NoError(t, err) _, err = execer.Exec("INSERT INTO test(n) VALUES(1)", nil) require.NoError(t, err) _, err = execer.Exec("INSERT INTO test(n) VALUES(1)", nil) require.NoError(t, err) queryer := conn.(driver.Queryer) rows, err := queryer.Query("SELECT n FROM test", nil) require.NoError(t, err) values := make([]driver.Value, 1) require.NoError(t, rows.Next(values)) require.NoError(t, rows.Close()) assert.NoError(t, conn.Close()) } func TestConn_QueryBlob(t *testing.T) { drv, cleanup := newDriver(t) defer cleanup() conn, err := drv.Open("test.db") require.NoError(t, err) _, err = conn.Begin() require.NoError(t, err) execer := conn.(driver.Execer) _, err = execer.Exec("CREATE TABLE test (data BLOB)", nil) require.NoError(t, err) values := []driver.Value{ []byte{'a', 'b', 'c'}, } _, err = execer.Exec("INSERT INTO test(data) VALUES(?)", values) require.NoError(t, err) queryer := conn.(driver.Queryer) rows, err := queryer.Query("SELECT data FROM test", nil) require.NoError(t, err) assert.Equal(t, rows.Columns(), []string{"data"}) values = make([]driver.Value, 1) require.NoError(t, rows.Next(values)) assert.Equal(t, []byte{'a', 'b', 'c'}, values[0]) assert.NoError(t, conn.Close()) } func TestStmt_Exec(t *testing.T) { drv, cleanup := newDriver(t) defer cleanup() conn, err := drv.Open("test.db") 
require.NoError(t, err) stmt, err := conn.Prepare("CREATE TABLE test (n INT)") require.NoError(t, err) _, err = conn.Begin() require.NoError(t, err) _, err = stmt.Exec(nil) require.NoError(t, err) require.NoError(t, stmt.Close()) values := []driver.Value{ int64(1), } stmt, err = conn.Prepare("INSERT INTO test(n) VALUES(?)") require.NoError(t, err) result, err := stmt.Exec(values) require.NoError(t, err) lastInsertID, err := result.LastInsertId() require.NoError(t, err) assert.Equal(t, lastInsertID, int64(1)) rowsAffected, err := result.RowsAffected() require.NoError(t, err) assert.Equal(t, rowsAffected, int64(1)) require.NoError(t, stmt.Close()) assert.NoError(t, conn.Close()) } func TestStmt_ExecManyParams(t *testing.T) { drv, cleanup := newDriver(t) defer cleanup() conn, err := drv.Open("test.db") require.NoError(t, err) stmt, err := conn.Prepare("CREATE TABLE test (n INT)") require.NoError(t, err) _, err = conn.Begin() require.NoError(t, err) _, err = stmt.Exec(nil) require.NoError(t, err) require.NoError(t, stmt.Close()) stmt, err = conn.Prepare("INSERT INTO test(n) VALUES " + strings.Repeat("(?), ", 299) + " (?)") require.NoError(t, err) values := make([]driver.Value, 300) for i := range values { values[i] = int64(1) } _, err = stmt.Exec(values) require.NoError(t, err) require.NoError(t, stmt.Close()) assert.NoError(t, conn.Close()) } func TestStmt_Query(t *testing.T) { drv, cleanup := newDriver(t) defer cleanup() conn, err := drv.Open("test.db") require.NoError(t, err) stmt, err := conn.Prepare("CREATE TABLE test (n INT)") require.NoError(t, err) _, err = conn.Begin() require.NoError(t, err) _, err = stmt.Exec(nil) require.NoError(t, err) require.NoError(t, stmt.Close()) stmt, err = conn.Prepare("INSERT INTO test(n) VALUES(-123)") require.NoError(t, err) _, err = stmt.Exec(nil) require.NoError(t, err) require.NoError(t, stmt.Close()) stmt, err = conn.Prepare("SELECT n FROM test") require.NoError(t, err) rows, err := stmt.Query(nil) require.NoError(t, err) assert.Equal(t, rows.Columns(), []string{"n"}) values := make([]driver.Value, 1) require.NoError(t, rows.Next(values)) assert.Equal(t, int64(-123), values[0]) require.Equal(t, io.EOF, rows.Next(values)) require.NoError(t, stmt.Close()) assert.NoError(t, conn.Close()) } func TestStmt_QueryManyParams(t *testing.T) { drv, cleanup := newDriver(t) defer cleanup() conn, err := drv.Open("test.db") require.NoError(t, err) stmt, err := conn.Prepare("CREATE TABLE test (n INT)") require.NoError(t, err) _, err = conn.Begin() require.NoError(t, err) _, err = stmt.Exec(nil) require.NoError(t, err) require.NoError(t, stmt.Close()) stmt, err = conn.Prepare("SELECT n FROM test WHERE n IN (" + strings.Repeat("?, ", 299) + " ?)") require.NoError(t, err) values := make([]driver.Value, 300) for i := range values { values[i] = int64(1) } _, err = stmt.Query(values) require.NoError(t, err) require.NoError(t, stmt.Close()) assert.NoError(t, conn.Close()) } func TestConn_QueryParams(t *testing.T) { drv, cleanup := newDriver(t) defer cleanup() conn, err := drv.Open("test.db") require.NoError(t, err) _, err = conn.Begin() require.NoError(t, err) execer := conn.(driver.Execer) _, err = execer.Exec("CREATE TABLE test (n INT, t TEXT)", nil) require.NoError(t, err) _, err = execer.Exec(` INSERT INTO test (n,t) VALUES (1,'a'); INSERT INTO test (n,t) VALUES (2,'a'); INSERT INTO test (n,t) VALUES (2,'b'); INSERT INTO test (n,t) VALUES (3,'b'); `, nil) require.NoError(t, err) values := []driver.Value{ int64(1), "a", } queryer := conn.(driver.Queryer) rows, err := 
queryer.Query("SELECT n, t FROM test WHERE n > ? AND t = ?", values) require.NoError(t, err) assert.Equal(t, rows.Columns()[0], "n") values = make([]driver.Value, 2) require.NoError(t, rows.Next(values)) assert.Equal(t, int64(2), values[0]) assert.Equal(t, "a", values[1]) require.Equal(t, io.EOF, rows.Next(values)) assert.NoError(t, conn.Close()) } func TestConn_QueryManyParams(t *testing.T) { drv, cleanup := newDriver(t) defer cleanup() conn, err := drv.Open("test.db") require.NoError(t, err) _, err = conn.Begin() require.NoError(t, err) execer := conn.(driver.Execer) _, err = execer.Exec("CREATE TABLE test (n INT)", nil) require.NoError(t, err) values := make([]driver.Value, 300) for i := range values { values[i] = int64(1) } queryer := conn.(driver.Queryer) _, err = queryer.Query("SELECT n FROM test WHERE n IN ("+strings.Repeat("?, ", 299)+" ?)", values) require.NoError(t, err) assert.NoError(t, conn.Close()) } func TestConn_ExecManyParams(t *testing.T) { drv, cleanup := newDriver(t) defer cleanup() conn, err := drv.Open("test.db") require.NoError(t, err) _, err = conn.Begin() require.NoError(t, err) execer := conn.(driver.Execer) _, err = execer.Exec("CREATE TABLE test (n INT)", nil) require.NoError(t, err) values := make([]driver.Value, 300) for i := range values { values[i] = int64(1) } _, err = execer.Exec("INSERT INTO test(n) VALUES "+strings.Repeat("(?), ", 299)+" (?)", values) require.NoError(t, err) assert.NoError(t, conn.Close()) } func Test_ColumnTypesEmpty(t *testing.T) { t.Skip("this currently fails if the result set is empty, is cowsql skipping the header if empty set?") drv, cleanup := newDriver(t) defer cleanup() conn, err := drv.Open("test.db") require.NoError(t, err) stmt, err := conn.Prepare("CREATE TABLE test (n INT)") require.NoError(t, err) _, err = conn.Begin() require.NoError(t, err) _, err = stmt.Exec(nil) require.NoError(t, err) require.NoError(t, stmt.Close()) stmt, err = conn.Prepare("SELECT n FROM test") require.NoError(t, err) rows, err := stmt.Query(nil) require.NoError(t, err) require.NoError(t, err) rowTypes, ok := rows.(driver.RowsColumnTypeDatabaseTypeName) require.True(t, ok) typeName := rowTypes.ColumnTypeDatabaseTypeName(0) assert.Equal(t, "INTEGER", typeName) require.NoError(t, stmt.Close()) assert.NoError(t, conn.Close()) } func Test_ColumnTypesExists(t *testing.T) { drv, cleanup := newDriver(t) defer cleanup() conn, err := drv.Open("test.db") require.NoError(t, err) stmt, err := conn.Prepare("CREATE TABLE test (n INT)") require.NoError(t, err) _, err = conn.Begin() require.NoError(t, err) _, err = stmt.Exec(nil) require.NoError(t, err) require.NoError(t, stmt.Close()) stmt, err = conn.Prepare("INSERT INTO test(n) VALUES(-123)") require.NoError(t, err) _, err = stmt.Exec(nil) require.NoError(t, err) stmt, err = conn.Prepare("SELECT n FROM test") require.NoError(t, err) rows, err := stmt.Query(nil) require.NoError(t, err) require.NoError(t, err) rowTypes, ok := rows.(driver.RowsColumnTypeDatabaseTypeName) require.True(t, ok) typeName := rowTypes.ColumnTypeDatabaseTypeName(0) assert.Equal(t, "INTEGER", typeName) require.NoError(t, stmt.Close()) assert.NoError(t, conn.Close()) } // ensure column types data is available // even after the last row of the query func Test_ColumnTypesEnd(t *testing.T) { drv, cleanup := newDriver(t) defer cleanup() conn, err := drv.Open("test.db") require.NoError(t, err) stmt, err := conn.Prepare("CREATE TABLE test (n INT)") require.NoError(t, err) _, err = conn.Begin() require.NoError(t, err) _, err = stmt.Exec(nil) 
require.NoError(t, err) require.NoError(t, stmt.Close()) stmt, err = conn.Prepare("INSERT INTO test(n) VALUES(-123)") require.NoError(t, err) _, err = stmt.Exec(nil) require.NoError(t, err) stmt, err = conn.Prepare("SELECT n FROM test") require.NoError(t, err) rows, err := stmt.Query(nil) require.NoError(t, err) require.NoError(t, err) rowTypes, ok := rows.(driver.RowsColumnTypeDatabaseTypeName) require.True(t, ok) typeName := rowTypes.ColumnTypeDatabaseTypeName(0) assert.Equal(t, "INTEGER", typeName) values := make([]driver.Value, 1) require.NoError(t, rows.Next(values)) assert.Equal(t, int64(-123), values[0]) require.Equal(t, io.EOF, rows.Next(values)) // despite EOF we should have types cached typeName = rowTypes.ColumnTypeDatabaseTypeName(0) assert.Equal(t, "INTEGER", typeName) require.NoError(t, stmt.Close()) assert.NoError(t, conn.Close()) } func Test_ZeroColumns(t *testing.T) { drv, cleanup := newDriver(t) defer cleanup() conn, err := drv.Open("test.db") require.NoError(t, err) queryer := conn.(driver.Queryer) rows, err := queryer.Query("CREATE TABLE foo (bar INTEGER)", []driver.Value{}) require.NoError(t, err) values := []driver.Value{} require.Equal(t, io.EOF, rows.Next(values)) require.NoError(t, conn.Close()) } func newDriver(t *testing.T) (*cowsqldriver.Driver, func()) { t.Helper() _, cleanup := newNode(t) store := newStore(t, "@1") log := logging.Test(t) driver, err := cowsqldriver.New(store, cowsqldriver.WithLogFunc(log)) require.NoError(t, err) return driver, cleanup } // Create a new in-memory server store populated with the given addresses. func newStore(t *testing.T, address string) client.NodeStore { t.Helper() store := client.NewInmemNodeStore() server := client.NodeInfo{Address: address} require.NoError(t, store.Set(context.Background(), []client.NodeInfo{server})) return store } func newNode(t *testing.T) (*cowsql.Node, func()) { t.Helper() dir, dirCleanup := newDir(t) server, err := cowsql.New(uint64(1), "@1", dir, cowsql.WithBindAddress("@1")) require.NoError(t, err) err = server.Start() require.NoError(t, err) cleanup := func() { require.NoError(t, server.Close()) dirCleanup() } return server, cleanup } // Return a new temporary directory. 
func newDir(t *testing.T) (string, func()) {
	t.Helper()

	dir, err := ioutil.TempDir("", "cowsql-replication-test-")
	assert.NoError(t, err)

	cleanup := func() {
		_, err := os.Stat(dir)
		if err != nil {
			assert.True(t, os.IsNotExist(err))
		} else {
			assert.NoError(t, os.RemoveAll(dir))
		}
	}

	return dir, cleanup
}
golang-github-cowsql-go-cowsql-1.22.0/driver/integration_test.go000066400000000000000000000254731447672437700250030ustar00rootroot00000000000000package driver_test

import (
	"context"
	"database/sql"
	"fmt"
	"os"
	"testing"
	"time"

	cowsql "github.com/cowsql/go-cowsql"
	"github.com/cowsql/go-cowsql/client"
	"github.com/cowsql/go-cowsql/driver"
	"github.com/cowsql/go-cowsql/logging"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// https://sqlite.org/rescode.html#constraint_unique
const SQLITE_CONSTRAINT_UNIQUE = 2067

func TestIntegration_DatabaseSQL(t *testing.T) {
	db, _, cleanup := newDB(t, 3)
	defer cleanup()

	tx, err := db.Begin()
	require.NoError(t, err)

	_, err = tx.Exec(`
CREATE TABLE test (n INT, s TEXT);
CREATE TABLE test2 (n INT, t DATETIME DEFAULT CURRENT_TIMESTAMP)
`)
	require.NoError(t, err)

	stmt, err := tx.Prepare("INSERT INTO test(n, s) VALUES(?, ?)")
	require.NoError(t, err)

	_, err = stmt.Exec(int64(123), "hello")
	require.NoError(t, err)

	require.NoError(t, stmt.Close())

	_, err = tx.Exec("INSERT INTO test2(n) VALUES(?)", int64(456))
	require.NoError(t, err)

	require.NoError(t, tx.Commit())

	tx, err = db.Begin()
	require.NoError(t, err)

	rows, err := tx.Query("SELECT n, s FROM test")
	require.NoError(t, err)

	for rows.Next() {
		var n int64
		var s string

		require.NoError(t, rows.Scan(&n, &s))

		assert.Equal(t, int64(123), n)
		assert.Equal(t, "hello", s)
	}
	require.NoError(t, rows.Err())
	require.NoError(t, rows.Close())

	rows, err = tx.Query("SELECT n, t FROM test2")
	require.NoError(t, err)

	for rows.Next() {
		var n int64
		var s time.Time

		require.NoError(t, rows.Scan(&n, &s))

		assert.Equal(t, int64(456), n)
	}
	require.NoError(t, rows.Err())
	require.NoError(t, rows.Close())

	require.NoError(t, tx.Rollback())
}

func TestIntegration_ConstraintError(t *testing.T) {
	db, _, cleanup := newDB(t, 3)
	defer cleanup()

	_, err := db.Exec("CREATE TABLE test (n INT, UNIQUE (n))")
	require.NoError(t, err)

	_, err = db.Exec("INSERT INTO test (n) VALUES (1)")
	require.NoError(t, err)

	_, err = db.Exec("INSERT INTO test (n) VALUES (1)")
	if err, ok := err.(driver.Error); ok {
		assert.Equal(t, SQLITE_CONSTRAINT_UNIQUE, err.Code)
		assert.Equal(t, "UNIQUE constraint failed: test.n", err.Message)
	} else {
		t.Fatalf("expected driver error, got %+v", err)
	}
}

func TestIntegration_ExecBindError(t *testing.T) {
	db, _, cleanup := newDB(t, 1)
	defer cleanup()
	defer db.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	_, err := db.ExecContext(ctx, "CREATE TABLE test (n INT)")
	require.NoError(t, err)

	_, err = db.ExecContext(ctx, "INSERT INTO test(n) VALUES(1)", 1)
	assert.EqualError(t, err, "bind parameters")
}

func TestIntegration_QueryBindError(t *testing.T) {
	db, _, cleanup := newDB(t, 1)
	defer cleanup()
	defer db.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	_, err := db.QueryContext(ctx, "SELECT 1", 1)
	assert.EqualError(t, err, "bind parameters")
}

func TestIntegration_LargeQuery(t *testing.T) {
	db, _, cleanup := newDB(t, 3)
	defer cleanup()

	tx, err := db.Begin()
	require.NoError(t, err)

	_, err = tx.Exec("CREATE TABLE test (n INT)")
	require.NoError(t, err)

	stmt, err := tx.Prepare("INSERT INTO test(n) VALUES(?)")
require.NoError(t, err) for i := 0; i < 512; i++ { _, err = stmt.Exec(int64(i)) require.NoError(t, err) } require.NoError(t, stmt.Close()) require.NoError(t, tx.Commit()) tx, err = db.Begin() require.NoError(t, err) rows, err := tx.Query("SELECT n FROM test") require.NoError(t, err) columns, err := rows.Columns() require.NoError(t, err) assert.Equal(t, []string{"n"}, columns) count := 0 for i := 0; rows.Next(); i++ { var n int64 require.NoError(t, rows.Scan(&n)) assert.Equal(t, int64(i), n) count++ } require.NoError(t, rows.Err()) require.NoError(t, rows.Close()) assert.Equal(t, count, 512) require.NoError(t, tx.Rollback()) } // Build a 2-node cluster, kill one node and recover the other. func TestIntegration_Recover(t *testing.T) { db, helpers, cleanup := newDB(t, 2) defer cleanup() _, err := db.Exec("CREATE TABLE test (n INT)") require.NoError(t, err) helpers[0].Close() helpers[1].Close() helpers[0].Create() infos := []client.NodeInfo{{ID: 1, Address: "@1"}} require.NoError(t, helpers[0].Node.Recover(infos)) helpers[0].Start() // FIXME: this is necessary otherwise the INSERT below fails with "no // such table", because the replication hooks are not triggered and the // barrier is not applied. _, err = db.Exec("CREATE TABLE test2 (n INT)") require.NoError(t, err) _, err = db.Exec("INSERT INTO test(n) VALUES(1)") require.NoError(t, err) } // The db.Ping() method can be used to wait until there is a stable leader. func TestIntegration_PingOnlyWorksOnceLeaderElected(t *testing.T) { db, helpers, cleanup := newDB(t, 2) defer cleanup() helpers[0].Close() // Ping returns an error, since the cluster is not available. ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() assert.Error(t, db.PingContext(ctx)) helpers[0].Create() helpers[0].Start() // Ping now returns no error, since the cluster is available. assert.NoError(t, db.Ping()) // If leadership is lost after the first successful call, Ping() still // returns no error. helpers[0].Close() assert.NoError(t, db.Ping()) } func TestIntegration_HighAvailability(t *testing.T) { db, helpers, cleanup := newDB(t, 3) defer cleanup() _, err := db.Exec("CREATE TABLE test (n INT)") require.NoError(t, err) // Shutdown all three nodes. helpers[0].Close() helpers[1].Close() helpers[2].Close() // Restart two of them. 
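	// Two of the three voters form a majority, so the cluster should be
	// able to elect a leader again and accept writes.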
	helpers[1].Create()
	helpers[2].Create()

	helpers[1].Start()
	helpers[2].Start()

	// Give the cluster a chance to establish a quorum
	time.Sleep(2 * time.Second)

	_, err = db.Exec("INSERT INTO test(n) VALUES(1)")
	require.NoError(t, err)
}

func TestIntegration_LeadershipTransfer(t *testing.T) {
	db, helpers, cleanup := newDB(t, 3)
	defer cleanup()

	_, err := db.Exec("CREATE TABLE test (n INT)")
	require.NoError(t, err)

	cli := helpers[0].Client()
	require.NoError(t, cli.Transfer(context.Background(), 2))

	_, err = db.Exec("INSERT INTO test(n) VALUES(1)")
	require.NoError(t, err)
}

func TestIntegration_LeadershipTransfer_Tx(t *testing.T) {
	db, helpers, cleanup := newDB(t, 3)
	defer cleanup()

	_, err := db.Exec("CREATE TABLE test (n INT)")
	require.NoError(t, err)

	cli := helpers[0].Client()
	require.NoError(t, cli.Transfer(context.Background(), 2))

	tx, err := db.Begin()
	require.NoError(t, err)

	_, err = tx.Query("SELECT * FROM test")
	require.NoError(t, err)

	require.NoError(t, tx.Commit())
}

func TestOptions(t *testing.T) {
	// make sure applying all options doesn't break anything
	store := client.NewInmemNodeStore()
	log := logging.Test(t)
	_, err := driver.New(
		store,
		driver.WithLogFunc(log),
		driver.WithContext(context.Background()),
		driver.WithConnectionTimeout(15*time.Second),
		driver.WithContextTimeout(2*time.Second),
		driver.WithConnectionBackoffFactor(50*time.Millisecond),
		driver.WithConnectionBackoffCap(1*time.Second),
		driver.WithAttemptTimeout(5*time.Second),
		driver.WithRetryLimit(0),
	)
	require.NoError(t, err)
}

func newDB(t *testing.T, n int) (*sql.DB, []*nodeHelper, func()) {
	infos := make([]client.NodeInfo, n)
	for i := range infos {
		infos[i].ID = uint64(i + 1)
		infos[i].Address = fmt.Sprintf("@%d", infos[i].ID)
		infos[i].Role = client.Voter
	}
	return newDBWithInfos(t, infos)
}

func newDBWithInfos(t *testing.T, infos []client.NodeInfo) (*sql.DB, []*nodeHelper, func()) {
	helpers, helpersCleanup := newNodeHelpers(t, infos)

	store := client.NewInmemNodeStore()
	require.NoError(t, store.Set(context.Background(), infos))

	log := logging.Test(t)
	driver, err := driver.New(store, driver.WithLogFunc(log))
	require.NoError(t, err)

	driverName := fmt.Sprintf("cowsql-integration-test-%d", driversCount)
	sql.Register(driverName, driver)

	driversCount++

	db, err := sql.Open(driverName, "test.db")
	require.NoError(t, err)

	cleanup := func() {
		require.NoError(t, db.Close())
		helpersCleanup()
	}

	return db, helpers, cleanup
}

func registerDriver(driver *driver.Driver) string {
	name := fmt.Sprintf("cowsql-integration-test-%d", driversCount)
	sql.Register(name, driver)
	driversCount++

	return name
}

type nodeHelper struct {
	t       *testing.T
	ID      uint64
	Address string
	Dir     string
	Node    *cowsql.Node
}

func newNodeHelper(t *testing.T, id uint64, address string) *nodeHelper {
	h := &nodeHelper{
		t:       t,
		ID:      id,
		Address: address,
	}

	h.Dir, _ = newDir(t)

	h.Create()
	h.Start()

	return h
}

func (h *nodeHelper) Client() *client.Client {
	client, err := client.New(context.Background(), h.Node.BindAddress())
	require.NoError(h.t, err)
	return client
}

func (h *nodeHelper) Create() {
	var err error
	require.Nil(h.t, h.Node)
	h.Node, err = cowsql.New(h.ID, h.Address, h.Dir, cowsql.WithBindAddress(h.Address))
	require.NoError(h.t, err)
}

func (h *nodeHelper) Start() {
	require.NotNil(h.t, h.Node)
	require.NoError(h.t, h.Node.Start())
}

func (h *nodeHelper) Close() {
	require.NotNil(h.t, h.Node)
	require.NoError(h.t, h.Node.Close())
	h.Node = nil
}

func (h *nodeHelper) cleanup() {
	if h.Node != nil {
		h.Close()
	}

	require.NoError(h.t, os.RemoveAll(h.Dir))
}

func newNodeHelpers(t
*testing.T, infos []client.NodeInfo) ([]*nodeHelper, func()) { t.Helper() n := len(infos) helpers := make([]*nodeHelper, n) for i, info := range infos { helpers[i] = newNodeHelper(t, info.ID, info.Address) if i > 0 { client := helpers[0].Client() defer client.Close() require.NoError(t, client.Add(context.Background(), infos[i])) } } cleanup := func() { for _, helper := range helpers { helper.cleanup() } } return helpers, cleanup } var driversCount = 0 func TestIntegration_ColumnTypeName(t *testing.T) { db, _, cleanup := newDB(t, 1) defer cleanup() _, err := db.Exec("CREATE TABLE test (n INT, UNIQUE (n))") require.NoError(t, err) _, err = db.Exec("INSERT INTO test (n) VALUES (1)") require.NoError(t, err) rows, err := db.Query("SELECT n FROM test") require.NoError(t, err) defer rows.Close() types, err := rows.ColumnTypes() require.NoError(t, err) assert.Equal(t, "INTEGER", types[0].DatabaseTypeName()) require.True(t, rows.Next()) var n int64 err = rows.Scan(&n) require.NoError(t, err) assert.Equal(t, int64(1), n) } func TestIntegration_SqlNullTime(t *testing.T) { db, _, cleanup := newDB(t, 1) defer cleanup() _, err := db.Exec("CREATE TABLE test (tm DATETIME)") require.NoError(t, err) // Insert sql.NullTime into DB var t1 sql.NullTime res, err := db.Exec("INSERT INTO test (tm) VALUES (?)", t1) require.NoError(t, err) n, err := res.RowsAffected() require.NoError(t, err) assert.EqualValues(t, n, 1) // Retrieve inserted sql.NullTime from DB row := db.QueryRow("SELECT tm FROM test LIMIT 1") var t2 sql.NullTime err = row.Scan(&t2) require.NoError(t, err) assert.Equal(t, t1, t2) } golang-github-cowsql-go-cowsql-1.22.0/go.mod000066400000000000000000000010701447672437700206730ustar00rootroot00000000000000module github.com/cowsql/go-cowsql // This is to maintain the ppa package on focal go 1.13 require ( github.com/Rican7/retry v0.3.0 github.com/google/renameio v1.0.1 github.com/mattn/go-runewidth v0.0.13 // indirect github.com/mattn/go-sqlite3 v1.14.7 github.com/peterh/liner v1.2.1 github.com/pierrec/lz4/v4 v4.1.18 // indirect github.com/pkg/errors v0.9.1 github.com/spf13/cobra v1.2.1 github.com/stretchr/testify v1.7.0 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 gopkg.in/yaml.v2 v2.4.0 ) golang-github-cowsql-go-cowsql-1.22.0/go.sum000066400000000000000000001631571447672437700207370ustar00rootroot00000000000000cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod 
h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Rican7/retry v0.3.0 h1:ixNrbGAPoTSjXhcXOKT/X6bj3wexR4DPqWVrdkl+9K0= github.com/Rican7/retry v0.3.0/go.mod h1:CxSDrhAyXmTMeEuRAnArMu1FHu48vtfjLREWqVl7Vw0= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod 
h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= 
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod 
h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/renameio v1.0.1 h1:Lh/jXZmvZxb0BBeSY5VKEfidcbcbenKjZFzM/q0fSeU= github.com/google/renameio v1.0.1/go.mod h1:t/HQoYBZSsWSNK35C6CO/TpPLDVWvxOHboWUAweKUpk= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= 
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.7 h1:fxWBnXkxfM6sRiuH3bqJ4CfzZojMOLVc0UTsTglEghA= github.com/mattn/go-sqlite3 v1.14.7/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod 
h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/peterh/liner v1.2.1 h1:O4BlKaq/LWu6VRWmol4ByWfzx6MfXc5Op5HETyIy5yg= github.com/peterh/liner v1.2.1/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw= github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= 
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod 
h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= 
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0/go.mod 
h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod 
h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod 
h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod 
h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= golang-github-cowsql-go-cowsql-1.22.0/internal/000077500000000000000000000000001447672437700214035ustar00rootroot00000000000000golang-github-cowsql-go-cowsql-1.22.0/internal/bindings/000077500000000000000000000000001447672437700232005ustar00rootroot00000000000000golang-github-cowsql-go-cowsql-1.22.0/internal/bindings/build.go000066400000000000000000000002331447672437700246240ustar00rootroot00000000000000package bindings /* #cgo linux LDFLAGS: -lcowsql */ import "C" // required cowsql version var cowsqlMajorVersion int = 1 var cowsqlMinorVersion int = 14 golang-github-cowsql-go-cowsql-1.22.0/internal/bindings/server.go000066400000000000000000000203271447672437700250410ustar00rootroot00000000000000package bindings /* #include #include #include #include #include #include #include #include #define RAFT_NOCONNECTION 16 #define EMIT_BUF_LEN 1024 typedef unsigned long long nanoseconds_t; typedef unsigned long long failure_domain_t; // Duplicate a file descriptor and prevent it from being cloned into child processes. static int dupCloexec(int oldfd) { int newfd = -1; newfd = dup(oldfd); if (newfd < 0) { return -1; } if (fcntl(newfd, F_SETFD, FD_CLOEXEC) < 0) { close(newfd); return -1; } return newfd; } // C to Go trampoline for custom connect function. int connectWithDial(uintptr_t handle, char *address, int *fd); // Wrapper to call the Go trampoline. static int connectTrampoline(void *data, const char *address, int *fd) { uintptr_t handle = (uintptr_t)(data); return connectWithDial(handle, (char*)address, fd); } // Configure a custom connect function. 
static int configConnectFunc(cowsql_node *t, uintptr_t handle) { return cowsql_node_set_connect_func(t, connectTrampoline, (void*)handle); } static cowsql_node_info_ext *makeInfos(int n) { return calloc(n, sizeof(cowsql_node_info_ext)); } static void setInfo(cowsql_node_info_ext *infos, unsigned i, cowsql_node_id id, const char *address, int role) { cowsql_node_info_ext *info = &infos[i]; info->size = sizeof(cowsql_node_info_ext); info->id = id; info->address = (uint64_t)(uintptr_t)address; info->cowsql_role = role; } */ import "C" import ( "context" "fmt" "net" "os" "sync" "time" "unsafe" "github.com/cowsql/go-cowsql/internal/protocol" ) type Node struct { node *C.cowsql_node ctx context.Context cancel context.CancelFunc } type SnapshotParams struct { Threshold uint64 Trailing uint64 } // Initializes state. func init() { // FIXME: ignore SIGPIPE, see https://github.com/joyent/libuv/issues/1254 C.signal(C.SIGPIPE, C.SIG_IGN) } // NewNode creates a new Node instance. func NewNode(ctx context.Context, id uint64, address string, dir string) (*Node, error) { requiredVersion := cowsqlMajorVersion*100 + cowsqlMinorVersion // Remove the patch version, as patch versions should be compatible. runtimeVersion := int(C.cowsql_version_number()) / 100 if requiredVersion > runtimeVersion { return nil, fmt.Errorf("version mismatch: required version(%d.%d.x) current version(%d.%d.x)", cowsqlMajorVersion, cowsqlMinorVersion, runtimeVersion/100, runtimeVersion%100) } var server *C.cowsql_node cid := C.cowsql_node_id(id) caddress := C.CString(address) defer C.free(unsafe.Pointer(caddress)) cdir := C.CString(dir) defer C.free(unsafe.Pointer(cdir)) if rc := C.cowsql_node_create(cid, caddress, cdir, &server); rc != 0 { errmsg := C.GoString(C.cowsql_node_errmsg(server)) C.cowsql_node_destroy(server) return nil, fmt.Errorf("%s", errmsg) } node := &Node{node: (*C.cowsql_node)(unsafe.Pointer(server))} node.ctx, node.cancel = context.WithCancel(ctx) return node, nil } func (s *Node) SetDialFunc(dial protocol.DialFunc) error { server := (*C.cowsql_node)(unsafe.Pointer(s.node)) connectLock.Lock() defer connectLock.Unlock() connectIndex++ connectRegistry[connectIndex] = dial contextRegistry[connectIndex] = s.ctx if rc := C.configConnectFunc(server, connectIndex); rc != 0 { return fmt.Errorf("failed to set connect func") } return nil } func (s *Node) SetBindAddress(address string) error { server := (*C.cowsql_node)(unsafe.Pointer(s.node)) caddress := C.CString(address) defer C.free(unsafe.Pointer(caddress)) if rc := C.cowsql_node_set_bind_address(server, caddress); rc != 0 { return fmt.Errorf("failed to set bind address %q: %d", address, rc) } return nil } func (s *Node) SetNetworkLatency(nanoseconds uint64) error { server := (*C.cowsql_node)(unsafe.Pointer(s.node)) cnanoseconds := C.nanoseconds_t(nanoseconds) if rc := C.cowsql_node_set_network_latency(server, cnanoseconds); rc != 0 { return fmt.Errorf("failed to set network latency") } return nil } func (s *Node) SetSnapshotParams(params SnapshotParams) error { server := (*C.cowsql_node)(unsafe.Pointer(s.node)) cthreshold := C.unsigned(params.Threshold) ctrailing := C.unsigned(params.Trailing) if rc := C.cowsql_node_set_snapshot_params(server, cthreshold, ctrailing); rc != 0 { return fmt.Errorf("failed to set snapshot params") } return nil } func (s *Node) SetFailureDomain(code uint64) error { server := (*C.cowsql_node)(unsafe.Pointer(s.node)) ccode := C.failure_domain_t(code) if rc := C.cowsql_node_set_failure_domain(server, ccode); rc != 0 { return fmt.Errorf("set 
failure domain: %d", rc) } return nil } func (s *Node) SetAutoRecovery(on bool) error { server := (*C.cowsql_node)(unsafe.Pointer(s.node)) if rc := C.cowsql_node_set_auto_recovery(server, C.bool(on)); rc != 0 { return fmt.Errorf("failed to set auto-recovery behavior") } return nil } func (s *Node) GetBindAddress() string { server := (*C.cowsql_node)(unsafe.Pointer(s.node)) return C.GoString(C.cowsql_node_get_bind_address(server)) } func (s *Node) Start() error { server := (*C.cowsql_node)(unsafe.Pointer(s.node)) if rc := C.cowsql_node_start(server); rc != 0 { errmsg := C.GoString(C.cowsql_node_errmsg(server)) return fmt.Errorf("%s", errmsg) } return nil } func (s *Node) Stop() error { server := (*C.cowsql_node)(unsafe.Pointer(s.node)) if rc := C.cowsql_node_stop(server); rc != 0 { return fmt.Errorf("task stopped with error code %d", rc) } return nil } // Close the server releasing all used resources. func (s *Node) Close() { defer s.cancel() server := (*C.cowsql_node)(unsafe.Pointer(s.node)) C.cowsql_node_destroy(server) } // Remark that Recover doesn't take the node role into account func (s *Node) Recover(cluster []protocol.NodeInfo) error { for i, _ := range cluster { cluster[i].Role = protocol.Voter } return s.RecoverExt(cluster) } // RecoverExt has a similar purpose as `Recover` but takes the node role into account func (s *Node) RecoverExt(cluster []protocol.NodeInfo) error { server := (*C.cowsql_node)(unsafe.Pointer(s.node)) n := C.int(len(cluster)) infos := C.makeInfos(n) defer C.free(unsafe.Pointer(infos)) for i, info := range cluster { cid := C.cowsql_node_id(info.ID) caddress := C.CString(info.Address) crole := C.int(info.Role) defer C.free(unsafe.Pointer(caddress)) C.setInfo(infos, C.unsigned(i), cid, caddress, crole) } if rc := C.cowsql_node_recover_ext(server, infos, n); rc != 0 { return fmt.Errorf("recover failed with error code %d", rc) } return nil } // GenerateID generates a unique ID for a server. func GenerateID(address string) uint64 { caddress := C.CString(address) defer C.free(unsafe.Pointer(caddress)) id := C.cowsql_generate_node_id(caddress) return uint64(id) } // Extract the underlying socket from a connection. func connToSocket(conn net.Conn) (C.int, error) { file, err := conn.(fileConn).File() if err != nil { return C.int(-1), err } fd1 := C.int(file.Fd()) // Duplicate the file descriptor, in order to prevent Go's finalizer to // close it. fd2 := C.dupCloexec(fd1) if fd2 < 0 { return C.int(-1), fmt.Errorf("failed to dup socket fd") } conn.Close() return fd2, nil } // Interface that net.Conn must implement in order to extract the underlying // file descriptor. type fileConn interface { File() (*os.File, error) } //export connectWithDial func connectWithDial(handle C.uintptr_t, address *C.char, fd *C.int) C.int { connectLock.Lock() defer connectLock.Unlock() dial := connectRegistry[handle] ctx := contextRegistry[handle] // TODO: make timeout customizable. dialCtx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() conn, err := dial(dialCtx, C.GoString(address)) if err != nil { return C.RAFT_NOCONNECTION } socket, err := connToSocket(conn) if err != nil { return C.RAFT_NOCONNECTION } *fd = socket return C.int(0) } // Use handles to avoid passing Go pointers to C. var contextRegistry = make(map[C.uintptr_t]context.Context) var connectRegistry = make(map[C.uintptr_t]protocol.DialFunc) var connectIndex C.uintptr_t = 100 var connectLock = sync.Mutex{} // ErrNodeStopped is returned by Node.Handle() is the server was stopped. 
var ErrNodeStopped = fmt.Errorf("server was stopped") // To compare bool values. var cfalse C.bool golang-github-cowsql-go-cowsql-1.22.0/internal/bindings/server_test.go000066400000000000000000000114221447672437700260740ustar00rootroot00000000000000package bindings_test import ( "context" "encoding/binary" "io/ioutil" "net" "os" "strings" "testing" "time" "github.com/cowsql/go-cowsql/internal/bindings" "github.com/cowsql/go-cowsql/internal/protocol" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestNode_Create(t *testing.T) { _, cleanup := newNode(t) defer cleanup() } func TestNode_Start(t *testing.T) { dir, cleanup := newDir(t) defer cleanup() server, err := bindings.NewNode(context.Background(), 1, "1", dir) require.NoError(t, err) defer server.Close() err = server.SetBindAddress("@") require.NoError(t, err) err = server.Start() require.NoError(t, err) conn, err := net.Dial("unix", server.GetBindAddress()) require.NoError(t, err) conn.Close() assert.True(t, strings.HasPrefix(server.GetBindAddress(), "@")) err = server.Stop() require.NoError(t, err) } func TestNode_Restart(t *testing.T) { dir, cleanup := newDir(t) defer cleanup() server, err := bindings.NewNode(context.Background(), 1, "1", dir) require.NoError(t, err) require.NoError(t, server.SetBindAddress("@abc")) require.NoError(t, server.Start()) require.NoError(t, server.Stop()) server.Close() server, err = bindings.NewNode(context.Background(), 1, "1", dir) require.NoError(t, err) require.NoError(t, server.SetBindAddress("@abc")) require.NoError(t, server.Start()) require.NoError(t, server.Stop()) server.Close() } func TestNode_Start_Inet(t *testing.T) { dir, cleanup := newDir(t) defer cleanup() server, err := bindings.NewNode(context.Background(), 1, "1", dir) require.NoError(t, err) defer server.Close() err = server.SetBindAddress("127.0.0.1:9000") require.NoError(t, err) err = server.Start() require.NoError(t, err) conn, err := net.Dial("tcp", server.GetBindAddress()) require.NoError(t, err) conn.Close() err = server.Stop() require.NoError(t, err) } func TestNode_Leader(t *testing.T) { _, cleanup := newNode(t) defer cleanup() conn := newClient(t) // Make a Leader request buf := makeClientRequest(t, conn, protocol.RequestLeader) assert.Equal(t, uint8(1), buf[0]) require.NoError(t, conn.Close()) } // func TestNode_Heartbeat(t *testing.T) { // server, cleanup := newNode(t) // defer cleanup() // listener, cleanup := newListener(t) // defer cleanup() // cleanup = runNode(t, server, listener) // defer cleanup() // conn := newClient(t, listener) // // Make a Heartbeat request // makeClientRequest(t, conn, bindings.RequestHeartbeat) // require.NoError(t, conn.Close()) // } // func TestNode_ConcurrentHandleAndClose(t *testing.T) { // server, cleanup := newNode(t) // defer cleanup() // listener, cleanup := newListener(t) // defer cleanup() // acceptCh := make(chan error) // go func() { // conn, err := listener.Accept() // if err != nil { // acceptCh <- err // } // server.Handle(conn) // acceptCh <- nil // }() // conn, err := net.Dial("unix", listener.Addr().String()) // require.NoError(t, err) // require.NoError(t, conn.Close()) // assert.NoError(t, <-acceptCh) // } // Create a new Node object for tests. 
func newNode(t *testing.T) (*bindings.Node, func()) { t.Helper() dir, dirCleanup := newDir(t) server, err := bindings.NewNode(context.Background(), 1, "1", dir) require.NoError(t, err) err = server.SetBindAddress("@test") require.NoError(t, err) require.NoError(t, server.Start()) cleanup := func() { require.NoError(t, server.Stop()) server.Close() dirCleanup() } return server, cleanup } // Create a new client network connection, performing the handshake. func newClient(t *testing.T) net.Conn { t.Helper() conn, err := net.Dial("unix", "@test") require.NoError(t, err) // Handshake err = binary.Write(conn, binary.LittleEndian, protocol.VersionLegacy) require.NoError(t, err) return conn } // Perform a client request. func makeClientRequest(t *testing.T, conn net.Conn, kind byte) []byte { t.Helper() // Number of words err := binary.Write(conn, binary.LittleEndian, uint32(1)) require.NoError(t, err) // Type, flags, extra. n, err := conn.Write([]byte{kind, 0, 0, 0}) require.NoError(t, err) require.Equal(t, 4, n) n, err = conn.Write([]byte{0, 0, 0, 0, 0, 0, 0, 0}) // Unused single-word request payload require.NoError(t, err) require.Equal(t, 8, n) // Read the response conn.SetDeadline(time.Now().Add(250 * time.Millisecond)) buf := make([]byte, 64) _, err = conn.Read(buf) require.NoError(t, err) return buf } // Return a new temporary directory. func newDir(t *testing.T) (string, func()) { t.Helper() dir, err := ioutil.TempDir("", "cowsql-replication-test-") assert.NoError(t, err) cleanup := func() { _, err := os.Stat(dir) if err != nil { assert.True(t, os.IsNotExist(err)) } else { assert.NoError(t, os.RemoveAll(dir)) } } return dir, cleanup } golang-github-cowsql-go-cowsql-1.22.0/internal/bindings/sqlite3.go000066400000000000000000000013301447672437700251100ustar00rootroot00000000000000// +build !nosqlite3 package bindings import ( "github.com/cowsql/go-cowsql/internal/protocol" ) /* #cgo linux LDFLAGS: -lsqlite3 #include <sqlite3.h> static int sqlite3ConfigSingleThread() { return sqlite3_config(SQLITE_CONFIG_SINGLETHREAD); } static int sqlite3ConfigMultiThread() { return sqlite3_config(SQLITE_CONFIG_MULTITHREAD); } */ import "C" func ConfigSingleThread() error { if rc := C.sqlite3ConfigSingleThread(); rc != 0 { return protocol.Error{Message: C.GoString(C.sqlite3_errstr(rc)), Code: int(rc)} } return nil } func ConfigMultiThread() error { if rc := C.sqlite3ConfigMultiThread(); rc != 0 { return protocol.Error{Message: C.GoString(C.sqlite3_errstr(rc)), Code: int(rc)} } return nil } golang-github-cowsql-go-cowsql-1.22.0/internal/protocol/000077500000000000000000000000001447672437700232445ustar00rootroot00000000000000golang-github-cowsql-go-cowsql-1.22.0/internal/protocol/buffer.go000066400000000000000000000002671447672437700250510ustar00rootroot00000000000000package protocol // Buffer for reading responses or writing requests. type buffer struct { Bytes []byte Offset int } func (b *buffer) Advance(amount int) { b.Offset += amount } golang-github-cowsql-go-cowsql-1.22.0/internal/protocol/config.go000066400000000000000000000011141447672437700250360ustar00rootroot00000000000000package protocol import ( "time" ) // Config holds various configuration parameters for a cowsql client. type Config struct { Dial DialFunc // Network dialer. DialTimeout time.Duration // Timeout for establishing a network connection. AttemptTimeout time.Duration // Timeout for each individual attempt to probe a server's leadership. BackoffFactor time.Duration // Exponential backoff factor for retries. 
BackoffCap time.Duration // Maximum connection retry backoff value. RetryLimit uint // Maximum number of retries, or 0 for unlimited. } golang-github-cowsql-go-cowsql-1.22.0/internal/protocol/connector.go000066400000000000000000000213301447672437700255640ustar00rootroot00000000000000package protocol import ( "context" "encoding/binary" "fmt" "io" "net" "sort" "time" "github.com/Rican7/retry" "github.com/Rican7/retry/backoff" "github.com/Rican7/retry/strategy" "github.com/cowsql/go-cowsql/logging" "github.com/pkg/errors" ) // DialFunc is a function that can be used to establish a network connection. type DialFunc func(context.Context, string) (net.Conn, error) // Connector is in charge of creating a cowsql SQL client connected to the // current leader of a cluster. type Connector struct { id uint64 // Conn ID to use when registering against the server. store NodeStore // Used to get and update current cluster servers. config Config // Connection parameters. log logging.Func // Logging function. } // NewConnector returns a new connector that can be used by a cowsql driver to // create new clients connected to a leader cowsql server. func NewConnector(id uint64, store NodeStore, config Config, log logging.Func) *Connector { if config.Dial == nil { config.Dial = Dial } if config.DialTimeout == 0 { config.DialTimeout = 5 * time.Second } if config.AttemptTimeout == 0 { config.AttemptTimeout = 15 * time.Second } if config.BackoffFactor == 0 { config.BackoffFactor = 100 * time.Millisecond } if config.BackoffCap == 0 { config.BackoffCap = time.Second } connector := &Connector{ id: id, store: store, config: config, log: log, } return connector } // Connect finds the leader server and returns a connection to it. // // If the connector is stopped before a leader is found, ErrNoAvailableLeader // is returned. func (c *Connector) Connect(ctx context.Context) (*Protocol, error) { var protocol *Protocol strategies := makeRetryStrategies(c.config.BackoffFactor, c.config.BackoffCap, c.config.RetryLimit) // The retry strategy should be configured to retry indefinitely, until // the given context is done. err := retry.Retry(func(attempt uint) error { log := func(l logging.Level, format string, a ...interface{}) { format = fmt.Sprintf("attempt %d: ", attempt) + format c.log(l, format, a...) } select { case <-ctx.Done(): // Stop retrying return nil default: } var err error protocol, err = c.connectAttemptAll(ctx, log) if err != nil { return err } return nil }, strategies...) if err != nil { // We exhausted the number of retries allowed by the configured // strategy. return nil, ErrNoAvailableLeader } if ctx.Err() != nil { return nil, ErrNoAvailableLeader } // At this point we should have a connected protocol object, since the // retry loop didn't hit any error and the given context hasn't // expired. if protocol == nil { panic("no protocol object") } return protocol, nil } // Make a single attempt to establish a connection to the leader server trying // all addresses available in the store. func (c *Connector) connectAttemptAll(ctx context.Context, log logging.Func) (*Protocol, error) { servers, err := c.store.Get(ctx) if err != nil { return nil, errors.Wrap(err, "get servers") } // Sort servers by Role, from low to high. sort.Slice(servers, func(i, j int) bool { return servers[i].Role < servers[j].Role }) // Make an attempt for each address until we find the leader.
for _, server := range servers { log := func(l logging.Level, format string, a ...interface{}) { format = fmt.Sprintf("server %s: ", server.Address) + format log(l, format, a...) } ctx, cancel := context.WithTimeout(ctx, c.config.AttemptTimeout) defer cancel() version := VersionOne protocol, leader, err := c.connectAttemptOne(ctx, server.Address, version) if err == errBadProtocol { log(logging.Warn, "unsupported protocol %d, attempt with legacy", version) version = VersionLegacy protocol, leader, err = c.connectAttemptOne(ctx, server.Address, version) } if err != nil { // This server is unavailable, try with the next target. log(logging.Warn, err.Error()) continue } if protocol != nil { // We found the leader log(logging.Debug, "connected") return protocol, nil } if leader == "" { // This server does not know who the current leader is, // try with the next target. log(logging.Warn, "no known leader") continue } // If we get here, it means this server reported that another // server is the leader, let's close the connection to this // server and try with the suggested one. log(logging.Debug, "connect to reported leader %s", leader) ctx, cancel = context.WithTimeout(ctx, c.config.AttemptTimeout) defer cancel() protocol, leader, err = c.connectAttemptOne(ctx, leader, version) if err != nil { // The leader reported by the previous server is // unavailable, try with the next target. log(logging.Warn, "reported leader unavailable err=%v", err) continue } if protocol == nil { // The leader reported by the target server does not consider itself // the leader, try with the next target. log(logging.Warn, "reported leader server is not the leader") continue } log(logging.Debug, "connected") return protocol, nil } return nil, ErrNoAvailableLeader } // Perform the initial handshake using the given protocol version. func Handshake(ctx context.Context, conn net.Conn, version uint64) (*Protocol, error) { // Latest protocol version. protocol := make([]byte, 8) binary.LittleEndian.PutUint64(protocol, version) // Honor the ctx deadline, if present. if deadline, ok := ctx.Deadline(); ok { conn.SetDeadline(deadline) defer conn.SetDeadline(time.Time{}) } // Perform the protocol handshake. n, err := conn.Write(protocol) if err != nil { return nil, errors.Wrap(err, "write handshake") } if n != 8 { return nil, errors.Wrap(io.ErrShortWrite, "short handshake write") } return newProtocol(version, conn), nil } // Connect to the given cowsql server and check if it's the leader. // // Return values: // // - Any failure is hit: -> nil, "", err // - Target not leader and no leader known: -> nil, "", nil // - Target not leader and leader known: -> nil, leader, nil // - Target is the leader: -> server, "", nil func (c *Connector) connectAttemptOne(ctx context.Context, address string, version uint64) (*Protocol, string, error) { dialCtx, cancel := context.WithTimeout(ctx, c.config.DialTimeout) defer cancel() // Establish the connection. conn, err := c.config.Dial(dialCtx, address) if err != nil { return nil, "", errors.Wrap(err, "dial") } protocol, err := Handshake(ctx, conn, version) if err != nil { conn.Close() return nil, "", err } // Send the initial Leader request. request := Message{} request.Init(16) response := Message{} response.Init(512) EncodeLeader(&request) if err := protocol.Call(ctx, &request, &response); err != nil { protocol.Close() cause := errors.Cause(err) // Best-effort detection of a pre-1.0 cowsql node: when sent // version 1 it should close the connection immediately. 
if err, ok := cause.(*net.OpError); ok && !err.Timeout() || cause == io.EOF { return nil, "", errBadProtocol } return nil, "", err } _, leader, err := DecodeNodeCompat(protocol, &response) if err != nil { protocol.Close() return nil, "", err } switch leader { case "": // Currently this server does not know about any leader. protocol.Close() return nil, "", nil case address: // This server is the leader, register ourselves and return. request.reset() response.reset() EncodeClient(&request, c.id) if err := protocol.Call(ctx, &request, &response); err != nil { protocol.Close() return nil, "", err } _, err := DecodeWelcome(&response) if err != nil { protocol.Close() return nil, "", err } // TODO: enable heartbeat // protocol.heartbeatTimeout = time.Duration(heartbeatTimeout) * time.Millisecond // go protocol.heartbeat() return protocol, "", nil default: // This server claims to know who the current leader is. protocol.Close() return nil, leader, nil } } // Return a retry strategy with exponential backoff, capped at the given amount // of time and possibly with a maximum number of retries. func makeRetryStrategies(factor, cap time.Duration, limit uint) []strategy.Strategy { limit += 1 // Fix for change in behavior: https://github.com/Rican7/retry/pull/12 backoff := backoff.BinaryExponential(factor) strategies := []strategy.Strategy{} if limit > 1 { strategies = append(strategies, strategy.Limit(limit)) } strategies = append(strategies, func(attempt uint) bool { if attempt > 0 { duration := backoff(attempt) // Duration might be negative in case of integer overflow. if duration > cap || duration <= 0 { duration = cap } time.Sleep(duration) } return true }, ) return strategies } var errBadProtocol = fmt.Errorf("bad protocol") golang-github-cowsql-go-cowsql-1.22.0/internal/protocol/connector_test.go000066400000000000000000000241511447672437700266270ustar00rootroot00000000000000package protocol_test import ( "context" "fmt" "io/ioutil" "net" "os" "testing" "time" "github.com/cowsql/go-cowsql/internal/bindings" "github.com/cowsql/go-cowsql/internal/protocol" "github.com/cowsql/go-cowsql/logging" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // Successful connection. func TestConnector_Success(t *testing.T) { address, cleanup := newNode(t, 0) defer cleanup() store := newStore(t, []string{address}) log, check := newLogFunc(t) connector := protocol.NewConnector(0, store, protocol.Config{}, log) ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) defer cancel() client, err := connector.Connect(ctx) require.NoError(t, err) assert.NoError(t, client.Close()) check([]string{ "DEBUG: attempt 1: server @test-0: connected", }) } // The network connection can't be established within the specified number of // attempts. func TestConnector_LimitRetries(t *testing.T) { store := newStore(t, []string{"@test-123"}) config := protocol.Config{ RetryLimit: 2, } log, check := newLogFunc(t) connector := protocol.NewConnector(0, store, config, log) _, err := connector.Connect(context.Background()) assert.Equal(t, protocol.ErrNoAvailableLeader, err) check([]string{ "WARN: attempt 1: server @test-123: dial: dial unix @test-123: connect: connection refused", "WARN: attempt 2: server @test-123: dial: dial unix @test-123: connect: connection refused", "WARN: attempt 3: server @test-123: dial: dial unix @test-123: connect: connection refused", }) } // The network connection can't be established because of a connection timeout. 
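// ---------------------------------------------------------------------------
// Editorial aside (not part of the original archive): the retry delays that
// makeRetryStrategies in connector.go produces with the default
// BackoffFactor (100ms) and BackoffCap (1s). The doubling curve of
// backoff.BinaryExponential is an assumption here; the clamping of over-cap
// or overflowed (negative) durations mirrors the strategy closure above.
package main

import (
	"fmt"
	"time"
)

// cappedBinaryBackoff is a hypothetical stand-in for the combination of
// backoff.BinaryExponential and the capping logic in makeRetryStrategies.
func cappedBinaryBackoff(factor, cap time.Duration, attempt uint) time.Duration {
	d := factor << attempt // may overflow to a negative value for large attempts
	if d > cap || d <= 0 {
		d = cap
	}
	return d
}

func main() {
	for attempt := uint(1); attempt <= 6; attempt++ {
		fmt.Printf("attempt %d: sleep %s\n", attempt,
			cappedBinaryBackoff(100*time.Millisecond, time.Second, attempt))
	}
	// attempt 1: sleep 200ms, attempt 2: 400ms, attempt 3: 800ms,
	// attempt 4 onwards: capped at 1s.
}
// (end of editorial aside)
// ---------------------------------------------------------------------------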
func TestConnector_DialTimeout(t *testing.T) { store := newStore(t, []string{"8.8.8.8:9000"}) log, check := newLogFunc(t) config := protocol.Config{ DialTimeout: 50 * time.Millisecond, RetryLimit: 1, } connector := protocol.NewConnector(0, store, config, log) _, err := connector.Connect(context.Background()) assert.Equal(t, protocol.ErrNoAvailableLeader, err) check([]string{ "WARN: attempt 1: server 8.8.8.8:9000: dial: dial tcp 8.8.8.8:9000: i/o timeout", "WARN: attempt 2: server 8.8.8.8:9000: dial: dial tcp 8.8.8.8:9000: i/o timeout", }) } // Connection failed because the server store is empty. func TestConnector_EmptyNodeStore(t *testing.T) { store := newStore(t, []string{}) log, check := newLogFunc(t) connector := protocol.NewConnector(0, store, protocol.Config{}, log) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Millisecond) defer cancel() _, err := connector.Connect(ctx) assert.Equal(t, protocol.ErrNoAvailableLeader, err) check([]string{}) } // Connection failed because the context was canceled. func TestConnector_ContextCanceled(t *testing.T) { store := newStore(t, []string{"1.2.3.4:666"}) log, check := newLogFunc(t) connector := protocol.NewConnector(0, store, protocol.Config{}, log) ctx, cancel := context.WithTimeout(context.Background(), 25*time.Millisecond) defer cancel() _, err := connector.Connect(ctx) assert.Equal(t, protocol.ErrNoAvailableLeader, err) check([]string{ "WARN: attempt 1: server 1.2.3.4:666: dial: dial tcp 1.2.3.4:666: i/o timeout", }) } // Simulate a server which accepts the connection but doesn't reply within the // attempt timeout. func TestConnector_AttemptTimeout(t *testing.T) { listener, err := net.Listen("unix", "@1234") require.NoError(t, err) store := newStore(t, []string{listener.Addr().String()}) config := protocol.Config{ AttemptTimeout: 100 * time.Millisecond, RetryLimit: 1, } connector := protocol.NewConnector(0, store, config, logging.Test(t)) var conn net.Conn go func() { conn, err = listener.Accept() require.NoError(t, err) require.NotNil(t, conn) }() defer func() { if conn != nil { _ = conn.Close() } }() _, err = connector.Connect(context.Background()) assert.Equal(t, protocol.ErrNoAvailableLeader, err) } // If an election is in progress, the connector will retry until a leader gets // elected. // func TestConnector_Connect_ElectionInProgress(t *testing.T) { // address1, cleanup := newNode(t, 1) // defer cleanup() // address2, cleanup := newNode(t, 2) // defer cleanup() // address3, cleanup := newNode(t, 3) // defer cleanup() // store := newStore(t, []string{address1, address2, address3}) // connector := newConnector(t, store) // go func() { // // Simulate server 1 winning the election after 10ms // time.Sleep(10 * time.Millisecond) // }() // ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) // defer cancel() // client, err := connector.Connect(ctx) // require.NoError(t, err) // assert.NoError(t, client.Close()) // } // If a server reports that it knows about the leader, the hint will be taken // and an attempt will be made to connect to it. 
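// ---------------------------------------------------------------------------
// Editorial aside (not part of the original archive): how a caller is meant
// to interpret the (protocol, leader, err) triple documented on
// connectAttemptOne in connector.go. attemptOne and probe below are
// hypothetical stand-ins with the same contract.
package main

import "fmt"

type conn struct{} // stand-in for *protocol.Protocol

// attemptOne mirrors the documented return values of connectAttemptOne.
func attemptOne(address string) (*conn, string, error) {
	return nil, "", nil // placeholder: "target not leader and no leader known"
}

func probe(address string) {
	c, leader, err := attemptOne(address)
	switch {
	case err != nil:
		fmt.Println("server unavailable, try the next target:", err)
	case c != nil:
		fmt.Println("connected to the leader")
	case leader != "":
		fmt.Println("not the leader, follow the hint to:", leader)
	default:
		fmt.Println("no leader known here, try the next target")
	}
}

func main() { probe("127.0.0.1:9001") }
// (end of editorial aside)
// ---------------------------------------------------------------------------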
// func TestConnector_Connect_NodeKnowsAboutLeader(t *testing.T) { // defer bindings.AssertNoMemoryLeaks(t) // methods1 := &testClusterMethods{} // methods2 := &testClusterMethods{} // methods3 := &testClusterMethods{} // address1, cleanup := newNode(t, 1, methods1) // defer cleanup() // address2, cleanup := newNode(t, 2, methods2) // defer cleanup() // address3, cleanup := newNode(t, 3, methods3) // defer cleanup() // // Node 1 will be contacted first, which will report that server 2 is // // the leader. // store := newStore(t, []string{address1, address2, address3}) // methods1.leader = address2 // methods2.leader = address2 // methods3.leader = address2 // connector := newConnector(t, store) // ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) // defer cancel() // client, err := connector.Connect(ctx) // require.NoError(t, err) // assert.NoError(t, client.Close()) // } // If a server reports that it knows about the leader, the hint will be taken // and an attempt will be made to connect to it. If that leader has died, the // next target will be tried. // func TestConnector_Connect_NodeKnowsAboutDeadLeader(t *testing.T) { // defer bindings.AssertNoMemoryLeaks(t) // methods1 := &testClusterMethods{} // methods2 := &testClusterMethods{} // methods3 := &testClusterMethods{} // address1, cleanup := newNode(t, 1, methods1) // defer cleanup() // address2, cleanup := newNode(t, 2, methods2) // // Simulate server 2 crashing. // cleanup() // address3, cleanup := newNode(t, 3, methods3) // defer cleanup() // // Node 1 will be contacted first, which will report that server 2 is // // the leader. However server 2 has crashed, and after a bit server 1 // // gets elected. // store := newStore(t, []string{address1, address2, address3}) // methods1.leader = address2 // methods3.leader = address2 // go func() { // // Simulate server 1 becoming the new leader after server 2 // // crashed. // time.Sleep(10 * time.Millisecond) // methods1.leader = address1 // methods3.leader = address1 // }() // connector := newConnector(t, store) // ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) // defer cancel() // client, err := connector.Connect(ctx) // require.NoError(t, err) // assert.NoError(t, client.Close()) // } // If a server reports that it knows about the leader, the hint will be taken // and an attempt will be made to connect to it. If that leader is not actually // the leader the next target will be tried. // func TestConnector_Connect_NodeKnowsAboutStaleLeader(t *testing.T) { // defer bindings.AssertNoMemoryLeaks(t) // methods1 := &testClusterMethods{} // methods2 := &testClusterMethods{} // methods3 := &testClusterMethods{} // address1, cleanup := newNode(t, 1, methods1) // defer cleanup() // address2, cleanup := newNode(t, 2, methods2) // defer cleanup() // address3, cleanup := newNode(t, 3, methods3) // defer cleanup() // // Node 1 will be contacted first, which will report that server 2 is // // the leader. However server 2 thinks that 3 is the leader, and server // // 3 is actually the leader. 
// store := newStore(t, []string{address1, address2, address3}) // methods1.leader = address2 // methods2.leader = address3 // methods3.leader = address3 // connector := newConnector(t, store) // ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) // defer cancel() // client, err := connector.Connect(ctx) // require.NoError(t, err) // assert.NoError(t, client.Close()) // } // Return a log function that emits messages using the test logger as well as // collecting them into a slice. The second function returned can be used to // assert that the collected messages match the given ones. func newLogFunc(t *testing.T) (logging.Func, func([]string)) { messages := []string{} log := func(l logging.Level, format string, a ...interface{}) { message := l.String() + ": " + fmt.Sprintf(format, a...) messages = append(messages, message) t.Log(message) } check := func(expected []string) { assert.Equal(t, expected, messages) } return log, check } // Create a new in-memory server store populated with the given addresses. func newStore(t *testing.T, addresses []string) protocol.NodeStore { t.Helper() servers := make([]protocol.NodeInfo, len(addresses)) for i, address := range addresses { servers[i].ID = uint64(i) servers[i].Address = address } store := protocol.NewInmemNodeStore() require.NoError(t, store.Set(context.Background(), servers)) return store } func newNode(t *testing.T, index int) (string, func()) { t.Helper() id := uint64(index + 1) dir, dirCleanup := newDir(t) address := fmt.Sprintf("@test-%d", index) server, err := bindings.NewNode(context.Background(), id, address, dir) require.NoError(t, err) err = server.SetBindAddress(address) require.NoError(t, err) require.NoError(t, server.Start()) cleanup := func() { require.NoError(t, server.Stop()) server.Close() dirCleanup() } return address, cleanup } // Return a new temporary directory. func newDir(t *testing.T) (string, func()) { t.Helper() dir, err := ioutil.TempDir("", "cowsql-connector-test-") assert.NoError(t, err) cleanup := func() { _, err := os.Stat(dir) if err != nil { assert.True(t, os.IsNotExist(err)) } else { assert.NoError(t, os.RemoveAll(dir)) } } return dir, cleanup } golang-github-cowsql-go-cowsql-1.22.0/internal/protocol/constants.go000066400000000000000000000053771447672437700256230ustar00rootroot00000000000000package protocol // VersionOne is version 1 of the server protocol. const VersionOne = uint64(1) // VersionLegacy is the pre 1.0 cowsql server protocol version. const VersionLegacy = uint64(0x86104dd760433fe5) // Cluster response formats const ( ClusterFormatV0 = 0 ClusterFormatV1 = 1 ) // Node roles const ( Voter = NodeRole(0) StandBy = NodeRole(1) Spare = NodeRole(2) ) // SQLite datatype codes const ( Integer = 1 Float = 2 Text = 3 Blob = 4 Null = 5 ) // Special data types for time values. const ( UnixTime = 9 ISO8601 = 10 Boolean = 11 ) // Request types. const ( RequestLeader = 0 RequestClient = 1 RequestHeartbeat = 2 RequestOpen = 3 RequestPrepare = 4 RequestExec = 5 RequestQuery = 6 RequestFinalize = 7 RequestExecSQL = 8 RequestQuerySQL = 9 RequestInterrupt = 10 RequestAdd = 12 RequestAssign = 13 RequestRemove = 14 RequestDump = 15 RequestCluster = 16 RequestTransfer = 17 RequestDescribe = 18 RequestWeight = 19 ) // Formats const ( RequestDescribeFormatV0 = 0 ) // Response types. 
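// ---------------------------------------------------------------------------
// Editorial aside (not part of the original archive): the role constants
// above are deliberately ordered, because connectAttemptAll sorts candidate
// servers by role ascending, probing voters (0) before stand-bys (1) and
// spares (2). Self-contained sketch of that ordering:
package main

import (
	"fmt"
	"sort"
)

type NodeRole int

const (
	Voter   = NodeRole(0)
	StandBy = NodeRole(1)
	Spare   = NodeRole(2)
)

type NodeInfo struct {
	Address string
	Role    NodeRole
}

func main() {
	servers := []NodeInfo{
		{"c:9003", Spare},
		{"a:9001", Voter},
		{"b:9002", StandBy},
	}
	sort.Slice(servers, func(i, j int) bool { return servers[i].Role < servers[j].Role })
	fmt.Println(servers) // [{a:9001 0} {b:9002 1} {c:9003 2}]
}
// (end of editorial aside)
// ---------------------------------------------------------------------------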
const ( ResponseFailure = 0 ResponseNode = 1 ResponseNodeLegacy = 1 ResponseWelcome = 2 ResponseNodes = 3 ResponseDb = 4 ResponseStmt = 5 ResponseResult = 6 ResponseRows = 7 ResponseEmpty = 8 ResponseFiles = 9 ResponseMetadata = 10 ) // Human-readable description of a request type. func requestDesc(code uint8) string { switch code { // Requests case RequestLeader: return "leader" case RequestClient: return "client" case RequestHeartbeat: return "heartbeat" case RequestOpen: return "open" case RequestPrepare: return "prepare" case RequestExec: return "exec" case RequestQuery: return "query" case RequestFinalize: return "finalize" case RequestExecSQL: return "exec-sql" case RequestQuerySQL: return "query-sql" case RequestInterrupt: return "interrupt" case RequestAdd: return "add" case RequestAssign: return "assign" case RequestRemove: return "remove" case RequestDump: return "dump" case RequestCluster: return "cluster" case RequestTransfer: return "transfer" case RequestDescribe: return "describe" } return "unknown" } // Human-readable description of a response type. func responseDesc(code uint8) string { switch code { case ResponseFailure: return "failure" case ResponseNode: return "node" case ResponseWelcome: return "welcome" case ResponseNodes: return "nodes" case ResponseDb: return "db" case ResponseStmt: return "stmt" case ResponseResult: return "result" case ResponseRows: return "rows" case ResponseEmpty: return "empty" case ResponseFiles: return "files" case ResponseMetadata: return "metadata" } return "unknown" } golang-github-cowsql-go-cowsql-1.22.0/internal/protocol/dial.go000066400000000000000000000005241447672437700245050ustar00rootroot00000000000000package protocol import ( "context" "net" "strings" ) // Dial function handling plain TCP and Unix socket endpoints. func Dial(ctx context.Context, address string) (net.Conn, error) { family := "tcp" if strings.HasPrefix(address, "@") { family = "unix" } dialer := net.Dialer{} return dialer.DialContext(ctx, family, address) } golang-github-cowsql-go-cowsql-1.22.0/internal/protocol/errors.go000066400000000000000000000017451447672437700251160ustar00rootroot00000000000000package protocol import ( "fmt" ) // Client errors. var ( ErrNoAvailableLeader = fmt.Errorf("no available cowsql leader server found") errStop = fmt.Errorf("connector was stopped") errStaleLeader = fmt.Errorf("server has lost leadership") errNotClustered = fmt.Errorf("server is not clustered") errNegativeRead = fmt.Errorf("reader returned negative count from Read") errMessageEOF = fmt.Errorf("message eof") ) // ErrRequest is returned in case of request failure. type ErrRequest struct { Code uint64 Description string } func (e ErrRequest) Error() string { return fmt.Sprintf("%s (%d)", e.Description, e.Code) } // ErrRowsPart is returned when the first batch of a multi-response result // batch is done. var ErrRowsPart = fmt.Errorf("not all rows were returned in this response") // Error holds information about a SQLite error. type Error struct { Code int Message string } func (e Error) Error() string { return e.Message } golang-github-cowsql-go-cowsql-1.22.0/internal/protocol/message.go000066400000000000000000000351161447672437700252250ustar00rootroot00000000000000package protocol import ( "bytes" "database/sql/driver" "encoding/binary" "fmt" "io" "math" "strings" "time" ) // NamedValues is a type alias of a slice of driver.NamedValue. It's used by // schema.sh to generate encoding logic for statement parameters. 
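// ---------------------------------------------------------------------------
// Editorial aside (not part of the original archive): every message below is
// framed by a fixed 8-byte header — a 4-byte little-endian word count for the
// body, one type byte, one schema byte and two extra bytes — as written by
// Message.finalize and parsed by Protocol.recvHeader. Minimal sketch:
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	header := make([]byte, 8)
	binary.LittleEndian.PutUint32(header[0:], 3) // words: body is 3*8 = 24 bytes
	header[4] = 5                                // mtype, e.g. RequestExec
	header[5] = 1                                // schema version
	binary.LittleEndian.PutUint16(header[6:], 0) // extra, currently unused

	words := binary.LittleEndian.Uint32(header[0:])
	fmt.Printf("% x -> words=%d type=%d schema=%d\n", header, words, header[4], header[5])
}
// (end of editorial aside)
// ---------------------------------------------------------------------------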
type NamedValues = []driver.NamedValue type NamedValues32 = []driver.NamedValue // Nodes is a type alias of a slice of NodeInfo. It's used by schema.sh to // generate decoding logic for the heartbeat response. type Nodes []NodeInfo // Message holds data about a single request or response. type Message struct { words uint32 mtype uint8 schema uint8 extra uint16 header []byte // Statically allocated header buffer body buffer // Message body data. } // Init initializes the message using the given initial size for the data // buffer, which is re-used across requests or responses encoded or decoded // using this message object. func (m *Message) Init(initialBufferSize int) { if (initialBufferSize % messageWordSize) != 0 { panic("initial buffer size is not aligned to word boundary") } m.header = make([]byte, messageHeaderSize) m.body.Bytes = make([]byte, initialBufferSize) m.reset() } // Reset the state of the message so it can be used to encode or decode again. func (m *Message) reset() { m.words = 0 m.mtype = 0 m.schema = 0 m.extra = 0 for i := 0; i < messageHeaderSize; i++ { m.header[i] = 0 } m.body.Offset = 0 } // Append a byte slice to the message. func (m *Message) putBlob(v []byte) { size := len(v) m.putUint64(uint64(size)) pad := 0 if (size % messageWordSize) != 0 { // Account for padding pad = messageWordSize - (size % messageWordSize) size += pad } b := m.bufferForPut(size) defer b.Advance(size) // Copy the bytes into the buffer. offset := b.Offset copy(b.Bytes[offset:], v) offset += len(v) // Add padding for i := 0; i < pad; i++ { b.Bytes[offset] = 0 offset++ } } // Append a string to the message. func (m *Message) putString(v string) { size := len(v) + 1 pad := 0 if (size % messageWordSize) != 0 { // Account for padding pad = messageWordSize - (size % messageWordSize) size += pad } b := m.bufferForPut(size) defer b.Advance(size) // Copy the string bytes into the buffer. offset := b.Offset copy(b.Bytes[offset:], v) offset += len(v) // Add a nul byte b.Bytes[offset] = 0 offset++ // Add padding for i := 0; i < pad; i++ { b.Bytes[offset] = 0 offset++ } } // Append a byte to the message. func (m *Message) putUint8(v uint8) { b := m.bufferForPut(1) defer b.Advance(1) b.Bytes[b.Offset] = v } // Append a 2-byte word to the message. func (m *Message) putUint16(v uint16) { b := m.bufferForPut(2) defer b.Advance(2) binary.LittleEndian.PutUint16(b.Bytes[b.Offset:], v) } // Append a 4-byte word to the message. func (m *Message) putUint32(v uint32) { b := m.bufferForPut(4) defer b.Advance(4) binary.LittleEndian.PutUint32(b.Bytes[b.Offset:], v) } // Append an 8-byte word to the message. func (m *Message) putUint64(v uint64) { b := m.bufferForPut(8) defer b.Advance(8) binary.LittleEndian.PutUint64(b.Bytes[b.Offset:], v) } // Append a signed 8-byte word to the message. func (m *Message) putInt64(v int64) { b := m.bufferForPut(8) defer b.Advance(8) binary.LittleEndian.PutUint64(b.Bytes[b.Offset:], uint64(v)) } // Append a floating point number to the message. 
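// ---------------------------------------------------------------------------
// Editorial aside (not part of the original archive): the word-alignment
// rule behind putString and putBlob above — a string occupies len+1 bytes
// (trailing NUL) rounded up to the next 8-byte word; a blob occupies an
// 8-byte length prefix plus its data rounded up the same way. Sketch:
package main

import "fmt"

const wordSize = 8

func roundUp(n int) int {
	if r := n % wordSize; r != 0 {
		n += wordSize - r
	}
	return n
}

func main() {
	for _, s := range []string{"hello", "hello!!", "hello world"} {
		fmt.Printf("%-13q -> %2d bytes\n", s, roundUp(len(s)+1))
	}
	// "hello" -> 8, "hello!!" -> 8, "hello world" -> 16, matching the offsets
	// asserted in message_internal_test.go.
	blob := []byte{1, 2, 3, 4, 5}
	fmt.Printf("blob len %d -> %d bytes\n", len(blob), 8+roundUp(len(blob)))
	// 5-byte blob -> 8 (length prefix) + 8 (padded data) = 16 bytes.
}
// (end of editorial aside)
// ---------------------------------------------------------------------------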
func (m *Message) putFloat64(v float64) { b := m.bufferForPut(8) defer b.Advance(8) binary.LittleEndian.PutUint64(b.Bytes[b.Offset:], math.Float64bits(v)) } func (m *Message) putNamedValuesInner(values NamedValues) { for i := range values { if values[i].Ordinal != i+1 { panic("unexpected ordinal") } switch values[i].Value.(type) { case int64: m.putUint8(Integer) case float64: m.putUint8(Float) case bool: m.putUint8(Boolean) case []byte: m.putUint8(Blob) case string: m.putUint8(Text) case nil: m.putUint8(Null) case time.Time: m.putUint8(ISO8601) default: panic("unsupported value type") } } b := m.bufferForPut(1) if trailing := b.Offset % messageWordSize; trailing != 0 { // Skip padding bytes b.Advance(messageWordSize - trailing) } for i := range values { switch v := values[i].Value.(type) { case int64: m.putInt64(v) case float64: m.putFloat64(v) case bool: if v { m.putUint64(1) } else { m.putUint64(0) } case []byte: m.putBlob(v) case string: m.putString(v) case nil: m.putInt64(0) case time.Time: timestamp := v.Format(iso8601Formats[0]) m.putString(timestamp) default: panic("unsupported value type") } } } // Encode the given driver values as binding parameters. func (m *Message) putNamedValues(values NamedValues) { l := len(values) if l == 0 { return } else if l > math.MaxUint8 { // safeguard, should have been checked beforehand. panic("too many parameters") } n := uint8(l) m.putUint8(n) m.putNamedValuesInner(values) } // Encode the given driver values as binding parameters, with a 32-bit // parameter count (new format). func (m *Message) putNamedValues32(values NamedValues) { l := len(values) if l == 0 { return } else if int64(l) > math.MaxUint32 { // safeguard, should have been checked beforehand. panic("too many parameters") } n := uint32(l) m.putUint32(n) m.putNamedValuesInner(values) } // Finalize the message by setting the message type and the number // of words in the body (calculated from the body size). func (m *Message) putHeader(mtype, schema uint8) { if m.body.Offset <= 0 { panic("static offset is not positive") } if (m.body.Offset % messageWordSize) != 0 { panic("static body is not aligned") } m.mtype = mtype m.schema = schema m.extra = 0 m.words = uint32(m.body.Offset) / messageWordSize m.finalize() } func (m *Message) finalize() { if m.words == 0 { panic("empty message body") } binary.LittleEndian.PutUint32(m.header[0:], m.words) m.header[4] = m.mtype m.header[5] = m.schema binary.LittleEndian.PutUint16(m.header[6:], m.extra) } func (m *Message) bufferForPut(size int) *buffer { for (m.body.Offset + size) > len(m.body.Bytes) { // Grow message buffer. bytes := make([]byte, len(m.body.Bytes)*2) copy(bytes, m.body.Bytes) m.body.Bytes = bytes } return &m.body } // Return the message type and its schema version. func (m *Message) getHeader() (uint8, uint8) { return m.mtype, m.schema } // Read a string from the message body. func (m *Message) getString() string { b := m.bufferForGet() index := bytes.IndexByte(b.Bytes[b.Offset:], 0) if index == -1 { panic("no string found") } s := string(b.Bytes[b.Offset : b.Offset+index]) index++ if trailing := index % messageWordSize; trailing != 0 { // Account for padding, moving index to the next word boundary. 
index += messageWordSize - trailing } b.Advance(index) return s } func (m *Message) getBlob() []byte { size := m.getUint64() data := make([]byte, size) for i := range data { data[i] = m.getUint8() } pad := 0 if (size % messageWordSize) != 0 { // Account for padding pad = int(messageWordSize - (size % messageWordSize)) } // Consume padding for i := 0; i < pad; i++ { m.getUint8() } return data } // Read a byte from the message body. func (m *Message) getUint8() uint8 { b := m.bufferForGet() defer b.Advance(1) return b.Bytes[b.Offset] } // Read a 2-byte word from the message body. func (m *Message) getUint16() uint16 { b := m.bufferForGet() defer b.Advance(2) return binary.LittleEndian.Uint16(b.Bytes[b.Offset:]) } // Read a 4-byte word from the message body. func (m *Message) getUint32() uint32 { b := m.bufferForGet() defer b.Advance(4) return binary.LittleEndian.Uint32(b.Bytes[b.Offset:]) } // Read an 8-byte word from the message body. func (m *Message) getUint64() uint64 { b := m.bufferForGet() defer b.Advance(8) return binary.LittleEndian.Uint64(b.Bytes[b.Offset:]) } // Read a signed 8-byte word from the message body. func (m *Message) getInt64() int64 { b := m.bufferForGet() defer b.Advance(8) return int64(binary.LittleEndian.Uint64(b.Bytes[b.Offset:])) } // Read a floating point number from the message body. func (m *Message) getFloat64() float64 { b := m.bufferForGet() defer b.Advance(8) return math.Float64frombits(binary.LittleEndian.Uint64(b.Bytes[b.Offset:])) } // Decode a list of server objects from the message body. func (m *Message) getNodes() Nodes { n := m.getUint64() servers := make(Nodes, n) for i := 0; i < int(n); i++ { servers[i].ID = m.getUint64() servers[i].Address = m.getString() servers[i].Role = NodeRole(m.getUint64()) } return servers } // Decode a statement result object from the message body. func (m *Message) getResult() Result { return Result{ LastInsertID: m.getUint64(), RowsAffected: m.getUint64(), } } // Decode a query result set object from the message body. func (m *Message) getRows() Rows { // Read the column count and column names. columns := make([]string, m.getUint64()) for i := range columns { columns[i] = m.getString() } rows := Rows{ Columns: columns, message: m, } return rows } func (m *Message) getFiles() Files { files := Files{ n: m.getUint64(), message: m, } return files } func (m *Message) hasBeenConsumed() bool { size := int(m.words * messageWordSize) return m.body.Offset == size } func (m *Message) lastByte() byte { size := int(m.words * messageWordSize) return m.body.Bytes[size-1] } func (m *Message) bufferForGet() *buffer { size := int(m.words * messageWordSize) // The message body has been exhausted; reading past its end is an error. if m.body.Offset == size { err := fmt.Errorf("short message: type=%d words=%d off=%d", m.mtype, m.words, m.body.Offset) panic(err) } return &m.body } // Result holds the result of a statement. type Result struct { LastInsertID uint64 RowsAffected uint64 } // Rows holds a result set encoded in a message body.
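// ---------------------------------------------------------------------------
// Editorial aside (not part of the original archive): the row header decoded
// by Rows.columnTypes below packs one column type per 4-bit slot, two per
// byte (low nibble first), padded up to an 8-byte word; a leading 0xee or
// 0xff byte instead marks "more rows in a later batch" or end-of-rows.
// Self-contained decoding sketch:
package main

import "fmt"

func decodeRowHeader(header []byte, columns int) []uint8 {
	types := make([]uint8, columns)
	for i := 0; i < columns; i++ {
		b := header[i/2]
		if i%2 == 0 {
			types[i] = b & 0x0f // low nibble: even-indexed column
		} else {
			types[i] = b >> 4 // high nibble: odd-indexed column
		}
	}
	return types
}

func main() {
	// Three columns: Integer (1), Text (3), Null (5), padded to one word.
	header := []byte{0x31, 0x05, 0, 0, 0, 0, 0, 0}
	fmt.Println(decodeRowHeader(header, 3)) // [1 3 5]
}
// (end of editorial aside)
// ---------------------------------------------------------------------------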
type Rows struct { Columns []string message *Message types []uint8 } // columnTypes returns the row's column types. // If save is true, the buffer offset is restored after reading. func (r *Rows) columnTypes(save bool) ([]uint8, error) { // Use the cached values when we are not going to advance the buffer offset. if save && r.types != nil { return r.types, nil } // Column types never change between rows; keep a cached copy so they // remain available once all rows have been consumed. if r.types == nil { r.types = make([]uint8, len(r.Columns)) } // If there are zero columns, no rows can be encoded or decoded, // so we signal EOF immediately. if len(r.types) == 0 { return r.types, io.EOF } // Each column needs a 4-bit slot to store the column type. The row // header must be padded to reach a word boundary. headerBits := len(r.types) * 4 padBits := 0 if trailingBits := (headerBits % messageWordBits); trailingBits != 0 { padBits = (messageWordBits - trailingBits) } headerSize := (headerBits + padBits) / messageWordBits * messageWordSize for i := 0; i < headerSize; i++ { slot := r.message.getUint8() if slot == 0xee { // More rows are available. if save { r.message.bufferForGet().Advance(-(i + 1)) } return r.types, ErrRowsPart } if slot == 0xff { // Rows EOF marker if save { r.message.bufferForGet().Advance(-(i + 1)) } return r.types, io.EOF } index := i * 2 if index >= len(r.types) { continue // This is padding. } r.types[index] = slot & 0x0f index++ if index >= len(r.types) { continue // This is a padding byte. } r.types[index] = slot >> 4 } if save { r.message.bufferForGet().Advance(-headerSize) } return r.types, nil } // Next returns the next row in the result set. func (r *Rows) Next(dest []driver.Value) error { types, err := r.columnTypes(false) if err != nil { return err } for i := range types { switch types[i] { case Integer: dest[i] = r.message.getInt64() case Float: dest[i] = r.message.getFloat64() case Blob: dest[i] = r.message.getBlob() case Text: dest[i] = r.message.getString() case Null: r.message.getUint64() dest[i] = nil case UnixTime: timestamp := time.Unix(r.message.getInt64(), 0) dest[i] = timestamp case ISO8601: value := r.message.getString() if value == "" { dest[i] = nil break } var t time.Time var timeVal time.Time var err error value = strings.TrimSuffix(value, "Z") for _, format := range iso8601Formats { if timeVal, err = time.ParseInLocation(format, value, time.UTC); err == nil { t = timeVal break } } if err != nil { return err } dest[i] = t case Boolean: dest[i] = r.message.getInt64() != 0 default: panic("unknown data type") } } return nil } // Close the result set and reset the underlying message. func (r *Rows) Close() error { // If we didn't go through all rows, let's look at the last byte. var err error if !r.message.hasBeenConsumed() { slot := r.message.lastByte() if slot == 0xee { // More rows are available. err = ErrRowsPart } else if slot == 0xff { // Rows EOF marker err = io.EOF } else { err = fmt.Errorf("unexpected end of message") } } r.message.reset() return err } // Files holds a set of files encoded in a message body.
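// ---------------------------------------------------------------------------
// Editorial aside (not part of the original archive): draining a Files
// response via Next/Close as defined below — Next yields ("", nil) once the
// set is exhausted. files and the file names here are hypothetical stand-ins
// mirroring the real decode result (a dump typically carries the database
// file and its WAL).
package main

import "fmt"

type file struct {
	name string
	data []byte
}

type files struct{ queue []file }

// next mirrors Files.Next: it returns ("", nil) when no entries remain.
func (f *files) next() (string, []byte) {
	if len(f.queue) == 0 {
		return "", nil
	}
	head := f.queue[0]
	f.queue = f.queue[1:]
	return head.name, head.data
}

func main() {
	f := files{queue: []file{
		{"test.db", make([]byte, 4096)},
		{"test.db-wal", nil},
	}}
	for {
		name, data := f.next()
		if name == "" {
			break
		}
		fmt.Printf("%s: %d bytes\n", name, len(data))
	}
}
// (end of editorial aside)
// ---------------------------------------------------------------------------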
type Files struct { n uint64 message *Message } func (f *Files) Next() (string, []byte) { if f.n == 0 { return "", nil } f.n-- name := f.message.getString() length := f.message.getUint64() data := make([]byte, length) for i := 0; i < int(length); i++ { data[i] = f.message.getUint8() } return name, data } func (f *Files) Close() { f.message.reset() } const ( messageWordSize = 8 messageWordBits = messageWordSize * 8 messageHeaderSize = messageWordSize messageMaxConsecutiveEmptyReads = 100 ) var iso8601Formats = []string{ // By default, store timestamps with whatever timezone they come with. // When parsed, they will be returned with the same timezone. "2006-01-02 15:04:05.999999999-07:00", "2006-01-02T15:04:05.999999999-07:00", "2006-01-02 15:04:05.999999999", "2006-01-02T15:04:05.999999999", "2006-01-02 15:04:05", "2006-01-02T15:04:05", "2006-01-02 15:04", "2006-01-02T15:04", "2006-01-02", } // ColumnTypes returns the column types for the result set. func (r *Rows) ColumnTypes() ([]string, error) { types, err := r.columnTypes(true) kinds := make([]string, len(types)) for i, t := range types { switch t { case Integer: kinds[i] = "INTEGER" case Float: kinds[i] = "FLOAT" case Blob: kinds[i] = "BLOB" case Text: kinds[i] = "TEXT" case Null: kinds[i] = "NULL" case UnixTime: kinds[i] = "TIME" case ISO8601: kinds[i] = "TIME" case Boolean: kinds[i] = "BOOL" default: return nil, fmt.Errorf("unknown data type: %d", t) } } return kinds, err } golang-github-cowsql-go-cowsql-1.22.0/internal/protocol/message_export_test.go000066400000000000000000000002241447672437700276550ustar00rootroot00000000000000package protocol func (m *Message) Body() ([]byte, int) { return m.body.Bytes, m.body.Offset } func (m *Message) Rewind() { m.body.Offset = 0 } golang-github-cowsql-go-cowsql-1.22.0/internal/protocol/message_internal_test.go000066400000000000000000000154321447672437700301570ustar00rootroot00000000000000package protocol import ( "fmt" "testing" "time" "unsafe" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestMessage_StaticBytesAlignment(t *testing.T) { message := Message{} message.Init(4096) pointer := uintptr(unsafe.Pointer(&message.body.Bytes[0])) assert.Equal(t, uintptr(0), pointer%messageWordSize) } func TestMessage_putBlob(t *testing.T) { cases := []struct { Blob []byte Offset int }{ {[]byte{1, 2, 3, 4, 5}, 16}, {[]byte{1, 2, 3, 4, 5, 6, 7, 8}, 16}, {[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 24}, } message := Message{} message.Init(64) for _, c := range cases { t.Run(fmt.Sprintf("%d", c.Offset), func(t *testing.T) { message.putBlob(c.Blob) bytes, offset := message.Body() assert.Equal(t, bytes[8:len(c.Blob)+8], c.Blob) assert.Equal(t, offset, c.Offset) message.reset() }) } } func TestMessage_putString(t *testing.T) { cases := []struct { String string Offset int }{ {"hello", 8}, {"hello!!", 8}, {"hello world", 16}, } message := Message{} message.Init(16) for _, c := range cases { t.Run(c.String, func(t *testing.T) { message.putString(c.String) bytes, offset := message.Body() assert.Equal(t, string(bytes[:len(c.String)]), c.String) assert.Equal(t, offset, c.Offset) message.reset() }) } } func TestMessage_putUint8(t *testing.T) { message := Message{} message.Init(8) v := uint8(12) message.putUint8(v) bytes, offset := message.Body() assert.Equal(t, bytes[0], byte(v)) assert.Equal(t, offset, 1) } func TestMessage_putUint16(t *testing.T) { message := Message{} message.Init(8) v := uint16(666) message.putUint16(v) bytes, offset := message.Body() assert.Equal(t, bytes[0],
byte((v & 0x00ff))) assert.Equal(t, bytes[1], byte((v&0xff00)>>8)) assert.Equal(t, offset, 2) } func TestMessage_putUint32(t *testing.T) { message := Message{} message.Init(8) v := uint32(130000) message.putUint32(v) bytes, offset := message.Body() assert.Equal(t, bytes[0], byte((v & 0x000000ff))) assert.Equal(t, bytes[1], byte((v&0x0000ff00)>>8)) assert.Equal(t, bytes[2], byte((v&0x00ff0000)>>16)) assert.Equal(t, bytes[3], byte((v&0xff000000)>>24)) assert.Equal(t, offset, 4) } func TestMessage_putUint64(t *testing.T) { message := Message{} message.Init(8) v := uint64(5000000000) message.putUint64(v) bytes, offset := message.Body() assert.Equal(t, bytes[0], byte((v & 0x00000000000000ff))) assert.Equal(t, bytes[1], byte((v&0x000000000000ff00)>>8)) assert.Equal(t, bytes[2], byte((v&0x0000000000ff0000)>>16)) assert.Equal(t, bytes[3], byte((v&0x00000000ff000000)>>24)) assert.Equal(t, bytes[4], byte((v&0x000000ff00000000)>>32)) assert.Equal(t, bytes[5], byte((v&0x0000ff0000000000)>>40)) assert.Equal(t, bytes[6], byte((v&0x00ff000000000000)>>48)) assert.Equal(t, bytes[7], byte((v&0xff00000000000000)>>56)) assert.Equal(t, offset, 8) } func TestMessage_putNamedValues(t *testing.T) { message := Message{} message.Init(256) timestamp, err := time.ParseInLocation("2006-01-02", "2018-08-01", time.UTC) require.NoError(t, err) values := NamedValues{ {Ordinal: 1, Value: int64(123)}, {Ordinal: 2, Value: float64(3.1415)}, {Ordinal: 3, Value: true}, {Ordinal: 4, Value: []byte{1, 2, 3, 4, 5, 6}}, {Ordinal: 5, Value: "hello"}, {Ordinal: 6, Value: nil}, {Ordinal: 7, Value: timestamp}, } message.putNamedValues(values) bytes, offset := message.Body() assert.Equal(t, 96, offset) assert.Equal(t, bytes[0], byte(7)) assert.Equal(t, bytes[1], byte(Integer)) assert.Equal(t, bytes[2], byte(Float)) assert.Equal(t, bytes[3], byte(Boolean)) assert.Equal(t, bytes[4], byte(Blob)) assert.Equal(t, bytes[5], byte(Text)) assert.Equal(t, bytes[6], byte(Null)) assert.Equal(t, bytes[7], byte(ISO8601)) } func TestMessage_putNamedValues32(t *testing.T) { message := Message{} message.Init(256) timestamp, err := time.ParseInLocation("2006-01-02", "2018-08-01", time.UTC) require.NoError(t, err) values := NamedValues{ {Ordinal: 1, Value: int64(123)}, {Ordinal: 2, Value: float64(3.1415)}, {Ordinal: 3, Value: true}, {Ordinal: 4, Value: []byte{1, 2, 3, 4, 5, 6}}, {Ordinal: 5, Value: "hello"}, {Ordinal: 6, Value: nil}, {Ordinal: 7, Value: timestamp}, } message.putNamedValues32(values) bytes, offset := message.Body() assert.Equal(t, 104, offset) assert.Equal(t, bytes[0], byte(7)) assert.Equal(t, bytes[1], byte(0)) assert.Equal(t, bytes[2], byte(0)) assert.Equal(t, bytes[3], byte(0)) assert.Equal(t, bytes[4], byte(Integer)) assert.Equal(t, bytes[5], byte(Float)) assert.Equal(t, bytes[6], byte(Boolean)) assert.Equal(t, bytes[7], byte(Blob)) assert.Equal(t, bytes[8], byte(Text)) assert.Equal(t, bytes[9], byte(Null)) assert.Equal(t, bytes[10], byte(ISO8601)) } func TestMessage_putHeader(t *testing.T) { message := Message{} message.Init(64) message.putString("hello") message.putHeader(RequestExec, 1) } func BenchmarkMessage_putString(b *testing.B) { message := Message{} message.Init(4096) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { message.reset() message.putString("hello") } } func BenchmarkMessage_putUint64(b *testing.B) { message := Message{} message.Init(4096) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { message.reset() message.putUint64(270) } } func TestMessage_getString(t *testing.T) { cases := []struct { String 
string Offset int }{ {"hello", 8}, {"hello!!", 8}, {"hello!!!", 16}, {"hello world", 16}, } for _, c := range cases { t.Run(c.String, func(t *testing.T) { message := Message{} message.Init(16) message.putString(c.String) message.putHeader(0, 0) message.Rewind() s := message.getString() _, offset := message.Body() assert.Equal(t, s, c.String) assert.Equal(t, offset, c.Offset) }) } } func TestMessage_getBlob(t *testing.T) { cases := []struct { Blob []byte Offset int }{ {[]byte{1, 2, 3, 4, 5}, 16}, {[]byte{1, 2, 3, 4, 5, 6, 7, 8}, 16}, {[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 24}, } for _, c := range cases { t.Run(fmt.Sprintf("%d", c.Offset), func(t *testing.T) { message := Message{} message.Init(64) message.putBlob(c.Blob) message.putHeader(0, 0) message.Rewind() bytes := message.getBlob() _, offset := message.Body() assert.Equal(t, bytes, c.Blob) assert.Equal(t, offset, c.Offset) }) } } // The overflowing string ends exactly at word boundary. func TestMessage_getString_Overflow_WordBoundary(t *testing.T) { message := Message{} message.Init(8) message.putBlob([]byte{ 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 0, 0, 0, 0, 0, 0, 0, }) message.putHeader(0, 0) message.Rewind() message.getUint64() s := message.getString() assert.Equal(t, "abcdefghilmnopqr", s) assert.Equal(t, 32, message.body.Offset) } golang-github-cowsql-go-cowsql-1.22.0/internal/protocol/protocol.go000066400000000000000000000143331447672437700254400ustar00rootroot00000000000000package protocol import ( "context" "encoding/binary" "io" "net" "sync" "time" "github.com/pkg/errors" ) // Protocol sends and receives cowsql messages on the wire. type Protocol struct { version uint64 // Protocol version conn net.Conn // Underlying network connection. closeCh chan struct{} // Stops the heartbeat when the connection gets closed mu sync.Mutex // Serialize requests netErr error // A network error occurred } func newProtocol(version uint64, conn net.Conn) *Protocol { protocol := &Protocol{ version: version, conn: conn, closeCh: make(chan struct{}), } return protocol } // Call invokes a cowsql RPC, sending a request message and receiving a // response message. func (p *Protocol) Call(ctx context.Context, request, response *Message) (err error) { // We need to take a lock since the cowsql server currently does not // support concurrent requests. p.mu.Lock() defer p.mu.Unlock() if p.netErr != nil { return p.netErr } defer func() { if err == nil { return } switch errors.Cause(err).(type) { case *net.OpError: p.netErr = err } }() var budget time.Duration // Honor the ctx deadline, if present. if deadline, ok := ctx.Deadline(); ok { p.conn.SetDeadline(deadline) budget = time.Until(deadline) defer p.conn.SetDeadline(time.Time{}) } desc := requestDesc(request.mtype) if err = p.send(request); err != nil { return errors.Wrapf(err, "call %s (budget %s): send", desc, budget) } if err = p.recv(response); err != nil { return errors.Wrapf(err, "call %s (budget %s): receive", desc, budget) } return } // More is used when a request maps to multiple responses. func (p *Protocol) More(ctx context.Context, response *Message) error { return p.recv(response) } // Interrupt sends an interrupt request and waits for the server's empty // response. func (p *Protocol) Interrupt(ctx context.Context, request *Message, response *Message) error { // We need to take a lock since the cowsql server currently does not // support concurrent requests. p.mu.Lock() defer p.mu.Unlock() // Honor the ctx deadline, if present.
if deadline, ok := ctx.Deadline(); ok { p.conn.SetDeadline(deadline) defer p.conn.SetDeadline(time.Time{}) } EncodeInterrupt(request, 0) if err := p.send(request); err != nil { return errors.Wrap(err, "failed to send interrupt request") } for { if err := p.recv(response); err != nil { return errors.Wrap(err, "failed to receive response") } mtype, _ := response.getHeader() if mtype == ResponseEmpty { break } } return nil } // Close the client connection. func (p *Protocol) Close() error { close(p.closeCh) return p.conn.Close() } func (p *Protocol) send(req *Message) error { if err := p.sendHeader(req); err != nil { return errors.Wrap(err, "header") } if err := p.sendBody(req); err != nil { return errors.Wrap(err, "body") } return nil } func (p *Protocol) sendHeader(req *Message) error { n, err := p.conn.Write(req.header[:]) if err != nil { return err } if n != messageHeaderSize { return io.ErrShortWrite } return nil } func (p *Protocol) sendBody(req *Message) error { buf := req.body.Bytes[:req.body.Offset] n, err := p.conn.Write(buf) if err != nil { return err } if n != len(buf) { return io.ErrShortWrite } return nil } func (p *Protocol) recv(res *Message) error { res.reset() if err := p.recvHeader(res); err != nil { return errors.Wrap(err, "header") } if err := p.recvBody(res); err != nil { return errors.Wrap(err, "body") } return nil } func (p *Protocol) recvHeader(res *Message) error { if err := p.recvPeek(res.header); err != nil { return err } res.words = binary.LittleEndian.Uint32(res.header[0:]) res.mtype = res.header[4] res.schema = res.header[5] res.extra = binary.LittleEndian.Uint16(res.header[6:]) return nil } func (p *Protocol) recvBody(res *Message) error { n := int(res.words) * messageWordSize for n > len(res.body.Bytes) { // Grow message buffer. bytes := make([]byte, len(res.body.Bytes)*2) res.body.Bytes = bytes } buf := res.body.Bytes[:n] if err := p.recvPeek(buf); err != nil { return err } return nil } // Read until buf is full. func (p *Protocol) recvPeek(buf []byte) error { for offset := 0; offset < len(buf); { n, err := p.recvFill(buf[offset:]) if err != nil { return err } offset += n } return nil } // Try to fill buf, but perform at most one read. func (p *Protocol) recvFill(buf []byte) (int, error) { // Read new data: try a limited number of times. // // This technique is copied from bufio.Reader. for i := messageMaxConsecutiveEmptyReads; i > 0; i-- { n, err := p.conn.Read(buf) if n < 0 { panic(errNegativeRead) } if err != nil { return -1, err } if n > 0 { return n, nil } } return -1, io.ErrNoProgress } /* func (p *Protocol) heartbeat() { request := Message{} request.Init(16) response := Message{} response.Init(512) for { delay := c.heartbeatTimeout / 3 //c.logger.Debug("sending heartbeat", zap.Duration("delay", delay)) time.Sleep(delay) // Check if we've been closed. select { case <-c.closeCh: return default: } ctx, cancel := context.WithTimeout(context.Background(), time.Second) EncodeHeartbeat(&request, uint64(time.Now().Unix())) err := c.Call(ctx, &request, &response) // We bail out upon failures. // // TODO: make the client survive temporary disconnections. 
if err != nil { cancel() //c.logger.Error("heartbeat failed", zap.Error(err)) return } //addresses, err := DecodeNodes(&response) _, err = DecodeNodes(&response) if err != nil { cancel() //c.logger.Error("invalid heartbeat response", zap.Error(err)) return } // if err := c.store.Set(ctx, addresses); err != nil { // cancel() // c.logger.Error("failed to update servers", zap.Error(err)) // return // } cancel() request.Reset() response.Reset() } } */ // DecodeNodeCompat handles also pre-1.0 legacy server messages. func DecodeNodeCompat(protocol *Protocol, response *Message) (uint64, string, error) { if protocol.version == VersionLegacy { address, err := DecodeNodeLegacy(response) if err != nil { return 0, "", err } return 0, address, nil } return DecodeNode(response) } golang-github-cowsql-go-cowsql-1.22.0/internal/protocol/protocol_test.go000066400000000000000000000107111447672437700264730ustar00rootroot00000000000000package protocol_test import ( "context" "testing" "time" "github.com/cowsql/go-cowsql/internal/protocol" "github.com/cowsql/go-cowsql/logging" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // func TestProtocol_Heartbeat(t *testing.T) { // c, cleanup := newProtocol(t) // defer cleanup() // request, response := newMessagePair(512, 512) // protocol.EncodeHeartbeat(&request, uint64(time.Now().Unix())) // makeCall(t, c, &request, &response) // servers, err := protocol.DecodeNodes(&response) // require.NoError(t, err) // assert.Len(t, servers, 2) // assert.Equal(t, client.Nodes{ // {ID: uint64(1), Address: "1.2.3.4:666"}, // {ID: uint64(2), Address: "5.6.7.8:666"}}, // servers) // } // Test sending a request that needs to be written into the dynamic buffer. func TestProtocol_RequestWithDynamicBuffer(t *testing.T) { p, cleanup := newProtocol(t) defer cleanup() request, response := newMessagePair(64, 64) protocol.EncodeOpen(&request, "test.db", 0, "test-0") makeCall(t, p, &request, &response) id, err := protocol.DecodeDb(&response) require.NoError(t, err) sql := ` CREATE TABLE foo (n INT); CREATE TABLE bar (n INT); CREATE TABLE egg (n INT); CREATE TABLE baz (n INT); ` protocol.EncodeExecSQLV0(&request, uint64(id), sql, nil) makeCall(t, p, &request, &response) } func TestProtocol_Prepare(t *testing.T) { c, cleanup := newProtocol(t) defer cleanup() request, response := newMessagePair(64, 64) protocol.EncodeOpen(&request, "test.db", 0, "test-0") makeCall(t, c, &request, &response) db, err := protocol.DecodeDb(&response) require.NoError(t, err) protocol.EncodePrepare(&request, uint64(db), "CREATE TABLE test (n INT)") makeCall(t, c, &request, &response) _, stmt, params, err := protocol.DecodeStmt(&response) require.NoError(t, err) assert.Equal(t, uint32(0), stmt) assert.Equal(t, uint64(0), params) } /* func TestProtocol_Exec(t *testing.T) { client, cleanup := newProtocol(t) defer cleanup() ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond) defer cancel() db, err := client.Open(ctx, "test.db", "volatile") require.NoError(t, err) stmt, err := client.Prepare(ctx, db.ID, "CREATE TABLE test (n INT)") require.NoError(t, err) _, err = client.Exec(ctx, db.ID, stmt.ID) require.NoError(t, err) } func TestProtocol_Query(t *testing.T) { client, cleanup := newProtocol(t) defer cleanup() ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond) defer cancel() db, err := client.Open(ctx, "test.db", "volatile") require.NoError(t, err) start := time.Now() stmt, err := client.Prepare(ctx, db.ID, "CREATE TABLE test (n INT)") 
require.NoError(t, err) _, err = client.Exec(ctx, db.ID, stmt.ID) require.NoError(t, err) _, err = client.Finalize(ctx, db.ID, stmt.ID) require.NoError(t, err) stmt, err = client.Prepare(ctx, db.ID, "INSERT INTO test VALUES(1)") require.NoError(t, err) _, err = client.Exec(ctx, db.ID, stmt.ID) require.NoError(t, err) _, err = client.Finalize(ctx, db.ID, stmt.ID) require.NoError(t, err) stmt, err = client.Prepare(ctx, db.ID, "SELECT n FROM test") require.NoError(t, err) _, err = client.Query(ctx, db.ID, stmt.ID) require.NoError(t, err) _, err = client.Finalize(ctx, db.ID, stmt.ID) require.NoError(t, err) fmt.Printf("time %s\n", time.Since(start)) } */ func newProtocol(t *testing.T) (*protocol.Protocol, func()) { t.Helper() address, serverCleanup := newNode(t, 0) store := newStore(t, []string{address}) config := protocol.Config{ AttemptTimeout: 100 * time.Millisecond, BackoffFactor: time.Millisecond, } connector := protocol.NewConnector(0, store, config, logging.Test(t)) ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond) defer cancel() client, err := connector.Connect(ctx) require.NoError(t, err) cleanup := func() { client.Close() serverCleanup() } return client, cleanup } // Perform a client call. func makeCall(t *testing.T, p *protocol.Protocol, request, response *protocol.Message) { ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond) defer cancel() err := p.Call(ctx, request, response) require.NoError(t, err) } // Return a new message pair to be used as request and response. func newMessagePair(size1, size2 int) (protocol.Message, protocol.Message) { message1 := protocol.Message{} message1.Init(size1) message2 := protocol.Message{} message2.Init(size2) return message1, message2 } golang-github-cowsql-go-cowsql-1.22.0/internal/protocol/request.go000066400000000000000000000117411447672437700252670ustar00rootroot00000000000000package protocol // DO NOT EDIT // // This file was generated by ./schema.sh // EncodeLeader encodes a Leader request. func EncodeLeader(request *Message) { request.reset() request.putUint64(0) request.putHeader(RequestLeader, 0) } // EncodeClient encodes a Client request. func EncodeClient(request *Message, id uint64) { request.reset() request.putUint64(id) request.putHeader(RequestClient, 0) } // EncodeHeartbeat encodes a Heartbeat request. func EncodeHeartbeat(request *Message, timestamp uint64) { request.reset() request.putUint64(timestamp) request.putHeader(RequestHeartbeat, 0) } // EncodeOpen encodes a Open request. func EncodeOpen(request *Message, name string, flags uint64, vfs string) { request.reset() request.putString(name) request.putUint64(flags) request.putString(vfs) request.putHeader(RequestOpen, 0) } // EncodePrepare encodes a Prepare request. func EncodePrepare(request *Message, db uint64, sql string) { request.reset() request.putUint64(db) request.putString(sql) request.putHeader(RequestPrepare, 0) } // EncodeExecV0 encodes a Exec request. func EncodeExecV0(request *Message, db uint32, stmt uint32, values NamedValues) { request.reset() request.putUint32(db) request.putUint32(stmt) request.putNamedValues(values) request.putHeader(RequestExec, 0) } // EncodeExecV1 encodes a Exec request. func EncodeExecV1(request *Message, db uint32, stmt uint32, values NamedValues32) { request.reset() request.putUint32(db) request.putUint32(stmt) request.putNamedValues32(values) request.putHeader(RequestExec, 1) } // EncodeQueryV0 encodes a Query request. 
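// ---------------------------------------------------------------------------
// Editorial aside (not part of the original archive): typical usage of the
// generated encoders in this file, as seen in protocol_test.go — a Message
// is initialized once and each Encode* call resets it, writes the payload
// and finalizes the header with the matching request type. dbID and the SQL
// text below are invented for illustration:
//
//	request := Message{}
//	request.Init(4096)
//	EncodeExecSQLV0(&request, dbID, "INSERT INTO test VALUES(1)", nil)
//	// request now carries type RequestExecSQL, schema 0 and a
//	// word-aligned body: 8-byte db ID, padded SQL string, no parameters.
// (end of editorial aside)
// ---------------------------------------------------------------------------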
func EncodeQueryV0(request *Message, db uint32, stmt uint32, values NamedValues) { request.reset() request.putUint32(db) request.putUint32(stmt) request.putNamedValues(values) request.putHeader(RequestQuery, 0) } // EncodeQueryV1 encodes a Query request. func EncodeQueryV1(request *Message, db uint32, stmt uint32, values NamedValues32) { request.reset() request.putUint32(db) request.putUint32(stmt) request.putNamedValues32(values) request.putHeader(RequestQuery, 1) } // EncodeFinalize encodes a Finalize request. func EncodeFinalize(request *Message, db uint32, stmt uint32) { request.reset() request.putUint32(db) request.putUint32(stmt) request.putHeader(RequestFinalize, 0) } // EncodeExecSQLV0 encodes a ExecSQL request. func EncodeExecSQLV0(request *Message, db uint64, sql string, values NamedValues) { request.reset() request.putUint64(db) request.putString(sql) request.putNamedValues(values) request.putHeader(RequestExecSQL, 0) } // EncodeExecSQLV1 encodes a ExecSQL request. func EncodeExecSQLV1(request *Message, db uint64, sql string, values NamedValues32) { request.reset() request.putUint64(db) request.putString(sql) request.putNamedValues32(values) request.putHeader(RequestExecSQL, 1) } // EncodeQuerySQLV0 encodes a QuerySQL request. func EncodeQuerySQLV0(request *Message, db uint64, sql string, values NamedValues) { request.reset() request.putUint64(db) request.putString(sql) request.putNamedValues(values) request.putHeader(RequestQuerySQL, 0) } // EncodeQuerySQLV1 encodes a QuerySQL request. func EncodeQuerySQLV1(request *Message, db uint64, sql string, values NamedValues32) { request.reset() request.putUint64(db) request.putString(sql) request.putNamedValues32(values) request.putHeader(RequestQuerySQL, 1) } // EncodeInterrupt encodes a Interrupt request. func EncodeInterrupt(request *Message, db uint64) { request.reset() request.putUint64(db) request.putHeader(RequestInterrupt, 0) } // EncodeAdd encodes a Add request. func EncodeAdd(request *Message, id uint64, address string) { request.reset() request.putUint64(id) request.putString(address) request.putHeader(RequestAdd, 0) } // EncodeAssign encodes a Assign request. func EncodeAssign(request *Message, id uint64, role uint64) { request.reset() request.putUint64(id) request.putUint64(role) request.putHeader(RequestAssign, 0) } // EncodeRemove encodes a Remove request. func EncodeRemove(request *Message, id uint64) { request.reset() request.putUint64(id) request.putHeader(RequestRemove, 0) } // EncodeDump encodes a Dump request. func EncodeDump(request *Message, name string) { request.reset() request.putString(name) request.putHeader(RequestDump, 0) } // EncodeCluster encodes a Cluster request. func EncodeCluster(request *Message, format uint64) { request.reset() request.putUint64(format) request.putHeader(RequestCluster, 0) } // EncodeTransfer encodes a Transfer request. func EncodeTransfer(request *Message, id uint64) { request.reset() request.putUint64(id) request.putHeader(RequestTransfer, 0) } // EncodeDescribe encodes a Describe request. func EncodeDescribe(request *Message, format uint64) { request.reset() request.putUint64(format) request.putHeader(RequestDescribe, 0) } // EncodeWeight encodes a Weight request. 
func EncodeWeight(request *Message, weight uint64) { request.reset() request.putUint64(weight) request.putHeader(RequestWeight, 0) } golang-github-cowsql-go-cowsql-1.22.0/internal/protocol/response.go000066400000000000000000000133401447672437700254320ustar00rootroot00000000000000package protocol // DO NOT EDIT // // This file was generated by ./schema.sh import "fmt" // DecodeFailure decodes a Failure response. func DecodeFailure(response *Message) (code uint64, message string, err error) { mtype, _ := response.getHeader() if mtype == ResponseFailure { e := ErrRequest{} e.Code = response.getUint64() e.Description = response.getString() err = e return } if mtype != ResponseFailure { err = fmt.Errorf("decode %s: unexpected type %d", responseDesc(ResponseFailure), mtype) return } code = response.getUint64() message = response.getString() return } // DecodeWelcome decodes a Welcome response. func DecodeWelcome(response *Message) (heartbeatTimeout uint64, err error) { mtype, _ := response.getHeader() if mtype == ResponseFailure { e := ErrRequest{} e.Code = response.getUint64() e.Description = response.getString() err = e return } if mtype != ResponseWelcome { err = fmt.Errorf("decode %s: unexpected type %d", responseDesc(ResponseWelcome), mtype) return } heartbeatTimeout = response.getUint64() return } // DecodeNodeLegacy decodes a NodeLegacy response. func DecodeNodeLegacy(response *Message) (address string, err error) { mtype, _ := response.getHeader() if mtype == ResponseFailure { e := ErrRequest{} e.Code = response.getUint64() e.Description = response.getString() err = e return } if mtype != ResponseNodeLegacy { err = fmt.Errorf("decode %s: unexpected type %d", responseDesc(ResponseNodeLegacy), mtype) return } address = response.getString() return } // DecodeNode decodes a Node response. func DecodeNode(response *Message) (id uint64, address string, err error) { mtype, _ := response.getHeader() if mtype == ResponseFailure { e := ErrRequest{} e.Code = response.getUint64() e.Description = response.getString() err = e return } if mtype != ResponseNode { err = fmt.Errorf("decode %s: unexpected type %d", responseDesc(ResponseNode), mtype) return } id = response.getUint64() address = response.getString() return } // DecodeNodes decodes a Nodes response. func DecodeNodes(response *Message) (servers Nodes, err error) { mtype, _ := response.getHeader() if mtype == ResponseFailure { e := ErrRequest{} e.Code = response.getUint64() e.Description = response.getString() err = e return } if mtype != ResponseNodes { err = fmt.Errorf("decode %s: unexpected type %d", responseDesc(ResponseNodes), mtype) return } servers = response.getNodes() return } // DecodeDb decodes a Db response. func DecodeDb(response *Message) (id uint32, err error) { mtype, _ := response.getHeader() if mtype == ResponseFailure { e := ErrRequest{} e.Code = response.getUint64() e.Description = response.getString() err = e return } if mtype != ResponseDb { err = fmt.Errorf("decode %s: unexpected type %d", responseDesc(ResponseDb), mtype) return } id = response.getUint32() response.getUint32() return } // DecodeStmt decodes a Stmt response. 
func DecodeStmt(response *Message) (db uint32, id uint32, params uint64, err error) { mtype, _ := response.getHeader() if mtype == ResponseFailure { e := ErrRequest{} e.Code = response.getUint64() e.Description = response.getString() err = e return } if mtype != ResponseStmt { err = fmt.Errorf("decode %s: unexpected type %d", responseDesc(ResponseStmt), mtype) return } db = response.getUint32() id = response.getUint32() params = response.getUint64() return } // DecodeEmpty decodes a Empty response. func DecodeEmpty(response *Message) (err error) { mtype, _ := response.getHeader() if mtype == ResponseFailure { e := ErrRequest{} e.Code = response.getUint64() e.Description = response.getString() err = e return } if mtype != ResponseEmpty { err = fmt.Errorf("decode %s: unexpected type %d", responseDesc(ResponseEmpty), mtype) return } response.getUint64() return } // DecodeResult decodes a Result response. func DecodeResult(response *Message) (result Result, err error) { mtype, _ := response.getHeader() if mtype == ResponseFailure { e := ErrRequest{} e.Code = response.getUint64() e.Description = response.getString() err = e return } if mtype != ResponseResult { err = fmt.Errorf("decode %s: unexpected type %d", responseDesc(ResponseResult), mtype) return } result = response.getResult() return } // DecodeRows decodes a Rows response. func DecodeRows(response *Message) (rows Rows, err error) { mtype, _ := response.getHeader() if mtype == ResponseFailure { e := ErrRequest{} e.Code = response.getUint64() e.Description = response.getString() err = e return } if mtype != ResponseRows { err = fmt.Errorf("decode %s: unexpected type %d", responseDesc(ResponseRows), mtype) return } rows = response.getRows() return } // DecodeFiles decodes a Files response. func DecodeFiles(response *Message) (files Files, err error) { mtype, _ := response.getHeader() if mtype == ResponseFailure { e := ErrRequest{} e.Code = response.getUint64() e.Description = response.getString() err = e return } if mtype != ResponseFiles { err = fmt.Errorf("decode %s: unexpected type %d", responseDesc(ResponseFiles), mtype) return } files = response.getFiles() return } // DecodeMetadata decodes a Metadata response. 
func DecodeMetadata(response *Message) (failureDomain uint64, weight uint64, err error) { mtype, _ := response.getHeader() if mtype == ResponseFailure { e := ErrRequest{} e.Code = response.getUint64() e.Description = response.getString() err = e return } if mtype != ResponseMetadata { err = fmt.Errorf("decode %s: unexpected type %d", responseDesc(ResponseMetadata), mtype) return } failureDomain = response.getUint64() weight = response.getUint64() return } golang-github-cowsql-go-cowsql-1.22.0/internal/protocol/schema.go000066400000000000000000000050121447672437700250310ustar00rootroot00000000000000package protocol //go:generate ./schema.sh --request init //go:generate ./schema.sh --request Leader unused:uint64 //go:generate ./schema.sh --request Client id:uint64 //go:generate ./schema.sh --request Heartbeat timestamp:uint64 //go:generate ./schema.sh --request Open name:string flags:uint64 vfs:string //go:generate ./schema.sh --request Prepare db:uint64 sql:string //go:generate ./schema.sh --request Exec:0 db:uint32 stmt:uint32 values:NamedValues //go:generate ./schema.sh --request Exec:1 db:uint32 stmt:uint32 values:NamedValues32 //go:generate ./schema.sh --request Query:0 db:uint32 stmt:uint32 values:NamedValues //go:generate ./schema.sh --request Query:1 db:uint32 stmt:uint32 values:NamedValues32 //go:generate ./schema.sh --request Finalize db:uint32 stmt:uint32 //go:generate ./schema.sh --request ExecSQL:0 db:uint64 sql:string values:NamedValues //go:generate ./schema.sh --request ExecSQL:1 db:uint64 sql:string values:NamedValues32 //go:generate ./schema.sh --request QuerySQL:0 db:uint64 sql:string values:NamedValues //go:generate ./schema.sh --request QuerySQL:1 db:uint64 sql:string values:NamedValues32 //go:generate ./schema.sh --request Interrupt db:uint64 //go:generate ./schema.sh --request Add id:uint64 address:string //go:generate ./schema.sh --request Assign id:uint64 role:uint64 //go:generate ./schema.sh --request Remove id:uint64 //go:generate ./schema.sh --request Dump name:string //go:generate ./schema.sh --request Cluster format:uint64 //go:generate ./schema.sh --request Transfer id:uint64 //go:generate ./schema.sh --request Describe format:uint64 //go:generate ./schema.sh --request Weight weight:uint64 //go:generate ./schema.sh --response init //go:generate ./schema.sh --response Failure code:uint64 message:string //go:generate ./schema.sh --response Welcome heartbeatTimeout:uint64 //go:generate ./schema.sh --response NodeLegacy address:string //go:generate ./schema.sh --response Node id:uint64 address:string //go:generate ./schema.sh --response Nodes servers:Nodes //go:generate ./schema.sh --response Db id:uint32 unused:uint32 //go:generate ./schema.sh --response Stmt db:uint32 id:uint32 params:uint64 //go:generate ./schema.sh --response Empty unused:uint64 //go:generate ./schema.sh --response Result result:Result //go:generate ./schema.sh --response Rows rows:Rows //go:generate ./schema.sh --response Files files:Files //go:generate ./schema.sh --response Metadata failureDomain:uint64 weight:uint64 golang-github-cowsql-go-cowsql-1.22.0/internal/protocol/schema.sh000077500000000000000000000044731447672437700250530ustar00rootroot00000000000000#!/bin/bash request_init() { cat > request.go < response.go <> request.go <> request.go <> request.go <> response.go <> response.go <> response.go < Remove a node from the cluster .describe
<address> Show the details of a node .weight
<address> <n> Set the weight of a node .dump <address>
[<file>] Dump the database .reconfigure <dir> <clusteryaml> Reconfigure the cluster `[1:] } func (s *Shell) processCluster(ctx context.Context, line string) (string, error) { cli, err := client.FindLeader(ctx, s.store, client.WithDialFunc(s.dial)) if err != nil { return "", err } cluster, err := cli.Cluster(ctx) if err != nil { return "", err } result := "" switch s.format { case formatTabular: for i, server := range cluster { if i > 0 { result += "\n" } result += fmt.Sprintf("%x|%s|%s", server.ID, server.Address, server.Role) } case formatJson: data, err := json.Marshal(cluster) if err != nil { return "", err } var indented bytes.Buffer json.Indent(&indented, data, "", "\t") result = string(indented.Bytes()) } return result, nil } func (s *Shell) processLeader(ctx context.Context, line string) (string, error) { cli, err := client.FindLeader(ctx, s.store, client.WithDialFunc(s.dial)) if err != nil { return "", err } leader, err := cli.Leader(ctx) if err != nil { return "", err } if leader == nil { return "", nil } return leader.Address, nil }
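// The admin helpers in this file share one pattern: resolve the current
// cluster leader through the node store, then issue the request against it.
// A minimal standalone sketch of that pattern (the address below is an
// assumption for illustration, not something this package defines):
//
//	store := client.NewInmemNodeStore()
//	if err := store.Set(context.Background(), []client.NodeInfo{{Address: "127.0.0.1:9001"}}); err != nil {
//		// handle store error
//	}
//	cli, err := client.FindLeader(context.Background(), store)
//	if err != nil {
//		// handle connection error
//	}
//	defer cli.Close()
//	servers, err := cli.Cluster(context.Background())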
") } address := parts[1] cli, err := client.FindLeader(ctx, s.store, client.WithDialFunc(s.dial)) if err != nil { return "", err } cluster, err := cli.Cluster(ctx) if err != nil { return "", err } for _, node := range cluster { if node.Address != address { continue } if err := cli.Remove(ctx, node.ID); err != nil { return "", fmt.Errorf("remove node %q: %w", address, err) } return "", nil } return "", fmt.Errorf("no node has address %q", address) } func (s *Shell) processDescribe(ctx context.Context, line string) (string, error) { parts := strings.Split(line, " ") if len(parts) != 2 { return "", fmt.Errorf("bad command format, should be: .describe
") } address := parts[1] cli, err := client.New(ctx, address, client.WithDialFunc(s.dial)) if err != nil { return "", err } metadata, err := cli.Describe(ctx) if err != nil { return "", err } result := "" switch s.format { case formatTabular: result += fmt.Sprintf("%s|%d|%d", address, metadata.FailureDomain, metadata.Weight) case formatJson: data, err := json.Marshal(metadata) if err != nil { return "", err } var indented bytes.Buffer json.Indent(&indented, data, "", "\t") result = string(indented.Bytes()) } return result, nil } func (s *Shell) processDump(ctx context.Context, line string) (string, error) { parts := strings.Split(line, " ") if len(parts) < 2 || len(parts) > 3 { return "NOK", fmt.Errorf("bad command format, should be: .dump
[]") } address := parts[1] cli, err := client.New(ctx, address, client.WithDialFunc(s.dial)) if err != nil { return "NOK", fmt.Errorf("dial failed") } database := "db.bin" if len(parts) == 3 { database = parts[2] } files, err := cli.Dump(ctx, database) if err != nil { return "NOK", fmt.Errorf("dump failed") } dir, err := os.Getwd() if err != nil { return "NOK", fmt.Errorf("os.Getwd() failed") } for _, file := range files { path := filepath.Join(dir, file.Name) err := ioutil.WriteFile(path, file.Data, 0600) if err != nil { return "NOK", fmt.Errorf("WriteFile failed on path %s", path) } } return "OK", nil } func (s *Shell) processReconfigure(ctx context.Context, line string) (string, error) { parts := strings.Split(line, " ") if len(parts) != 3 { return "NOK", fmt.Errorf("bad command format, should be: .reconfigure \n" + "Args:\n" + "\tdir - Directory of node with up to date data\n" + "\tclusteryaml - Path to a .yaml file containing the desired cluster configuration\n\n" + "Help:\n" + "\tUse this command when trying to preserve the data from your cluster while changing the\n" + "\tconfiguration of the cluster because e.g. your cluster is broken due to unreachablee nodes.\n" + "\t0. BACKUP ALL YOUR NODE DATA DIRECTORIES BEFORE PROCEEDING!\n" + "\t1. Stop all cowsql nodes.\n" + "\t2. Identify the dir of the node with the most up to date raft term and log, this will be the argument.\n" + "\t3. Create a .yaml file with the same format as cluster.yaml (or use/adapt an existing cluster.yaml) with the\n " + "\t desired cluster configuration. This will be the argument.\n" + "\t Don't forget to make sure the ID's in the file line up with the ID's in the info.yaml files.\n" + "\t4. Run the .reconfigure command, it should return \"OK\".\n" + "\t5. Copy the snapshot-xxx-xxx-xxx, snapshot-xxx-xxx-xxx.meta, segment files (00000xxxxx-000000xxxxx), desired cluster.yaml\n" + "\t from over to the directories of the other nodes identified in , deleting any leftover snapshot-xxx-xxx-xxx, snapshot-xxx-xxx-xxx.meta,\n" + "\t segment (00000xxxxx-000000xxxxx, open-xxx) and metadata{1,2} files that it contains.\n" + "\t Make sure an info.yaml is also present that is in line with cluster.yaml.\n" + "\t6. Start all the cowsql nodes.\n" + "\t7. If, for some reason, this fails or gives undesired results, try again with data from another node (you should still have this from step 0).\n") } dir := parts[1] clusteryamlpath := parts[2] store, err := client.NewYamlNodeStore(clusteryamlpath) if err != nil { return "NOK", fmt.Errorf("failed to create YamlNodeStore from file at %s :%v", clusteryamlpath, err) } servers, err := store.Get(ctx) if err != nil { return "NOK", fmt.Errorf("failed to retrieve NodeInfo list :%v", err) } err = cowsql.ReconfigureMembershipExt(dir, servers) if err != nil { return "NOK", fmt.Errorf("failed to reconfigure membership :%v", err) } return "OK", nil } func (s *Shell) processWeight(ctx context.Context, line string) (string, error) { parts := strings.Split(line, " ") if len(parts) != 3 { return "", fmt.Errorf("bad command format, should be: .weight
") } address := parts[1] weight, err := strconv.Atoi(parts[2]) if err != nil || weight < 0 { return "", fmt.Errorf("bad weight %q", parts[2]) } cli, err := client.New(ctx, address, client.WithDialFunc(s.dial)) if err != nil { return "", err } if err := cli.Weight(ctx, uint64(weight)); err != nil { return "", err } return "", nil } func (s *Shell) processQuery(ctx context.Context, line string) (string, error) { tx, err := s.db.BeginTx(ctx, nil) if err != nil { return "", fmt.Errorf("begin transaction: %w", err) } rows, err := tx.Query(line) if err != nil { err = fmt.Errorf("query: %w", err) if rbErr := tx.Rollback(); rbErr != nil { return "", fmt.Errorf("unable to rollback: %v", err) } return "", err } defer rows.Close() columns, err := rows.Columns() if err != nil { err = fmt.Errorf("columns: %w", err) if rbErr := tx.Rollback(); rbErr != nil { return "", fmt.Errorf("unable to rollback: %v", err) } return "", err } n := len(columns) var sb strings.Builder for rows.Next() { row := make([]interface{}, n) rowPointers := make([]interface{}, n) for i := range row { rowPointers[i] = &row[i] } if err := rows.Scan(rowPointers...); err != nil { err = fmt.Errorf("scan: %w", err) if rbErr := tx.Rollback(); rbErr != nil { return "", fmt.Errorf("unable to rollback: %v", err) } return "", err } for i, column := range row { if i == 0 { fmt.Fprintf(&sb, "%v", column) } else { fmt.Fprintf(&sb, "|%v", column) } } sb.WriteByte('\n') } if err := rows.Err(); err != nil { err = fmt.Errorf("rows: %w", err) if rbErr := tx.Rollback(); rbErr != nil { return "", fmt.Errorf("unable to rollback: %v", err) } return "", err } if err := tx.Commit(); err != nil { return "", fmt.Errorf("commit: %w", err) } return strings.TrimRight(sb.String(), "\n"), nil } func (s *Shell) processExec(ctx context.Context, line string) error { tx, err := s.db.BeginTx(ctx, nil) if err != nil { return err } if _, err := tx.Exec(line); err != nil { err = fmt.Errorf("exec: %w", err) if rbErr := tx.Rollback(); rbErr != nil { return fmt.Errorf("unable to rollback: %v", err) } return err } if err := tx.Commit(); err != nil { return fmt.Errorf("commit: %w", err) } return nil } golang-github-cowsql-go-cowsql-1.22.0/logging/000077500000000000000000000000001447672437700212155ustar00rootroot00000000000000golang-github-cowsql-go-cowsql-1.22.0/logging/func.go000066400000000000000000000012041447672437700224740ustar00rootroot00000000000000package logging import ( "fmt" "testing" ) // Func is a function that can be used for logging. type Func func(Level, string, ...interface{}) // Test returns a logging function that forwards messages to the test logger. func Test(t *testing.T) Func { return func(l Level, format string, a ...interface{}) { format = fmt.Sprintf("%s: %s", l.String(), format) t.Logf(format, a...) } } // Stdout returns a logging function that prints log messages on standard // output. func Stdout() Func { return func(l Level, format string, a ...interface{}) { format = fmt.Sprintf("%s: %s\n", l.String(), format) fmt.Printf(format, a...) } } golang-github-cowsql-go-cowsql-1.22.0/logging/func_test.go000066400000000000000000000002521447672437700235350ustar00rootroot00000000000000package logging_test import ( "testing" "github.com/cowsql/go-cowsql/logging" ) func Test_TestFunc(t *testing.T) { f := logging.Test(t) f(logging.Info, "hello") } golang-github-cowsql-go-cowsql-1.22.0/logging/level.go000066400000000000000000000005351447672437700226560ustar00rootroot00000000000000package logging // Level defines the logging level. 
type Level int // Available logging levels. const ( None Level = iota Debug Info Warn Error ) func (l Level) String() string { switch l { case Debug: return "DEBUG" case Info: return "INFO" case Warn: return "WARN" case Error: return "ERROR" default: return "UNKNOWN" } } golang-github-cowsql-go-cowsql-1.22.0/logging/level_test.go000066400000000000000000000006651447672437700237210ustar00rootroot00000000000000package logging_test import ( "testing" "github.com/cowsql/go-cowsql/logging" "github.com/stretchr/testify/assert" ) func TestLevel_String(t *testing.T) { assert.Equal(t, "DEBUG", logging.Debug.String()) assert.Equal(t, "INFO", logging.Info.String()) assert.Equal(t, "WARN", logging.Warn.String()) assert.Equal(t, "ERROR", logging.Error.String()) unknown := logging.Level(666) assert.Equal(t, "UNKNOWN", unknown.String()) } golang-github-cowsql-go-cowsql-1.22.0/node.go000066400000000000000000000146431447672437700210530ustar00rootroot00000000000000package cowsql import ( "context" "time" "github.com/cowsql/go-cowsql/client" "github.com/cowsql/go-cowsql/internal/bindings" "github.com/pkg/errors" ) // Node runs a cowsql node. type Node struct { log client.LogFunc // Logger server *bindings.Node // Low-level C implementation acceptCh chan error // Receives connection handling errors id uint64 address string bindAddress string cancel context.CancelFunc } // NodeInfo is a convenience alias for client.NodeInfo. type NodeInfo = client.NodeInfo // SnapshotParams exposes bindings.SnapshotParams. Used for setting cowsql's // snapshot parameters. // SnapshotParams.Threshold controls after how many raft log entries a snapshot is // taken. The higher this number, the lower the frequency of the snapshots. // SnapshotParams.Trailing controls how many raft log entries are retained after // taking a snapshot. type SnapshotParams = bindings.SnapshotParams // Option can be used to tweak node parameters. type Option func(*options) // WithDialFunc sets a custom dial function for the server. func WithDialFunc(dial client.DialFunc) Option { return func(options *options) { options.DialFunc = dial } } // WithBindAddress sets a custom bind address for the server. func WithBindAddress(address string) Option { return func(options *options) { options.BindAddress = address } } // WithNetworkLatency sets the average one-way network latency. func WithNetworkLatency(latency time.Duration) Option { return func(options *options) { options.NetworkLatency = uint64(latency.Nanoseconds()) } } // WithFailureDomain sets the code of the failure domain the node belongs to. func WithFailureDomain(code uint64) Option { return func(options *options) { options.FailureDomain = code } } // WithSnapshotParams sets the snapshot parameters of the node. func WithSnapshotParams(params SnapshotParams) Option { return func(options *options) { options.SnapshotParams = params } } // WithAutoRecovery enables or disables auto-recovery of persisted data // at startup for this node. // // When auto-recovery is enabled, raft snapshots and segment files may be // deleted at startup if they are determined to be corrupt. This helps // the startup process to succeed in more cases, but can lead to data loss. // // Auto-recovery is enabled by default. func WithAutoRecovery(recovery bool) Option { return func(options *options) { options.AutoRecovery = recovery } } // New creates a new Node instance. 
func New(id uint64, address string, dir string, options ...Option) (*Node, error) { o := defaultOptions() for _, option := range options { option(o) } ctx, cancel := context.WithCancel(context.Background()) server, err := bindings.NewNode(ctx, id, address, dir) if err != nil { cancel() return nil, err } if o.DialFunc != nil { if err := server.SetDialFunc(o.DialFunc); err != nil { cancel() return nil, err } } if o.BindAddress != "" { if err := server.SetBindAddress(o.BindAddress); err != nil { cancel() return nil, err } } if o.NetworkLatency != 0 { if err := server.SetNetworkLatency(o.NetworkLatency); err != nil { cancel() return nil, err } } if o.FailureDomain != 0 { if err := server.SetFailureDomain(o.FailureDomain); err != nil { cancel() return nil, err } } if o.SnapshotParams.Threshold != 0 || o.SnapshotParams.Trailing != 0 { if err := server.SetSnapshotParams(o.SnapshotParams); err != nil { cancel() return nil, err } } if err := server.SetAutoRecovery(o.AutoRecovery); err != nil { cancel() return nil, err } s := &Node{ server: server, acceptCh: make(chan error, 1), id: id, address: address, bindAddress: o.BindAddress, cancel: cancel, } return s, nil } // BindAddress returns the network address the node is listening to. func (s *Node) BindAddress() string { return s.server.GetBindAddress() } // Start serving requests. func (s *Node) Start() error { return s.server.Start() } // Recover a node by forcing a new cluster configuration. // // DEPRECATED: Use ReconfigureMembership instead, which does not require // instantiating a new Node object. func (s *Node) Recover(cluster []NodeInfo) error { return s.server.Recover(cluster) } // Hold configuration options for a cowsql server. type options struct { Log client.LogFunc DialFunc client.DialFunc BindAddress string NetworkLatency uint64 FailureDomain uint64 SnapshotParams bindings.SnapshotParams AutoRecovery bool } // Close the server, releasing all resources it created. func (s *Node) Close() error { s.cancel() // Send a stop signal to the cowsql event loop. if err := s.server.Stop(); err != nil { return errors.Wrap(err, "server failed to stop") } s.server.Close() return nil } // BootstrapID is a magic ID that should be used for the first node in a // cluster. Alternatively ID 1 can be used as well. const BootstrapID = 0x2dc171858c3155be // GenerateID generates a unique ID for a new node, based on a hash of its // address and the current time. func GenerateID(address string) uint64 { return bindings.GenerateID(address) } // ReconfigureMembership can be used to recover a cluster whose majority of // nodes have died, and therefore has become unavailable. // // It forces appending a new configuration to the raft log stored in the given // directory, effectively replacing the current configuration. func ReconfigureMembership(dir string, cluster []NodeInfo) error { server, err := bindings.NewNode(context.Background(), 1, "1", dir) if err != nil { return err } defer server.Close() return server.Recover(cluster) }
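// An illustrative sketch of the recovery flow described above (the data
// directory, IDs and addresses are assumptions for the example, not defaults
// of this package): stop every node, pick the directory holding the most
// up-to-date raft log, and force the new configuration:
//
//	cluster := []cowsql.NodeInfo{
//		{ID: 1, Address: "127.0.0.1:9001"},
//		{ID: 2, Address: "127.0.0.1:9002"},
//	}
//	if err := cowsql.ReconfigureMembership("/var/lib/cowsql/node1", cluster); err != nil {
//		// handle recovery error
//	}
//
// The rewritten raft log must then be copied to the other nodes' data
// directories before restarting them; test/recover.sh exercises this
// procedure end to end.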
// ReconfigureMembershipExt can be used to recover a cluster whose majority of // nodes have died, and therefore has become unavailable. // // It forces appending a new configuration to the raft log stored in the given // directory, effectively replacing the current configuration. // In comparison with ReconfigureMembership, this function takes the node role // into account and makes use of a cowsql API that supports extending the // NodeInfo struct. func ReconfigureMembershipExt(dir string, cluster []NodeInfo) error { server, err := bindings.NewNode(context.Background(), 1, "1", dir) if err != nil { return err } defer server.Close() return server.RecoverExt(cluster) } // Create an options object with sane defaults. func defaultOptions() *options { return &options{ DialFunc: client.DefaultDialFunc, AutoRecovery: true, } } golang-github-cowsql-go-cowsql-1.22.0/test/000077500000000000000000000000001447672437700205465ustar00rootroot00000000000000golang-github-cowsql-go-cowsql-1.22.0/test/cowsql-demo-util.sh000077500000000000000000000027631447672437700243160ustar00rootroot00000000000000# cowsql-demo test utilities GO=${GO:-go} ASAN=${ASAN:-} VERBOSE=${VERBOSE:-0} $GO build -tags libsqlite3 $ASAN ./cmd/cowsql-demo/ DIR=$(mktemp -d) start_node() { n="${1}" pidfile="${DIR}/pid.${n}" join="${2}" verbose="" if [ "$VERBOSE" -eq 1 ]; then verbose="--verbose" fi ./cowsql-demo --dir "$DIR" --api=127.0.0.1:800"${n}" --db=127.0.0.1:900"${n}" "$join" $verbose & echo "${!}" > "${pidfile}" i=0 while ! nc -z 127.0.0.1 800"${n}" 2>/dev/null; do i=$(expr $i + 1) sleep 0.2 if [ "$i" -eq 25 ]; then echo "Error: node ${n} not yet up after 5 seconds" exit 1 fi done } kill_node() { n=$1 pidfile="${DIR}/pid.${n}" if ! [ -e "$pidfile" ]; then return fi pid=$(cat "${pidfile}") kill -TERM "$pid" wait "$pid" rm "${pidfile}" } set_up_node() { n=$1 join="" if [ "$n" -ne 1 ]; then join=--join=127.0.0.1:9001 fi echo "=> Set up cowsql-demo node $n" start_node "${n}" "${join}" } tear_down_node() { n=$1 echo "=> Tear down cowsql-demo node $n" kill_node "$n" } set_up() { echo "=> Set up cowsql-demo cluster" set_up_node 1 set_up_node 2 set_up_node 3 } tear_down() { err=$? trap '' HUP INT TERM echo "=> Tear down cowsql-demo cluster" tear_down_node 3 tear_down_node 2 tear_down_node 1 rm -rf "$DIR" exit $err } sig_handler() { trap '' EXIT tear_down } golang-github-cowsql-go-cowsql-1.22.0/test/cowsql-demo.sh000077500000000000000000000012571447672437700233420ustar00rootroot00000000000000#!/bin/sh -eu # # Test the cowsql-demo application. BASEDIR=$(dirname "$0") . "$BASEDIR"/cowsql-demo-util.sh trap tear_down EXIT trap sig_handler HUP INT TERM set_up echo "=> Start test" echo "=> Put key to node 1" if [ "$(curl -s -X PUT -d my-key http://127.0.0.1:8001/my-value)" != "done" ]; then echo "Error: put key to node 1" fi echo "=> Get key from node 1" if [ "$(curl -s http://127.0.0.1:8001/my-value)" != "my-key" ]; then echo "Error: get key from node 1" fi echo "=> Kill node 1" kill_node 1 echo "=> Get key from node 2" if [ "$(curl -s http://127.0.0.1:8002/my-value)" != "my-key" ]; then echo "Error: get key from node 2" fi echo "=> Test successful"
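cowsql-demo.sh above drives the demo application purely over HTTP, but each PUT/GET ultimately lands in a replicated SQL table through the app package. The following is a hedged sketch of that server-side pattern; the data directory, address, database name and table schema are assumptions for illustration, not taken from the demo's source:

package main

import (
	"context"
	"fmt"

	"github.com/cowsql/go-cowsql/app"
)

func main() {
	// Assumed data directory and address; the demo derives these from its
	// --dir and --db flags.
	demo, err := app.New("/tmp/cowsql-demo-node1", app.WithAddress("127.0.0.1:9001"))
	if err != nil {
		panic(err)
	}
	defer demo.Close()

	ctx := context.Background()
	if err := demo.Ready(ctx); err != nil {
		panic(err)
	}

	// Open a database on the cluster; writes go through the raft log.
	db, err := demo.Open(ctx, "demo")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Assumed key/value schema.
	if _, err := db.Exec("CREATE TABLE IF NOT EXISTS model (key TEXT, value TEXT, UNIQUE(key))"); err != nil {
		panic(err)
	}
	if _, err := db.Exec("INSERT OR REPLACE INTO model(key, value) VALUES(?, ?)", "my-key", "my-value"); err != nil {
		panic(err)
	}
	var value string
	if err := db.QueryRow("SELECT value FROM model WHERE key = ?", "my-key").Scan(&value); err != nil {
		panic(err)
	}
	fmt.Println(value)
}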
"$BASEDIR"/cowsql-demo-util.sh $GO build -tags libsqlite3 $ASAN ./cmd/cowsql/ trap tear_down EXIT trap sig_handler HUP INT TERM set_up echo "=> Start test" echo "=> Put key to node 1" if [ "$(curl -s -X PUT -d my-value http://127.0.0.1:8001/my-key)" != "done" ]; then echo "Error: put key to node 1" fi echo "=> Get key from node 1" if [ "$(curl -s http://127.0.0.1:8001/my-key)" != "my-value" ]; then echo "Error: get key from node 1" fi echo "=> Stopping the cluster" tear_down_node 3 tear_down_node 2 tear_down_node 1 echo "=> Running recovery on node 1" node1_dir=$DIR/127.0.0.1:9001 node2_dir=$DIR/127.0.0.1:9002 node1_id=$(grep ID "$node1_dir"/info.yaml | cut -d" " -f2) node2_id=$(grep ID "$node2_dir"/info.yaml | cut -d" " -f2) target_yaml=${DIR}/cluster.yaml cat < "$target_yaml" - Address: 127.0.0.1:9001 ID: ${node1_id} Role: 0 - Address: 127.0.0.1:9002 ID: ${node2_id} Role: 1 EOF if ! ./cowsql -s 127.0.0.1:9001 test ".reconfigure ${node1_dir} ${target_yaml}"; then echo "Error: Reconfigure failed" exit 1 fi echo "=> Starting nodes 1 & 2" start_node 1 "" start_node 2 "" echo "=> Confirming new config" if [ "$(./cowsql -s 127.0.0.1:9001 test .leader)" != 127.0.0.1:9001 ]; then echo "Error: Expected node 1 to be leader" exit 1 fi if [ "$(./cowsql -s 127.0.0.1:9001 test .cluster | wc -l)" != 2 ]; then echo "Error: Expected 2 servers in the cluster" exit 1 fi if ! ./cowsql -s 127.0.0.1:9001 test .cluster | grep -q "127.0.0.1:9001|voter"; then echo "Error: server 1 not voter" exit 1 fi if ! ./cowsql -s 127.0.0.1:9001 test .cluster | grep -q "127.0.0.1:9002|stand-by"; then echo "Error: server 2 not stand-by" exit 1 fi echo "=> Get original key from node 1" if [ "$(curl -s http://127.0.0.1:8001/my-key)" != "my-value" ]; then echo "Error: get key from node 1" exit 1 fi echo "=> Put new key to node 1" if [ "$(curl -s -X PUT -d my-value-new http://127.0.0.1:8001/my-key-new)" != "done" ]; then echo "Error: put new key to node 1" exit 1 fi echo "=> Get new key from node 1" if [ "$(curl -s http://127.0.0.1:8001/my-key-new)" != "my-value-new" ]; then echo "Error: get new key from node 1" exit 1 fi echo "=> Test successful" golang-github-cowsql-go-cowsql-1.22.0/test/roles.sh000077500000000000000000000131141447672437700222310ustar00rootroot00000000000000#!/bin/bash -eu # # Test dynamic roles management. GO=${GO:-go} ASAN=${ASAN:-} VERBOSE=${VERBOSE:-0} DIR=$(mktemp -d) BINARY=$DIR/main CLUSTER=127.0.0.1:9001,127.0.0.1:9002,127.0.0.1:9003,127.0.0.1:9004,127.0.0.1:9005,127.0.0.1:9006 N=7 $GO build -tags libsqlite3 $ASAN ./cmd/cowsql/ set_up_binary() { cat > "$DIR"/main.go < 1 { join = append(join, "127.0.0.1:9001") } addr := fmt.Sprintf("127.0.0.1:900%d", index) if err := os.MkdirAll(dir, 0755); err != nil { panic(err) } app, err := app.New( dir, app.WithAddress(addr), app.WithCluster(join), app.WithLogFunc(logFunc), app.WithRolesAdjustmentFrequency(3 * time.Second), ) if err != nil { panic(err) } ctx, _ := context.WithTimeout(context.Background(), 30 * time.Second) if err := app.Ready(ctx); err != nil { panic(err) } <-ch ctx, cancel := context.WithTimeout(context.Background(), 2 * time.Second) defer cancel() app.Handover(ctx) app.Close() } EOF $GO build -o "$BINARY" -tags libsqlite3 $ASAN "$DIR"/main.go } start_node() { n="${1}" pidfile="${DIR}/pid.${n}" $BINARY "$n" & echo "${!}" > "${pidfile}" } kill_node() { n=$1 signal=$2 pidfile="${DIR}/pid.${n}" if ! 
[ -e "$pidfile" ]; then return fi pid=$(cat "${pidfile}") kill -"${signal}" "$pid" wait "$pid" || true rm "${pidfile}" } # Wait for the cluster to have 3 voters, 2 stand-bys and 1 spare wait_stable() { i=0 while true; do i=$(expr $i + 1) voters=$(./cowsql -s "$CLUSTER" test .cluster | grep voter | wc -l) standbys=$(./cowsql -s "$CLUSTER" test .cluster | grep stand-by | wc -l) spares=$(./cowsql -s "$CLUSTER" test .cluster | grep spare | wc -l) if [ "$voters" -eq 3 ] && [ "$standbys" -eq 3 ] && [ "$spares" -eq 1 ] ; then break fi if [ "$i" -eq 40 ]; then echo "Error: node roles not yet stable after 10 seconds" ./cowsql -s "$CLUSTER" test .cluster exit 1 fi sleep 0.25 done } # Wait for the given node to have the given role wait_role() { index=$1 role=$2 i=0 while true; do i=$(expr $i + 1) current=$(./cowsql -s "$CLUSTER" test .cluster | grep "127.0.0.1:900${index}" | cut -f 3 -d "|") if [ "$current" = "$role" ]; then break fi if [ "$i" -eq 40 ]; then echo "Error: node $index has role $current instead of $role" ./cowsql -s "$CLUSTER" test .cluster exit 1 fi sleep 0.25 done } set_up_node() { n=$1 echo "=> Set up test node $n" start_node "${n}" } set_up() { echo "=> Set up test cluster" set_up_binary for i in $(seq $N); do set_up_node "$i" done } tear_down_node() { n=$1 echo "=> Tear down test node $n" kill_node "$n" TERM } tear_down() { err=$? trap '' HUP INT TERM echo "=> Tear down test cluster" for i in $(seq $N -1 1); do tear_down_node "$i" done rm -rf "$DIR" exit $err } sig_handler() { trap '' EXIT tear_down } trap tear_down EXIT trap sig_handler HUP INT TERM set_up echo "=> Wait for roles to get stable" wait_stable # Stop one node at a time gracefully, then check that the cluster is stable. for i in $(seq 10); do index=$((1 + RANDOM % $N)) echo "=> Stop node $index" kill_node $index TERM echo "=> Wait for roles to get stable" wait_role $index spare wait_stable echo "=> Restart node $index" start_node $index sleep 2 done # Kill one node at a time ungracefully, then check that the cluster is stable. for i in $(seq 1); do index=$((1 + RANDOM % $N)) echo "=> Kill node $index" kill_node $index KILL echo "=> Wait for roles to get stable" wait_role $index spare wait_stable echo "=> Restart node $index" start_node $index sleep 2 done # Stop two nodes at a time gracefully, then check that the cluster is stable. for i in $(seq 10); do index1=$((1 + RANDOM % $N)) index2=$((1 + (index1 + $((RANDOM % ($N - 1)))) % $N)) echo "=> Stop nodes $index1 and $index2" kill_node $index1 TERM kill_node $index2 TERM sleep 2 echo "=> Restart nodes $index1 and $index2" start_node $index1 start_node $index2 echo "=> Wait for roles to get stable" wait_stable sleep 1 done # Kill two nodes at a time ungracefully, then check that the cluster is stable. for i in $(seq 10); do index1=$((1 + RANDOM % $N)) index2=$((1 + (index1 + $((RANDOM % ($N - 1)))) % $N)) echo "=> Stop nodes $index1 and $index2" kill_node $index1 KILL kill_node $index2 KILL sleep 5 echo "=> Restart nodes $index1 and $index2" start_node $index1 start_node $index2 echo "=> Wait for roles to get stable" wait_stable sleep 1 done echo "=> Test successful"