pax_global_header00006660000000000000000000000064147735134240014524gustar00rootroot0000000000000052 comment=e040f7139e0dc179f04fd61e1dd05c5c1fd07d60 nats.go-1.41.0/000077500000000000000000000000001477351342400131605ustar00rootroot00000000000000nats.go-1.41.0/.github/000077500000000000000000000000001477351342400145205ustar00rootroot00000000000000nats.go-1.41.0/.github/ISSUE_TEMPLATE/000077500000000000000000000000001477351342400167035ustar00rootroot00000000000000nats.go-1.41.0/.github/ISSUE_TEMPLATE/config.yml000066400000000000000000000005111477351342400206700ustar00rootroot00000000000000blank_issues_enabled: false contact_links: - name: Discussion url: https://github.com/nats-io/nats.go/discussions about: Ideal for ideas, feedback, or longer form questions. - name: Chat url: https://slack.nats.io about: Ideal for short, one-off questions, general conversation, and meeting other NATS users! nats.go-1.41.0/.github/ISSUE_TEMPLATE/defect.yml000066400000000000000000000030071477351342400206600ustar00rootroot00000000000000--- name: Defect description: Report a defect, such as a bug or regression. labels: - defect body: - type: textarea id: observed attributes: label: Observed behavior description: Describe the unexpected behavior or performance regression you are observing. validations: required: true - type: textarea id: expected attributes: label: Expected behavior description: Describe the expected behavior or performance characteristics. validations: required: true - type: textarea id: versions attributes: label: Server and client version description: |- Provide the versions you were using when the detect was observed. For the server, use `nats-server --version`, check the startup log output, or the image tag pulled from Docker. For the CLI client, use `nats --version`. For language-specific clients, check the version downloaded by the language dependency manager. 
validations: required: true - type: textarea id: environment attributes: label: Host environment description: |- Specify any relevant details about the host environment the server and/or client was running in, such as operating system, CPU architecture, container runtime, etc. validations: required: false - type: textarea id: steps attributes: label: Steps to reproduce description: Provide as many concrete steps to reproduce the defect. validations: required: false nats.go-1.41.0/.github/ISSUE_TEMPLATE/proposal.yml000066400000000000000000000013241477351342400212650ustar00rootroot00000000000000--- name: Proposal description: Propose an enhancement or new feature. labels: - proposal body: - type: textarea id: change attributes: label: Proposed change description: This could be a behavior change, enhanced API, or a new feature. validations: required: true - type: textarea id: usecase attributes: label: Use case description: What is the use case or general motivation for this proposal? validations: required: true - type: textarea id: contribute attributes: label: Contribution description: |- Are you intending or interested in contributing code for this proposal if accepted? validations: required: false nats.go-1.41.0/.github/workflows/000077500000000000000000000000001477351342400165555ustar00rootroot00000000000000nats.go-1.41.0/.github/workflows/ci.yaml000066400000000000000000000046341477351342400200430ustar00rootroot00000000000000name: ci on: push: branches: - main pull_request: release: types: [published] jobs: lint: runs-on: ubuntu-latest-8-cores steps: - name: Checkout code uses: actions/checkout@v4 - name: Setup Go uses: actions/setup-go@v5 with: go-version: 'stable' - name: Install deps shell: bash --noprofile --norc -x -eo pipefail {0} run: | go get -t ./... 
go install honnef.co/go/tools/cmd/staticcheck@latest go install github.com/client9/misspell/cmd/misspell@latest go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest - name: Run linters shell: bash --noprofile --norc -x -eo pipefail {0} run: | $(exit $(go fmt -modfile=go_test.mod ./... | wc -l)) go vet -modfile=go_test.mod ./... GOFLAGS="-mod=mod -modfile=go_test.mod" staticcheck ./... find . -type f -name "*.go" | xargs misspell -error -locale US golangci-lint run --timeout 5m0s ./jetstream/... test: runs-on: ubuntu-latest-8-cores strategy: matrix: go: [ "1.23", "1.24" ] steps: - name: Checkout code uses: actions/checkout@v4 - name: Setup Go uses: actions/setup-go@v5 with: go-version: ${{ matrix.go }} - name: Install deps shell: bash --noprofile --norc -x -eo pipefail {0} run: | go install github.com/mattn/goveralls@latest go install github.com/wadey/gocovmerge@latest - name: Test and coverage shell: bash --noprofile --norc -x -eo pipefail {0} run: | go test -modfile=go_test.mod -v -run=TestNoRace -p=1 ./... --failfast -vet=off if [ "${{ matrix.go }}" = "1.24" ]; then ./scripts/cov.sh CI else go test -modfile=go_test.mod -race -v -p=1 ./... 
--failfast -vet=off -tags=internal_testing fi - name: Coveralls if: matrix.go == '1.24' uses: coverallsapp/github-action@v2 with: file: acc.outnats.go-1.41.0/.github/workflows/dependencies.yaml000066400000000000000000000033101477351342400220640ustar00rootroot00000000000000name: License Check on: push: paths: - 'go.mod' branches: - main jobs: license-check: runs-on: ubuntu-latest env: BRANCH_NAME: update-report-branch-${{ github.run_id }} steps: - name: Checkout repository uses: actions/checkout@v3 with: fetch-depth: 0 # Fetch all history for all branches and tags - name: Set up Go uses: actions/setup-go@v5 with: go-version: '1.22' - name: Install go-licenses run: go install github.com/google/go-licenses@latest # We need this step because of test dependencies and how they are handled in nats.go - name: Run go mod tidy run: go mod tidy - name: Run license check run: go-licenses report ./... --template dependencies.tpl > dependencies.md - name: Configure git run: | git config user.name 'github-actions[bot]' git config user.email 'github-actions[bot]@users.noreply.github.com' - name: Check for changes id: git_diff run: | git fetch git diff --exit-code dependencies.md || echo "has_changes=true" >> $GITHUB_ENV - name: Commit changes if: env.has_changes == 'true' run: | git checkout -b "$BRANCH_NAME" git add dependencies.md git commit -m "Update dependencies.md" git push -u origin "$BRANCH_NAME" - name: Create Pull Request if: env.has_changes == 'true' env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | gh pr create --title "Update dependencies.md" --body "This PR updates the dependencies report" --head "$BRANCH_NAME" --base main nats.go-1.41.0/.github/workflows/latest-server.yaml000066400000000000000000000015551477351342400222470ustar00rootroot00000000000000name: Test nats-server@main on: schedule: - cron: "30 8 * * *" jobs: test: runs-on: ubuntu-latest steps: - name: Checkout code uses: actions/checkout@v4 - name: Setup Go uses: actions/setup-go@v5 with: go-version: 
'stable' - name: Get latest server shell: bash --noprofile --norc -x -eo pipefail {0} run: | go get -modfile go_test.mod github.com/nats-io/nats-server/v2@main - name: Test shell: bash --noprofile --norc -x -eo pipefail {0} run: | go test -modfile=go_test.mod -v -run=TestNoRace -p=1 ./... --failfast -vet=off go test -modfile=go_test.mod -race -v -p=1 ./... --failfast -vet=off -tags=internal_testingnats.go-1.41.0/.gitignore000066400000000000000000000006001477351342400151440ustar00rootroot00000000000000# Compiled Object files, Static and Dynamic libs (Shared Objects) *.o *.a *.so # Folders _obj _test # Architecture specific extensions/prefixes *.[568vq] [568vq].out *.cgo1.go *.cgo2.c _cgo_defun.c _cgo_gotypes.go _cgo_export.* _testmain.go *.exe # Emacs *~ \#*\# .\#* # vi/vim .??*.swp # Mac .DS_Store # Eclipse .project .settings/ # bin # Goland .idea # VS Code .vscode nats.go-1.41.0/.golangci.yaml000066400000000000000000000004501477351342400157040ustar00rootroot00000000000000issues: max-issues-per-linter: 0 max-same-issues: 0 exclude-rules: - linters: - errcheck text: "Unsubscribe" - linters: - errcheck text: "Drain" - linters: - errcheck text: "msg.Ack" - linters: - errcheck text: "watcher.Stop" nats.go-1.41.0/.words000066400000000000000000000013421477351342400143170ustar00rootroot000000000000001 derek dlc ivan acknowledgement/SM arity deduplication/S demarshal/SDG durables iff observable/S redelivery/S retransmitting retry/SB SlowConsumer AppendInt ReadMIMEHeader clientProtoZero jetstream v1 v2 ack/SGD auth authToken chans creds config/S cseq impl msgh msgId mux/S nack ptr puback scanf stderr stdout structs tm todo unsub/S permessage permessage-deflate urlA urlB websocket ws wss NKey pList backend/S backoff/S decompressor/CGS inflight inlined lookups reconnection/MS redeliver/ADGS responder/S rewrap/S rollup/S unreceive/DRSZGB variadic wakeup/S whitespace wrap/AS omitempty apache html ietf www sum256 32bit/S 64bit/S 64k 128k 512k hacky handroll/D rfc6455 rfc7692 
0x00 0xff 20x 40x 50x ErrXXX atlanta eu nats.go-1.41.0/.words.readme000066400000000000000000000021321477351342400155510ustar00rootroot00000000000000The .words file is used by gospel (v1.2+), which wraps the Hunspell libraries but populates the dictionary with identifiers from the Go source. Alas, no comments are allowed in the .words file and newer versions of gospel error out on seeing them. This is really a hunspell restriction. We assume en_US hunspell dictionaries are installed and used. The /AFFIXRULES are defined in en_US.aff (eg: /usr/share/hunspell/en_US.aff) Invoke `hunspell -D` to see the actual locations. Words which are in the base dictionary can't have extra affix rules added to them, so we have to start with the affixed variant we want to add. Thus `creds` rather than `cred/S` and so on. So we can't use receive/DRSZGBU, adding 'U', to allow unreceive and variants, we have to use unreceive as the stem. We can't define our own affix or compound rules, to capture rfc\d{3,} or 0x[0-9A-Fa-f]{2} The spelling tokenizer doesn't take "permessage-deflate" as allowing for ... "permessage-deflate", which is an RFC7692 registered extension for websockets. We have to explicitly list "permessage". nats.go-1.41.0/CODE-OF-CONDUCT.md000066400000000000000000000002121477351342400156060ustar00rootroot00000000000000## Community Code of Conduct NATS follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). nats.go-1.41.0/CONTRIBUTING.md000066400000000000000000000075471477351342400154260ustar00rootroot00000000000000# Contributing Thanks for your interest in contributing! This document contains `nats-io/nats.go` specific contributing details. If you are a first-time contributor, please refer to the general [NATS Contributor Guide](https://nats.io/contributing/) to get a comprehensive overview of contributing to the NATS project. 
## Getting started There are three general ways you can contribute to this repo: - Proposing an enhancement or new feature - Reporting a bug or regression - Contributing changes to the source code For the first two, refer to the [GitHub Issues](https://github.com/nats-io/nats.go/issues/new/choose) which guides you through the available options along with the needed information to collect. ## Contributing changes _Prior to opening a pull request, it is recommended to open an issue first to ensure the maintainers can review intended changes. Exceptions to this rule include fixing non-functional source such as code comments, documentation or other supporting files._ Proposing source code changes is done through GitHub's standard pull request workflow. If your branch is a work-in-progress then please start by creating your pull requests as draft, by clicking the down-arrow next to the `Create pull request` button and instead selecting `Create draft pull request`. This will defer the automatic process of requesting a review from the NATS team and significantly reduces noise until you are ready. Once you are happy, you can click the `Ready for review` button. ### Guidelines A good pull request includes: - A high-level description of the changes, including links to any issues that are related by adding comments like `Resolves #NNN` to your description. See [Linking a Pull Request to an Issue](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue) for more information. - An up-to-date parent commit. Please make sure you are pulling in the latest `main` branch and rebasing your work on top of it, i.e. `git rebase main`. - Unit tests where appropriate. Bug fixes will benefit from the addition of regression tests. New features will not be accepted without suitable test coverage! - No more commits than necessary. 
Sometimes having multiple commits is useful for telling a story or isolating changes from one another, but please squash down any unnecessary commits that may just be for clean-up, comments or small changes. - No additional external dependencies that aren't absolutely essential. Please do everything you can to avoid pulling in additional libraries/dependencies into `go.mod` as we will be very critical of these. ### Sign-off In order to accept a contribution, you will first need to certify that the contribution is your original work and that you license the work to the project under the [Apache-2.0 license](https://github.com/nats-io/nats.go/blob/main/LICENSE). This is done by using `Signed-off-by` statements, which should appear in **both** your commit messages and your PR description. Please note that we can only accept sign-offs under a legal name. Nicknames and aliases are not permitted. To perform a sign-off with `git`, use `git commit -s` (or `--signoff`). ## Get help If you have questions about the contribution process, please start a [GitHub discussion](https://github.com/nats-io/nats.go/discussions), join the [NATS Slack](https://slack.nats.io/), or send your question to the [NATS Google Group](https://groups.google.com/forum/#!forum/natsio). ## Testing You should use `go_test.mod` to manage your testing dependencies. Please use the following command to update your dependencies and avoid changing the main `go.mod` in a PR: ```shell go mod tidy -modfile=go_test.mod ``` To the tests you can pass `-modfile=go_test.mod` flag to `go test` or instead you can also set `GOFLAGS="-modfile=go_test.mod"` as an environment variable: ```shell go test ./... 
-modfile=go_test.mod ``` nats.go-1.41.0/GOVERNANCE.md000066400000000000000000000002751477351342400151350ustar00rootroot00000000000000# NATS Go Client Governance NATS Go Client (go-nats) is part of the NATS project and is subject to the [NATS Governance](https://github.com/nats-io/nats-general/blob/master/GOVERNANCE.md).nats.go-1.41.0/LICENSE000066400000000000000000000261351477351342400141740ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. nats.go-1.41.0/MAINTAINERS.md000066400000000000000000000004661477351342400152620ustar00rootroot00000000000000# Maintainers Maintainership is on a per project basis. ### Maintainers - Derek Collison [@derekcollison](https://github.com/derekcollison) - Ivan Kozlovic [@kozlovic](https://github.com/kozlovic) - Waldemar Quevedo [@wallyqs](https://github.com/wallyqs) nats.go-1.41.0/README.md000066400000000000000000000325551477351342400144510ustar00rootroot00000000000000# NATS - Go Client A [Go](http://golang.org) client for the [NATS messaging system](https://nats.io). 
[![License Apache 2][License-Image]][License-Url] [![Go Report Card][ReportCard-Image]][ReportCard-Url] [![Build Status][Build-Status-Image]][Build-Status-Url] [![GoDoc][GoDoc-Image]][GoDoc-Url] [![Coverage Status][Coverage-image]][Coverage-Url] [License-Url]: https://www.apache.org/licenses/LICENSE-2.0 [License-Image]: https://img.shields.io/badge/License-Apache2-blue.svg [ReportCard-Url]: https://goreportcard.com/report/github.com/nats-io/nats.go [ReportCard-Image]: https://goreportcard.com/badge/github.com/nats-io/nats.go [Build-Status-Url]: https://github.com/nats-io/nats.go/actions [Build-Status-Image]: https://github.com/nats-io/nats.go/actions/workflows/ci.yaml/badge.svg?branch=main [GoDoc-Url]: https://pkg.go.dev/github.com/nats-io/nats.go [GoDoc-Image]: https://img.shields.io/badge/GoDoc-reference-007d9c [Coverage-Url]: https://coveralls.io/r/nats-io/nats.go?branch=main [Coverage-image]: https://coveralls.io/repos/github/nats-io/nats.go/badge.svg?branch=main **Check out [NATS by example](https://natsbyexample.com) - An evolving collection of runnable, cross-client reference examples for NATS.** ## Installation ```bash # To get the latest released Go client: go get github.com/nats-io/nats.go@latest # To get a specific version: go get github.com/nats-io/nats.go@v1.41.0 # Note that the latest major version for NATS Server is v2: go get github.com/nats-io/nats-server/v2@latest ``` ## Basic Usage ```go import "github.com/nats-io/nats.go" // Connect to a server nc, _ := nats.Connect(nats.DefaultURL) // Simple Publisher nc.Publish("foo", []byte("Hello World")) // Simple Async Subscriber nc.Subscribe("foo", func(m *nats.Msg) { fmt.Printf("Received a message: %s\n", string(m.Data)) }) // Responding to a request message nc.Subscribe("request", func(m *nats.Msg) { m.Respond([]byte("answer is 42")) }) // Simple Sync Subscriber sub, err := nc.SubscribeSync("foo") m, err := sub.NextMsg(timeout) // Channel Subscriber ch := make(chan *nats.Msg, 64) sub, err := 
nc.ChanSubscribe("foo", ch) msg := <- ch // Unsubscribe sub.Unsubscribe() // Drain sub.Drain() // Requests msg, err := nc.Request("help", []byte("help me"), 10*time.Millisecond) // Replies nc.Subscribe("help", func(m *nats.Msg) { nc.Publish(m.Reply, []byte("I can help!")) }) // Drain connection (Preferred for responders) // Close() not needed if this is called. nc.Drain() // Close connection nc.Close() ``` ## JetStream [![JetStream API Reference](https://pkg.go.dev/badge/github.com/nats-io/nats.go/jetstream.svg)](https://pkg.go.dev/github.com/nats-io/nats.go/jetstream) JetStream is the built-in NATS persistence system. `nats.go` provides a built-in API enabling both managing JetStream assets as well as publishing/consuming persistent messages. ### Basic usage ```go // connect to nats server nc, _ := nats.Connect(nats.DefaultURL) // create jetstream context from nats connection js, _ := jetstream.New(nc) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() // get existing stream handle stream, _ := js.Stream(ctx, "foo") // retrieve consumer handle from a stream cons, _ := stream.Consumer(ctx, "cons") // consume messages from the consumer in callback cc, _ := cons.Consume(func(msg jetstream.Msg) { fmt.Println("Received jetstream message: ", string(msg.Data())) msg.Ack() }) defer cc.Stop() ``` To find more information on `nats.go` JetStream API, visit [`jetstream/README.md`](jetstream/README.md) > The current JetStream API replaces the [legacy JetStream API](legacy_jetstream.md) ## Service API The service API (`micro`) allows you to [easily build NATS services](micro/README.md) The services API is currently in beta release. ## New Authentication (Nkeys and User Credentials) This requires server with version >= 2.0.0 NATS servers have a new security and authentication mechanism to authenticate with user credentials and Nkeys. The simplest form is to use the helper method UserCredentials(credsFilepath). 
```go nc, err := nats.Connect(url, nats.UserCredentials("user.creds")) ``` The helper methods creates two callback handlers to present the user JWT and sign the nonce challenge from the server. The core client library never has direct access to your private key and simply performs the callback for signing the server challenge. The helper will load and wipe and erase memory it uses for each connect or reconnect. The helper also can take two entries, one for the JWT and one for the NKey seed file. ```go nc, err := nats.Connect(url, nats.UserCredentials("user.jwt", "user.nk")) ``` You can also set the callback handlers directly and manage challenge signing directly. ```go nc, err := nats.Connect(url, nats.UserJWT(jwtCB, sigCB)) ``` Bare Nkeys are also supported. The nkey seed should be in a read only file, e.g. seed.txt ```bash > cat seed.txt # This is my seed nkey! SUAGMJH5XLGZKQQWAWKRZJIGMOU4HPFUYLXJMXOO5NLFEO2OOQJ5LPRDPM ``` This is a helper function which will load and decode and do the proper signing for the server nonce. It will clear memory in between invocations. You can choose to use the low level option and provide the public key and a signature callback on your own. ```go opt, err := nats.NkeyOptionFromSeed("seed.txt") nc, err := nats.Connect(serverUrl, opt) // Direct nc, err := nats.Connect(serverUrl, nats.Nkey(pubNkey, sigCB)) ``` ## TLS ```go // tls as a scheme will enable secure connections by default. This will also verify the server name. nc, err := nats.Connect("tls://nats.demo.io:4443") // If you are using a self-signed certificate, you need to have a tls.Config with RootCAs setup. // We provide a helper method to make this case easier. 
nc, err = nats.Connect("tls://localhost:4443", nats.RootCAs("./configs/certs/ca.pem")) // If the server requires client certificate, there is an helper function for that too: cert := nats.ClientCert("./configs/certs/client-cert.pem", "./configs/certs/client-key.pem") nc, err = nats.Connect("tls://localhost:4443", cert) // You can also supply a complete tls.Config certFile := "./configs/certs/client-cert.pem" keyFile := "./configs/certs/client-key.pem" cert, err := tls.LoadX509KeyPair(certFile, keyFile) if err != nil { t.Fatalf("error parsing X509 certificate/key pair: %v", err) } config := &tls.Config{ ServerName: opts.Host, Certificates: []tls.Certificate{cert}, RootCAs: pool, MinVersion: tls.VersionTLS12, } nc, err = nats.Connect("nats://localhost:4443", nats.Secure(config)) if err != nil { t.Fatalf("Got an error on Connect with Secure Options: %+v\n", err) } ``` ## Wildcard Subscriptions ```go // "*" matches any token, at any level of the subject. nc.Subscribe("foo.*.baz", func(m *Msg) { fmt.Printf("Msg received on [%s] : %s\n", m.Subject, string(m.Data)); }) nc.Subscribe("foo.bar.*", func(m *Msg) { fmt.Printf("Msg received on [%s] : %s\n", m.Subject, string(m.Data)); }) // ">" matches any length of the tail of a subject, and can only be the last token // E.g. 'foo.>' will match 'foo.bar', 'foo.bar.baz', 'foo.foo.bar.bax.22' nc.Subscribe("foo.>", func(m *Msg) { fmt.Printf("Msg received on [%s] : %s\n", m.Subject, string(m.Data)); }) // Matches all of the above nc.Publish("foo.bar.baz", []byte("Hello World")) ``` ## Queue Groups ```go // All subscriptions with the same queue name will form a queue group. // Each message will be delivered to only one subscriber per queue group, // using queuing semantics. You can have as many queue groups as you wish. // Normal subscribers will continue to work as expected. 
nc.QueueSubscribe("foo", "job_workers", func(_ *Msg) { received += 1; }) ``` ## Advanced Usage ```go // Normally, the library will return an error when trying to connect and // there is no server running. The RetryOnFailedConnect option will set // the connection in reconnecting state if it failed to connect right away. nc, err := nats.Connect(nats.DefaultURL, nats.RetryOnFailedConnect(true), nats.MaxReconnects(10), nats.ReconnectWait(time.Second), nats.ReconnectHandler(func(_ *nats.Conn) { // Note that this will be invoked for the first asynchronous connect. })) if err != nil { // Should not return an error even if it can't connect, but you still // need to check in case there are some configuration errors. } // Flush connection to server, returns when all messages have been processed. nc.Flush() fmt.Println("All clear!") // FlushTimeout specifies a timeout value as well. err := nc.FlushTimeout(1*time.Second) if err != nil { fmt.Println("All clear!") } else { fmt.Println("Flushed timed out!") } // Auto-unsubscribe after MAX_WANTED messages received const MAX_WANTED = 10 sub, err := nc.Subscribe("foo") sub.AutoUnsubscribe(MAX_WANTED) // Multiple connections nc1 := nats.Connect("nats://host1:4222") nc2 := nats.Connect("nats://host2:4222") nc1.Subscribe("foo", func(m *Msg) { fmt.Printf("Received a message: %s\n", string(m.Data)) }) nc2.Publish("foo", []byte("Hello World!")); ``` ## Clustered Usage ```go var servers = "nats://localhost:1222, nats://localhost:1223, nats://localhost:1224" nc, err := nats.Connect(servers) // Optionally set ReconnectWait and MaxReconnect attempts. // This example means 10 seconds total per backend. nc, err = nats.Connect(servers, nats.MaxReconnects(5), nats.ReconnectWait(2 * time.Second)) // You can also add some jitter for the reconnection. // This call will add up to 500 milliseconds for non TLS connections and 2 seconds for TLS connections. // If not specified, the library defaults to 100 milliseconds and 1 second, respectively. 
nc, err = nats.Connect(servers, nats.ReconnectJitter(500*time.Millisecond, 2*time.Second)) // You can also specify a custom reconnect delay handler. If set, the library will invoke it when it has tried // all URLs in its list. The value returned will be used as the total sleep time, so add your own jitter. // The library will pass the number of times it went through the whole list. nc, err = nats.Connect(servers, nats.CustomReconnectDelay(func(attempts int) time.Duration { return someBackoffFunction(attempts) })) // Optionally disable randomization of the server pool nc, err = nats.Connect(servers, nats.DontRandomize()) // Setup callbacks to be notified on disconnects, reconnects and connection closed. nc, err = nats.Connect(servers, nats.DisconnectErrHandler(func(nc *nats.Conn, err error) { fmt.Printf("Got disconnected! Reason: %q\n", err) }), nats.ReconnectHandler(func(nc *nats.Conn) { fmt.Printf("Got reconnected to %v!\n", nc.ConnectedUrl()) }), nats.ClosedHandler(func(nc *nats.Conn) { fmt.Printf("Connection closed. Reason: %q\n", nc.LastError()) }) ) // When connecting to a mesh of servers with auto-discovery capabilities, // you may need to provide a username/password or token in order to connect // to any server in that mesh when authentication is required. // Instead of providing the credentials in the initial URL, you will use // new option setters: nc, err = nats.Connect("nats://localhost:4222", nats.UserInfo("foo", "bar")) // For token based authentication: nc, err = nats.Connect("nats://localhost:4222", nats.Token("S3cretT0ken")) // You can even pass the two at the same time in case one of the server // in the mesh requires token instead of user name and password. nc, err = nats.Connect("nats://localhost:4222", nats.UserInfo("foo", "bar"), nats.Token("S3cretT0ken")) // Note that if credentials are specified in the initial URLs, they take // precedence on the credentials specified through the options. 
// For instance, in the connect call below, the client library will use // the user "my" and password "pwd" to connect to localhost:4222, however, // it will use username "foo" and password "bar" when (re)connecting to // a different server URL that it got as part of the auto-discovery. nc, err = nats.Connect("nats://my:pwd@localhost:4222", nats.UserInfo("foo", "bar")) ``` ## Context support (+Go 1.7) ```go ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() nc, err := nats.Connect(nats.DefaultURL) // Request with context msg, err := nc.RequestWithContext(ctx, "foo", []byte("bar")) // Synchronous subscriber with context sub, err := nc.SubscribeSync("foo") msg, err := sub.NextMsgWithContext(ctx) ``` ## Backwards compatibility In the development of nats.go, we are committed to maintaining backward compatibility and ensuring a stable and reliable experience for all users. In general, we follow the standard go compatibility guidelines. However, it's important to clarify our stance on certain types of changes: - **Expanding structures:** Adding new fields to structs is not considered a breaking change. - **Adding methods to exported interfaces:** Extending public interfaces with new methods is also not viewed as a breaking change within the context of this project. It is important to note that no unexported methods will be added to interfaces allowing users to implement them. Additionally, this library always supports at least 2 latest minor Go versions. For example, if the latest Go version is 1.22, the library will support Go 1.21 and 1.22. ## License Unless otherwise noted, the NATS source files are distributed under the Apache Version 2.0 license found in the LICENSE file. 
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fnats-io%2Fgo-nats.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fnats-io%2Fgo-nats?ref=badge_large) nats.go-1.41.0/bench/000077500000000000000000000000001477351342400142375ustar00rootroot00000000000000nats.go-1.41.0/bench/bench.go000066400000000000000000000226661477351342400156610ustar00rootroot00000000000000// Copyright 2016-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bench import ( "bytes" "encoding/csv" "fmt" "log" "math" "strconv" "time" "github.com/nats-io/nats.go" "github.com/nats-io/nuid" ) // A Sample for a particular client type Sample struct { JobMsgCnt int MsgCnt uint64 MsgBytes uint64 IOBytes uint64 Start time.Time End time.Time } // SampleGroup for a number of samples, the group is a Sample itself aggregating the values the Samples type SampleGroup struct { Sample Samples []*Sample } // Benchmark to hold the various Samples organized by publishers and subscribers type Benchmark struct { Sample Name string RunID string Pubs *SampleGroup Subs *SampleGroup subChannel chan *Sample pubChannel chan *Sample } // NewBenchmark initializes a Benchmark. After creating a bench call AddSubSample/AddPubSample. 
// When done collecting samples, call EndBenchmark func NewBenchmark(name string, subCnt, pubCnt int) *Benchmark { bm := Benchmark{Name: name, RunID: nuid.Next()} bm.Subs = NewSampleGroup() bm.Pubs = NewSampleGroup() bm.subChannel = make(chan *Sample, subCnt) bm.pubChannel = make(chan *Sample, pubCnt) return &bm } // Close organizes collected Samples and calculates aggregates. After Close(), no more samples can be added. func (bm *Benchmark) Close() { close(bm.subChannel) close(bm.pubChannel) for s := range bm.subChannel { bm.Subs.AddSample(s) } for s := range bm.pubChannel { bm.Pubs.AddSample(s) } if bm.Subs.HasSamples() { bm.Start = bm.Subs.Start bm.End = bm.Subs.End } else { bm.Start = bm.Pubs.Start bm.End = bm.Pubs.End } if bm.Subs.HasSamples() && bm.Pubs.HasSamples() { if bm.Start.After(bm.Subs.Start) { bm.Start = bm.Subs.Start } if bm.Start.After(bm.Pubs.Start) { bm.Start = bm.Pubs.Start } if bm.End.Before(bm.Subs.End) { bm.End = bm.Subs.End } if bm.End.Before(bm.Pubs.End) { bm.End = bm.Pubs.End } } bm.MsgBytes = bm.Pubs.MsgBytes + bm.Subs.MsgBytes bm.IOBytes = bm.Pubs.IOBytes + bm.Subs.IOBytes bm.MsgCnt = bm.Pubs.MsgCnt + bm.Subs.MsgCnt bm.JobMsgCnt = bm.Pubs.JobMsgCnt + bm.Subs.JobMsgCnt } // AddSubSample to the benchmark func (bm *Benchmark) AddSubSample(s *Sample) { bm.subChannel <- s } // AddPubSample to the benchmark func (bm *Benchmark) AddPubSample(s *Sample) { bm.pubChannel <- s } // CSV generates a csv report of all the samples collected func (bm *Benchmark) CSV() string { var buffer bytes.Buffer writer := csv.NewWriter(&buffer) headers := []string{"#RunID", "ClientID", "MsgCount", "MsgBytes", "MsgsPerSec", "BytesPerSec", "DurationSecs"} if err := writer.Write(headers); err != nil { log.Fatalf("Error while serializing headers %q: %v", headers, err) } groups := []*SampleGroup{bm.Subs, bm.Pubs} pre := "S" for i, g := range groups { if i == 1 { pre = "P" } for j, c := range g.Samples { r := []string{bm.RunID, fmt.Sprintf("%s%d", pre, j), 
fmt.Sprintf("%d", c.MsgCnt), fmt.Sprintf("%d", c.MsgBytes), fmt.Sprintf("%d", c.Rate()), fmt.Sprintf("%f", c.Throughput()), fmt.Sprintf("%f", c.Duration().Seconds())} if err := writer.Write(r); err != nil { log.Fatalf("Error while serializing %v: %v", c, err) } } } writer.Flush() return buffer.String() } // NewSample creates a new Sample initialized to the provided values. The nats.Conn information captured func NewSample(jobCount int, msgSize int, start, end time.Time, nc *nats.Conn) *Sample { s := Sample{JobMsgCnt: jobCount, Start: start, End: end} s.MsgBytes = uint64(msgSize * jobCount) s.MsgCnt = nc.OutMsgs + nc.InMsgs s.IOBytes = nc.OutBytes + nc.InBytes return &s } // Throughput of bytes per second func (s *Sample) Throughput() float64 { return float64(s.MsgBytes) / s.Duration().Seconds() } // Rate of messages in the job per second func (s *Sample) Rate() int64 { return int64(float64(s.JobMsgCnt) / s.Duration().Seconds()) } func (s *Sample) String() string { rate := commaFormat(s.Rate()) throughput := HumanBytes(s.Throughput(), false) return fmt.Sprintf("%s msgs/sec ~ %s/sec", rate, throughput) } // Duration that the sample was active func (s *Sample) Duration() time.Duration { return s.End.Sub(s.Start) } // Seconds that the sample or samples were active func (s *Sample) Seconds() float64 { return s.Duration().Seconds() } // NewSampleGroup initializer func NewSampleGroup() *SampleGroup { s := new(SampleGroup) s.Samples = make([]*Sample, 0) return s } // Statistics information of the sample group (min, average, max and standard deviation) func (sg *SampleGroup) Statistics() string { return fmt.Sprintf("min %s | avg %s | max %s | stddev %s msgs", commaFormat(sg.MinRate()), commaFormat(sg.AvgRate()), commaFormat(sg.MaxRate()), commaFormat(int64(sg.StdDev()))) } // MinRate returns the smallest message rate in the SampleGroup func (sg *SampleGroup) MinRate() int64 { m := int64(0) for i, s := range sg.Samples { if i == 0 { m = s.Rate() } m = min(m, s.Rate()) } 
return m } // MaxRate returns the largest message rate in the SampleGroup func (sg *SampleGroup) MaxRate() int64 { m := int64(0) for i, s := range sg.Samples { if i == 0 { m = s.Rate() } m = max(m, s.Rate()) } return m } // AvgRate returns the average of all the message rates in the SampleGroup func (sg *SampleGroup) AvgRate() int64 { if !sg.HasSamples() { return 0 } sum := uint64(0) for _, s := range sg.Samples { sum += uint64(s.Rate()) } return int64(sum / uint64(len(sg.Samples))) } // StdDev returns the standard deviation the message rates in the SampleGroup func (sg *SampleGroup) StdDev() float64 { if !sg.HasSamples() { return 0 } avg := float64(sg.AvgRate()) sum := float64(0) for _, c := range sg.Samples { sum += math.Pow(float64(c.Rate())-avg, 2) } variance := sum / float64(len(sg.Samples)) return math.Sqrt(variance) } // AddSample adds a Sample to the SampleGroup. After adding a Sample it shouldn't be modified. func (sg *SampleGroup) AddSample(e *Sample) { sg.Samples = append(sg.Samples, e) if len(sg.Samples) == 1 { sg.Start = e.Start sg.End = e.End } sg.IOBytes += e.IOBytes sg.JobMsgCnt += e.JobMsgCnt sg.MsgCnt += e.MsgCnt sg.MsgBytes += e.MsgBytes if e.Start.Before(sg.Start) { sg.Start = e.Start } if e.End.After(sg.End) { sg.End = e.End } } // HasSamples returns true if the group has samples func (sg *SampleGroup) HasSamples() bool { return len(sg.Samples) > 0 } // Report returns a human readable report of the samples taken in the Benchmark func (bm *Benchmark) Report() string { var buffer bytes.Buffer indent := "" if !bm.Pubs.HasSamples() && !bm.Subs.HasSamples() { return "No publisher or subscribers. Nothing to report." 
} if bm.Pubs.HasSamples() && bm.Subs.HasSamples() { buffer.WriteString(fmt.Sprintf("%s Pub/Sub stats: %s\n", bm.Name, bm)) indent += " " } if bm.Pubs.HasSamples() { buffer.WriteString(fmt.Sprintf("%sPub stats: %s\n", indent, bm.Pubs)) if len(bm.Pubs.Samples) > 1 { for i, stat := range bm.Pubs.Samples { buffer.WriteString(fmt.Sprintf("%s [%d] %v (%d msgs)\n", indent, i+1, stat, stat.JobMsgCnt)) } buffer.WriteString(fmt.Sprintf("%s %s\n", indent, bm.Pubs.Statistics())) } } if bm.Subs.HasSamples() { buffer.WriteString(fmt.Sprintf("%sSub stats: %s\n", indent, bm.Subs)) if len(bm.Subs.Samples) > 1 { for i, stat := range bm.Subs.Samples { buffer.WriteString(fmt.Sprintf("%s [%d] %v (%d msgs)\n", indent, i+1, stat, stat.JobMsgCnt)) } buffer.WriteString(fmt.Sprintf("%s %s\n", indent, bm.Subs.Statistics())) } } return buffer.String() } func commaFormat(n int64) string { in := strconv.FormatInt(n, 10) out := make([]byte, len(in)+(len(in)-2+int(in[0]/'0'))/3) if in[0] == '-' { in, out[0] = in[1:], '-' } for i, j, k := len(in)-1, len(out)-1, 0; ; i, j = i-1, j-1 { out[j] = in[i] if i == 0 { return string(out) } if k++; k == 3 { j, k = j-1, 0 out[j] = ',' } } } // HumanBytes formats bytes as a human readable string func HumanBytes(bytes float64, si bool) string { var base = 1024 pre := []string{"K", "M", "G", "T", "P", "E"} var post = "B" if si { base = 1000 pre = []string{"k", "M", "G", "T", "P", "E"} post = "iB" } if bytes < float64(base) { return fmt.Sprintf("%.2f B", bytes) } exp := int(math.Log(bytes) / math.Log(float64(base))) index := exp - 1 units := pre[index] + post return fmt.Sprintf("%.2f %s", bytes/math.Pow(float64(base), float64(exp)), units) } func min(x, y int64) int64 { if x < y { return x } return y } func max(x, y int64) int64 { if x > y { return x } return y } // MsgsPerClient divides the number of messages by the number of clients and tries to distribute them as evenly as possible func MsgsPerClient(numMsgs, numClients int) []int { var counts []int if 
numClients == 0 || numMsgs == 0 { return counts } counts = make([]int, numClients) mc := numMsgs / numClients for i := 0; i < numClients; i++ { counts[i] = mc } extra := numMsgs % numClients for i := 0; i < extra; i++ { counts[i]++ } return counts } nats.go-1.41.0/bench/benchlib_test.go000066400000000000000000000143621477351342400174010ustar00rootroot00000000000000// Copyright 2016-2019 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bench import ( "fmt" "strings" "testing" "time" "github.com/nats-io/nats.go" ) const ( MsgSize = 8 Million = 1000 * 1000 ) var baseTime = time.Now() func millionMessagesSecondSample(seconds int) *Sample { messages := Million * seconds start := baseTime end := start.Add(time.Second * time.Duration(seconds)) nc := new(nats.Conn) s := NewSample(messages, MsgSize, start, end, nc) s.MsgCnt = uint64(messages) s.MsgBytes = uint64(messages * MsgSize) s.IOBytes = s.MsgBytes return s } func TestDuration(t *testing.T) { s := millionMessagesSecondSample(1) duration := s.End.Sub(s.Start) if duration != s.Duration() || duration != time.Second { t.Fatal("Expected sample duration to be 1 second") } } func TestSeconds(t *testing.T) { s := millionMessagesSecondSample(1) seconds := s.End.Sub(s.Start).Seconds() if seconds != s.Seconds() || seconds != 1.0 { t.Fatal("Expected sample seconds to be 1 second") } } func TestRate(t *testing.T) { s := millionMessagesSecondSample(60) if s.Rate() != Million { t.Fatal("Expected rate at 1 million 
msgs") } } func TestThoughput(t *testing.T) { s := millionMessagesSecondSample(60) if s.Throughput() != Million*MsgSize { t.Fatalf("Expected throughput at %d million bytes/sec", MsgSize) } } func TestStrings(t *testing.T) { s := millionMessagesSecondSample(60) if len(s.String()) == 0 { t.Fatal("Sample didn't provide a String") } } func TestGroupDuration(t *testing.T) { sg := NewSampleGroup() sg.AddSample(millionMessagesSecondSample(1)) sg.AddSample(millionMessagesSecondSample(2)) duration := sg.End.Sub(sg.Start) if duration != sg.Duration() || duration != time.Duration(2)*time.Second { t.Fatal("Expected aggregate duration to be 2.0 seconds") } } func TestGroupSeconds(t *testing.T) { sg := NewSampleGroup() sg.AddSample(millionMessagesSecondSample(1)) sg.AddSample(millionMessagesSecondSample(2)) sg.AddSample(millionMessagesSecondSample(3)) seconds := sg.End.Sub(sg.Start).Seconds() if seconds != sg.Seconds() || seconds != 3.0 { t.Fatal("Expected aggregate seconds to be 3.0 seconds") } } func TestGroupRate(t *testing.T) { sg := NewSampleGroup() sg.AddSample(millionMessagesSecondSample(1)) sg.AddSample(millionMessagesSecondSample(2)) sg.AddSample(millionMessagesSecondSample(3)) if sg.Rate() != Million*2 { t.Fatal("Expected MsgRate at 2 million msg/sec") } } func TestGroupThoughput(t *testing.T) { sg := NewSampleGroup() sg.AddSample(millionMessagesSecondSample(1)) sg.AddSample(millionMessagesSecondSample(2)) sg.AddSample(millionMessagesSecondSample(3)) if sg.Throughput() != 2*Million*MsgSize { t.Fatalf("Expected throughput at %d million bytes/sec", 2*MsgSize) } } func TestMinMaxRate(t *testing.T) { sg := NewSampleGroup() sg.AddSample(millionMessagesSecondSample(1)) sg.AddSample(millionMessagesSecondSample(2)) sg.AddSample(millionMessagesSecondSample(3)) if sg.MinRate() != sg.MaxRate() { t.Fatal("Expected MinRate == MaxRate") } } func TestAvgRate(t *testing.T) { sg := NewSampleGroup() sg.AddSample(millionMessagesSecondSample(1)) 
sg.AddSample(millionMessagesSecondSample(2)) sg.AddSample(millionMessagesSecondSample(3)) if sg.MinRate() != sg.AvgRate() { t.Fatal("Expected MinRate == AvgRate") } } func TestStdDev(t *testing.T) { sg := NewSampleGroup() sg.AddSample(millionMessagesSecondSample(1)) sg.AddSample(millionMessagesSecondSample(2)) sg.AddSample(millionMessagesSecondSample(3)) if sg.StdDev() != 0.0 { t.Fatal("Expected stddev to be zero") } } func TestBenchSetup(t *testing.T) { bench := NewBenchmark("test", 1, 1) bench.AddSubSample(millionMessagesSecondSample(1)) bench.AddPubSample(millionMessagesSecondSample(1)) bench.Close() if len(bench.RunID) == 0 { t.Fatal("Bench doesn't have a RunID") } if len(bench.Pubs.Samples) != 1 { t.Fatal("Expected one publisher") } if len(bench.Subs.Samples) != 1 { t.Fatal("Expected one subscriber") } if bench.MsgCnt != 2*Million { t.Fatal("Expected 2 million msgs") } if bench.IOBytes != 2*Million*MsgSize { t.Fatalf("Expected %d million bytes", 2*MsgSize) } if bench.Duration() != time.Second { t.Fatal("Expected duration to be 1 second") } } func makeBench(subs, pubs int) *Benchmark { bench := NewBenchmark("test", subs, pubs) for i := 0; i < subs; i++ { bench.AddSubSample(millionMessagesSecondSample(1)) } for i := 0; i < pubs; i++ { bench.AddPubSample(millionMessagesSecondSample(1)) } bench.Close() return bench } func TestCsv(t *testing.T) { bench := makeBench(1, 1) csv := bench.CSV() lines := strings.Split(csv, "\n") if len(lines) != 4 { t.Fatal("Expected 4 lines of output from the CSV string") } fields := strings.Split(lines[1], ",") if len(fields) != 7 { t.Fatal("Expected 7 fields") } } func TestBenchStrings(t *testing.T) { bench := makeBench(1, 1) s := bench.Report() lines := strings.Split(s, "\n") if len(lines) != 4 { t.Fatal("Expected 3 lines of output: header, pub, sub, empty") } bench = makeBench(2, 2) s = bench.Report() lines = strings.Split(s, "\n") if len(lines) != 10 { fmt.Printf("%q\n", s) t.Fatal("Expected 11 lines of output: header, pub header, 
pub x 2, stats, sub headers, sub x 2, stats, empty") } } func TestMsgsPerClient(t *testing.T) { zero := MsgsPerClient(0, 0) if len(zero) != 0 { t.Fatal("Expected 0 length for 0 clients") } onetwo := MsgsPerClient(1, 2) if len(onetwo) != 2 || onetwo[0] != 1 || onetwo[1] != 0 { t.Fatal("Expected uneven distribution") } twotwo := MsgsPerClient(2, 2) if len(twotwo) != 2 || twotwo[0] != 1 || twotwo[1] != 1 { t.Fatal("Expected even distribution") } threetwo := MsgsPerClient(3, 2) if len(threetwo) != 2 || threetwo[0] != 2 || threetwo[1] != 1 { t.Fatal("Expected uneven distribution") } } nats.go-1.41.0/context.go000066400000000000000000000133211477351342400151730ustar00rootroot00000000000000// Copyright 2016-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package nats import ( "context" "reflect" ) // RequestMsgWithContext takes a context, a subject and payload // in bytes and request expecting a single response. func (nc *Conn) RequestMsgWithContext(ctx context.Context, msg *Msg) (*Msg, error) { if msg == nil { return nil, ErrInvalidMsg } hdr, err := msg.headerBytes() if err != nil { return nil, err } return nc.requestWithContext(ctx, msg.Subject, hdr, msg.Data) } // RequestWithContext takes a context, a subject and payload // in bytes and request expecting a single response. 
func (nc *Conn) RequestWithContext(ctx context.Context, subj string, data []byte) (*Msg, error) { return nc.requestWithContext(ctx, subj, nil, data) } func (nc *Conn) requestWithContext(ctx context.Context, subj string, hdr, data []byte) (*Msg, error) { if ctx == nil { return nil, ErrInvalidContext } if nc == nil { return nil, ErrInvalidConnection } // Check whether the context is done already before making // the request. if ctx.Err() != nil { return nil, ctx.Err() } var m *Msg var err error // If user wants the old style. if nc.useOldRequestStyle() { m, err = nc.oldRequestWithContext(ctx, subj, hdr, data) } else { mch, token, err := nc.createNewRequestAndSend(subj, hdr, data) if err != nil { return nil, err } var ok bool select { case m, ok = <-mch: if !ok { return nil, ErrConnectionClosed } case <-ctx.Done(): nc.mu.Lock() delete(nc.respMap, token) nc.mu.Unlock() return nil, ctx.Err() } } // Check for no responder status. if err == nil && len(m.Data) == 0 && m.Header.Get(statusHdr) == noResponders { m, err = nil, ErrNoResponders } return m, err } // oldRequestWithContext utilizes inbox and subscription per request. 
func (nc *Conn) oldRequestWithContext(ctx context.Context, subj string, hdr, data []byte) (*Msg, error) { inbox := nc.NewInbox() ch := make(chan *Msg, RequestChanLen) s, err := nc.subscribe(inbox, _EMPTY_, nil, ch, nil, true, nil) if err != nil { return nil, err } s.AutoUnsubscribe(1) defer s.Unsubscribe() err = nc.publish(subj, inbox, hdr, data) if err != nil { return nil, err } return s.NextMsgWithContext(ctx) } func (s *Subscription) nextMsgWithContext(ctx context.Context, pullSubInternal, waitIfNoMsg bool) (*Msg, error) { if ctx == nil { return nil, ErrInvalidContext } if s == nil { return nil, ErrBadSubscription } if ctx.Err() != nil { return nil, ctx.Err() } s.mu.Lock() err := s.validateNextMsgState(pullSubInternal) if err != nil { s.mu.Unlock() return nil, err } // snapshot mch := s.mch s.mu.Unlock() var ok bool var msg *Msg // If something is available right away, let's optimize that case. select { case msg, ok = <-mch: if !ok { return nil, s.getNextMsgErr() } if err := s.processNextMsgDelivered(msg); err != nil { return nil, err } return msg, nil default: // If internal and we don't want to wait, signal that there is no // message in the internal queue. if pullSubInternal && !waitIfNoMsg { return nil, errNoMessages } } select { case msg, ok = <-mch: if !ok { return nil, s.getNextMsgErr() } if err := s.processNextMsgDelivered(msg); err != nil { return nil, err } case <-ctx.Done(): return nil, ctx.Err() } return msg, nil } // NextMsgWithContext takes a context and returns the next message // available to a synchronous subscriber, blocking until it is delivered // or context gets canceled. func (s *Subscription) NextMsgWithContext(ctx context.Context) (*Msg, error) { return s.nextMsgWithContext(ctx, false, true) } // FlushWithContext will allow a context to control the duration // of a Flush() call. This context should be non-nil and should // have a deadline set. We will return an error if none is present. 
func (nc *Conn) FlushWithContext(ctx context.Context) error { if nc == nil { return ErrInvalidConnection } if ctx == nil { return ErrInvalidContext } _, ok := ctx.Deadline() if !ok { return ErrNoDeadlineContext } nc.mu.Lock() if nc.isClosed() { nc.mu.Unlock() return ErrConnectionClosed } // Create a buffered channel to prevent chan send to block // in processPong() ch := make(chan struct{}, 1) nc.sendPing(ch) nc.mu.Unlock() var err error select { case _, ok := <-ch: if !ok { err = ErrConnectionClosed } else { close(ch) } case <-ctx.Done(): err = ctx.Err() } if err != nil { nc.removeFlushEntry(ch) } return err } // RequestWithContext will create an Inbox and perform a Request // using the provided cancellation context with the Inbox reply // for the data v. A response will be decoded into the vPtr last parameter. // // Deprecated: Encoded connections are no longer supported. func (c *EncodedConn) RequestWithContext(ctx context.Context, subject string, v any, vPtr any) error { if ctx == nil { return ErrInvalidContext } b, err := c.Enc.Encode(subject, v) if err != nil { return err } m, err := c.Conn.RequestWithContext(ctx, subject, b) if err != nil { return err } if reflect.TypeOf(vPtr) == emptyMsgType { mPtr := vPtr.(*Msg) *mPtr = *m } else { err := c.Enc.Decode(m.Subject, m.Data, vPtr) if err != nil { return err } } return nil } nats.go-1.41.0/dependencies.md000066400000000000000000000012331477351342400161270ustar00rootroot00000000000000# External Dependencies This file lists the dependencies used in this repository. 
| Dependency | License | |-----------------------------------|--------------| | Go | BSD 3-Clause | | github.com/golang/protobuf/proto | BSD-3-Clause | | github.com/klauspost/compress | BSD-3-Clause | | github.com/nats-io/nats-server/v2 | Apache-2.0 | | github.com/nats-io/nkeys | Apache-2.0 | | github.com/nats-io/nuid | Apache-2.0 | | go.uber.org/goleak | MIT | | golang.org/x/text | BSD-3-Clause | | google.golang.org/protobuf | BSD-3-Clause | nats.go-1.41.0/dependencies.tpl000066400000000000000000000010651477351342400163310ustar00rootroot00000000000000# External Dependencies This file lists the dependencies used in this repository. {{/* compress has actually a BSD 3-Clause license, but the License file in the repo confuses go-license tooling, hence the manual exception */}} | Dependency | License | |--------------------------------------------------|-----------------------------------------| {{ range . }}| {{ .Name }} | {{ if eq .Name "github.com/klauspost/compress/flate" }}BSD 3-Clause{{ else }}{{ .LicenseName }}{{ end }} | {{ end }} nats.go-1.41.0/enc.go000066400000000000000000000223141477351342400142560ustar00rootroot00000000000000// Copyright 2012-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package nats import ( "errors" "fmt" "reflect" "sync" "time" // Default Encoders "github.com/nats-io/nats.go/encoders/builtin" ) //lint:file-ignore SA1019 Ignore deprecation warnings for EncodedConn // Encoder interface is for all register encoders // // Deprecated: Encoded connections are no longer supported. type Encoder interface { Encode(subject string, v any) ([]byte, error) Decode(subject string, data []byte, vPtr any) error } var encMap map[string]Encoder var encLock sync.Mutex // Indexed names into the Registered Encoders. const ( JSON_ENCODER = "json" GOB_ENCODER = "gob" DEFAULT_ENCODER = "default" ) func init() { encMap = make(map[string]Encoder) // Register json, gob and default encoder RegisterEncoder(JSON_ENCODER, &builtin.JsonEncoder{}) RegisterEncoder(GOB_ENCODER, &builtin.GobEncoder{}) RegisterEncoder(DEFAULT_ENCODER, &builtin.DefaultEncoder{}) } // EncodedConn are the preferred way to interface with NATS. They wrap a bare connection to // a nats server and have an extendable encoder system that will encode and decode messages // from raw Go types. // // Deprecated: Encoded connections are no longer supported. type EncodedConn struct { Conn *Conn Enc Encoder } // NewEncodedConn will wrap an existing Connection and utilize the appropriate registered // encoder. // // Deprecated: Encoded connections are no longer supported. func NewEncodedConn(c *Conn, encType string) (*EncodedConn, error) { if c == nil { return nil, errors.New("nats: Nil Connection") } if c.IsClosed() { return nil, ErrConnectionClosed } ec := &EncodedConn{Conn: c, Enc: EncoderForType(encType)} if ec.Enc == nil { return nil, fmt.Errorf("no encoder registered for '%s'", encType) } return ec, nil } // RegisterEncoder will register the encType with the given Encoder. Useful for customization. // // Deprecated: Encoded connections are no longer supported. 
func RegisterEncoder(encType string, enc Encoder) { encLock.Lock() defer encLock.Unlock() encMap[encType] = enc } // EncoderForType will return the registered Encoder for the encType. // // Deprecated: Encoded connections are no longer supported. func EncoderForType(encType string) Encoder { encLock.Lock() defer encLock.Unlock() return encMap[encType] } // Publish publishes the data argument to the given subject. The data argument // will be encoded using the associated encoder. // // Deprecated: Encoded connections are no longer supported. func (c *EncodedConn) Publish(subject string, v any) error { b, err := c.Enc.Encode(subject, v) if err != nil { return err } return c.Conn.publish(subject, _EMPTY_, nil, b) } // PublishRequest will perform a Publish() expecting a response on the // reply subject. Use Request() for automatically waiting for a response // inline. // // Deprecated: Encoded connections are no longer supported. func (c *EncodedConn) PublishRequest(subject, reply string, v any) error { b, err := c.Enc.Encode(subject, v) if err != nil { return err } return c.Conn.publish(subject, reply, nil, b) } // Request will create an Inbox and perform a Request() call // with the Inbox reply for the data v. A response will be // decoded into the vPtr Response. // // Deprecated: Encoded connections are no longer supported. func (c *EncodedConn) Request(subject string, v any, vPtr any, timeout time.Duration) error { b, err := c.Enc.Encode(subject, v) if err != nil { return err } m, err := c.Conn.Request(subject, b, timeout) if err != nil { return err } if reflect.TypeOf(vPtr) == emptyMsgType { mPtr := vPtr.(*Msg) *mPtr = *m } else { err = c.Enc.Decode(m.Subject, m.Data, vPtr) } return err } // Handler is a specific callback used for Subscribe. 
It is generalized to // an any, but we will discover its format and arguments at runtime // and perform the correct callback, including demarshaling encoded data // back into the appropriate struct based on the signature of the Handler. // // Handlers are expected to have one of four signatures. // // type person struct { // Name string `json:"name,omitempty"` // Age uint `json:"age,omitempty"` // } // // handler := func(m *Msg) // handler := func(p *person) // handler := func(subject string, o *obj) // handler := func(subject, reply string, o *obj) // // These forms allow a callback to request a raw Msg ptr, where the processing // of the message from the wire is untouched. Process a JSON representation // and demarshal it into the given struct, e.g. person. // There are also variants where the callback wants either the subject, or the // subject and the reply subject. // // Deprecated: Encoded connections are no longer supported. type Handler any // Dissect the cb Handler's signature func argInfo(cb Handler) (reflect.Type, int) { cbType := reflect.TypeOf(cb) if cbType.Kind() != reflect.Func { panic("nats: Handler needs to be a func") } numArgs := cbType.NumIn() if numArgs == 0 { return nil, numArgs } return cbType.In(numArgs - 1), numArgs } var emptyMsgType = reflect.TypeOf(&Msg{}) // Subscribe will create a subscription on the given subject and process incoming // messages using the specified Handler. The Handler should be a func that matches // a signature from the description of Handler from above. // // Deprecated: Encoded connections are no longer supported. func (c *EncodedConn) Subscribe(subject string, cb Handler) (*Subscription, error) { return c.subscribe(subject, _EMPTY_, cb) } // QueueSubscribe will create a queue subscription on the given subject and process // incoming messages using the specified Handler. The Handler should be a func that // matches a signature from the description of Handler from above. 
// // Deprecated: Encoded connections are no longer supported. func (c *EncodedConn) QueueSubscribe(subject, queue string, cb Handler) (*Subscription, error) { return c.subscribe(subject, queue, cb) } // Internal implementation that all public functions will use. func (c *EncodedConn) subscribe(subject, queue string, cb Handler) (*Subscription, error) { if cb == nil { return nil, errors.New("nats: Handler required for EncodedConn Subscription") } argType, numArgs := argInfo(cb) if argType == nil { return nil, errors.New("nats: Handler requires at least one argument") } cbValue := reflect.ValueOf(cb) wantsRaw := (argType == emptyMsgType) natsCB := func(m *Msg) { var oV []reflect.Value if wantsRaw { oV = []reflect.Value{reflect.ValueOf(m)} } else { var oPtr reflect.Value if argType.Kind() != reflect.Ptr { oPtr = reflect.New(argType) } else { oPtr = reflect.New(argType.Elem()) } if err := c.Enc.Decode(m.Subject, m.Data, oPtr.Interface()); err != nil { if c.Conn.Opts.AsyncErrorCB != nil { c.Conn.ach.push(func() { c.Conn.Opts.AsyncErrorCB(c.Conn, m.Sub, errors.New("nats: Got an error trying to unmarshal: "+err.Error())) }) } return } if argType.Kind() != reflect.Ptr { oPtr = reflect.Indirect(oPtr) } // Callback Arity switch numArgs { case 1: oV = []reflect.Value{oPtr} case 2: subV := reflect.ValueOf(m.Subject) oV = []reflect.Value{subV, oPtr} case 3: subV := reflect.ValueOf(m.Subject) replyV := reflect.ValueOf(m.Reply) oV = []reflect.Value{subV, replyV, oPtr} } } cbValue.Call(oV) } return c.Conn.subscribe(subject, queue, natsCB, nil, nil, false, nil) } // FlushTimeout allows a Flush operation to have an associated timeout. // // Deprecated: Encoded connections are no longer supported. func (c *EncodedConn) FlushTimeout(timeout time.Duration) (err error) { return c.Conn.FlushTimeout(timeout) } // Flush will perform a round trip to the server and return when it // receives the internal reply. // // Deprecated: Encoded connections are no longer supported. 
func (c *EncodedConn) Flush() error { return c.Conn.Flush() } // Close will close the connection to the server. This call will release // all blocking calls, such as Flush(), etc. // // Deprecated: Encoded connections are no longer supported. func (c *EncodedConn) Close() { c.Conn.Close() } // Drain will put a connection into a drain state. All subscriptions will // immediately be put into a drain state. Upon completion, the publishers // will be drained and can not publish any additional messages. Upon draining // of the publishers, the connection will be closed. Use the ClosedCB() // option to know when the connection has moved from draining to closed. // // Deprecated: Encoded connections are no longer supported. func (c *EncodedConn) Drain() error { return c.Conn.Drain() } // LastError reports the last error encountered via the Connection. // // Deprecated: Encoded connections are no longer supported. func (c *EncodedConn) LastError() error { return c.Conn.LastError() } nats.go-1.41.0/encoders/000077500000000000000000000000001477351342400147625ustar00rootroot00000000000000nats.go-1.41.0/encoders/builtin/000077500000000000000000000000001477351342400164305ustar00rootroot00000000000000nats.go-1.41.0/encoders/builtin/default_enc.go000066400000000000000000000055161477351342400212370ustar00rootroot00000000000000// Copyright 2012-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package builtin import ( "bytes" "fmt" "reflect" "strconv" "unsafe" ) // DefaultEncoder implementation for EncodedConn. // This encoder will leave []byte and string untouched, but will attempt to // turn numbers into appropriate strings that can be decoded. It will also // properly encoded and decode bools. If will encode a struct, but if you want // to properly handle structures you should use JsonEncoder. // // Deprecated: Encoded connections are no longer supported. type DefaultEncoder struct { // Empty } var trueB = []byte("true") var falseB = []byte("false") var nilB = []byte("") // Encode // // Deprecated: Encoded connections are no longer supported. func (je *DefaultEncoder) Encode(subject string, v any) ([]byte, error) { switch arg := v.(type) { case string: bytes := *(*[]byte)(unsafe.Pointer(&arg)) return bytes, nil case []byte: return arg, nil case bool: if arg { return trueB, nil } else { return falseB, nil } case nil: return nilB, nil default: var buf bytes.Buffer fmt.Fprintf(&buf, "%+v", arg) return buf.Bytes(), nil } } // Decode // // Deprecated: Encoded connections are no longer supported. func (je *DefaultEncoder) Decode(subject string, data []byte, vPtr any) error { // Figure out what it's pointing to... 
sData := *(*string)(unsafe.Pointer(&data)) switch arg := vPtr.(type) { case *string: *arg = sData return nil case *[]byte: *arg = data return nil case *int: n, err := strconv.ParseInt(sData, 10, 64) if err != nil { return err } *arg = int(n) return nil case *int32: n, err := strconv.ParseInt(sData, 10, 64) if err != nil { return err } *arg = int32(n) return nil case *int64: n, err := strconv.ParseInt(sData, 10, 64) if err != nil { return err } *arg = int64(n) return nil case *float32: n, err := strconv.ParseFloat(sData, 32) if err != nil { return err } *arg = float32(n) return nil case *float64: n, err := strconv.ParseFloat(sData, 64) if err != nil { return err } *arg = float64(n) return nil case *bool: b, err := strconv.ParseBool(sData) if err != nil { return err } *arg = b return nil default: vt := reflect.TypeOf(arg).Elem() return fmt.Errorf("nats: Default Encoder can't decode to type %s", vt) } } nats.go-1.41.0/encoders/builtin/gob_enc.go000066400000000000000000000027351477351342400203620ustar00rootroot00000000000000// Copyright 2013-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package builtin import ( "bytes" "encoding/gob" ) // GobEncoder is a Go specific GOB Encoder implementation for EncodedConn. // This encoder will use the builtin encoding/gob to Marshal // and Unmarshal most types, including structs. // // Deprecated: Encoded connections are no longer supported. 
type GobEncoder struct { // Empty } // FIXME(dlc) - This could probably be more efficient. // Encode // // Deprecated: Encoded connections are no longer supported. func (ge *GobEncoder) Encode(subject string, v any) ([]byte, error) { b := new(bytes.Buffer) enc := gob.NewEncoder(b) if err := enc.Encode(v); err != nil { return nil, err } return b.Bytes(), nil } // Decode // // Deprecated: Encoded connections are no longer supported. func (ge *GobEncoder) Decode(subject string, data []byte, vPtr any) (err error) { dec := gob.NewDecoder(bytes.NewBuffer(data)) err = dec.Decode(vPtr) return } nats.go-1.41.0/encoders/builtin/json_enc.go000066400000000000000000000033651477351342400205640ustar00rootroot00000000000000// Copyright 2012-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package builtin import ( "encoding/json" "strings" ) // JsonEncoder is a JSON Encoder implementation for EncodedConn. // This encoder will use the builtin encoding/json to Marshal // and Unmarshal most types, including structs. // // Deprecated: Encoded connections are no longer supported. type JsonEncoder struct { // Empty } // Encode // // Deprecated: Encoded connections are no longer supported. func (je *JsonEncoder) Encode(subject string, v any) ([]byte, error) { b, err := json.Marshal(v) if err != nil { return nil, err } return b, nil } // Decode // // Deprecated: Encoded connections are no longer supported. 
func (je *JsonEncoder) Decode(subject string, data []byte, vPtr any) (err error) { switch arg := vPtr.(type) { case *string: // If they want a string and it is a JSON string, strip quotes // This allows someone to send a struct but receive as a plain string // This cast should be efficient for Go 1.3 and beyond. str := string(data) if strings.HasPrefix(str, `"`) && strings.HasSuffix(str, `"`) { *arg = str[1 : len(str)-1] } else { *arg = str } case *[]byte: *arg = data default: err = json.Unmarshal(data, arg) } return } nats.go-1.41.0/encoders/protobuf/000077500000000000000000000000001477351342400166225ustar00rootroot00000000000000nats.go-1.41.0/encoders/protobuf/protobuf_enc.go000066400000000000000000000041001477351342400216310ustar00rootroot00000000000000// Copyright 2015-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package protobuf import ( "errors" "github.com/nats-io/nats.go" "google.golang.org/protobuf/proto" ) //lint:file-ignore SA1019 Ignore deprecation warnings for EncodedConn // Additional index for registered Encoders. const ( PROTOBUF_ENCODER = "protobuf" ) func init() { // Register protobuf encoder nats.RegisterEncoder(PROTOBUF_ENCODER, &ProtobufEncoder{}) } // ProtobufEncoder is a protobuf implementation for EncodedConn // This encoder will use the builtin protobuf lib to Marshal // and Unmarshal structs. // // Deprecated: Encoded connections are no longer supported. 
type ProtobufEncoder struct { // Empty } var ( ErrInvalidProtoMsgEncode = errors.New("nats: Invalid protobuf proto.Message object passed to encode") ErrInvalidProtoMsgDecode = errors.New("nats: Invalid protobuf proto.Message object passed to decode") ) // Encode // // Deprecated: Encoded connections are no longer supported. func (pb *ProtobufEncoder) Encode(subject string, v any) ([]byte, error) { if v == nil { return nil, nil } i, found := v.(proto.Message) if !found { return nil, ErrInvalidProtoMsgEncode } b, err := proto.Marshal(i) if err != nil { return nil, err } return b, nil } // Decode // // Deprecated: Encoded connections are no longer supported. func (pb *ProtobufEncoder) Decode(subject string, data []byte, vPtr any) error { if _, ok := vPtr.(*any); ok { return nil } i, found := vPtr.(proto.Message) if !found { return ErrInvalidProtoMsgDecode } return proto.Unmarshal(data, i) } nats.go-1.41.0/encoders/protobuf/testdata/000077500000000000000000000000001477351342400204335ustar00rootroot00000000000000nats.go-1.41.0/encoders/protobuf/testdata/pbtest.pb.go000066400000000000000000000136141477351342400226700ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.21.0 // protoc v3.11.4 // source: pbtest.proto package testdata import ( proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" ) const ( // Verify that this generated code is sufficiently up-to-date. _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) // Verify that runtime/protoimpl is sufficiently up-to-date. _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) // This is a compile-time assertion that a sufficiently up-to-date version // of the legacy proto package is being used. 
const _ = proto.ProtoPackageIsVersion4 type Person struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Age int32 `protobuf:"varint,2,opt,name=age,proto3" json:"age,omitempty"` Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` Children map[string]*Person `protobuf:"bytes,10,rep,name=children,proto3" json:"children,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *Person) Reset() { *x = Person{} if protoimpl.UnsafeEnabled { mi := &file_pbtest_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *Person) String() string { return protoimpl.X.MessageStringOf(x) } func (*Person) ProtoMessage() {} func (x *Person) ProtoReflect() protoreflect.Message { mi := &file_pbtest_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use Person.ProtoReflect.Descriptor instead. 
func (*Person) Descriptor() ([]byte, []int) { return file_pbtest_proto_rawDescGZIP(), []int{0} } func (x *Person) GetName() string { if x != nil { return x.Name } return "" } func (x *Person) GetAge() int32 { if x != nil { return x.Age } return 0 } func (x *Person) GetAddress() string { if x != nil { return x.Address } return "" } func (x *Person) GetChildren() map[string]*Person { if x != nil { return x.Children } return nil } var File_pbtest_proto protoreflect.FileDescriptor var file_pbtest_proto_rawDesc = []byte{ 0x0a, 0x0c, 0x70, 0x62, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x74, 0x65, 0x73, 0x74, 0x64, 0x61, 0x74, 0x61, 0x22, 0xd3, 0x01, 0x0a, 0x06, 0x50, 0x65, 0x72, 0x73, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x61, 0x67, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x3a, 0x0a, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x65, 0x72, 0x73, 0x6f, 0x6e, 0x2e, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x1a, 0x4d, 0x0a, 0x0d, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x65, 0x72, 0x73, 0x6f, 0x6e, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } 
var ( file_pbtest_proto_rawDescOnce sync.Once file_pbtest_proto_rawDescData = file_pbtest_proto_rawDesc ) func file_pbtest_proto_rawDescGZIP() []byte { file_pbtest_proto_rawDescOnce.Do(func() { file_pbtest_proto_rawDescData = protoimpl.X.CompressGZIP(file_pbtest_proto_rawDescData) }) return file_pbtest_proto_rawDescData } var file_pbtest_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_pbtest_proto_goTypes = []any{ (*Person)(nil), // 0: testdata.Person nil, // 1: testdata.Person.ChildrenEntry } var file_pbtest_proto_depIdxs = []int32{ 1, // 0: testdata.Person.children:type_name -> testdata.Person.ChildrenEntry 0, // 1: testdata.Person.ChildrenEntry.value:type_name -> testdata.Person 2, // [2:2] is the sub-list for method output_type 2, // [2:2] is the sub-list for method input_type 2, // [2:2] is the sub-list for extension type_name 2, // [2:2] is the sub-list for extension extendee 0, // [0:2] is the sub-list for field type_name } func init() { file_pbtest_proto_init() } func file_pbtest_proto_init() { if File_pbtest_proto != nil { return } if !protoimpl.UnsafeEnabled { file_pbtest_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Person); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_pbtest_proto_rawDesc, NumEnums: 0, NumMessages: 2, NumExtensions: 0, NumServices: 0, }, GoTypes: file_pbtest_proto_goTypes, DependencyIndexes: file_pbtest_proto_depIdxs, MessageInfos: file_pbtest_proto_msgTypes, }.Build() File_pbtest_proto = out.File file_pbtest_proto_rawDesc = nil file_pbtest_proto_goTypes = nil file_pbtest_proto_depIdxs = nil } nats.go-1.41.0/encoders/protobuf/testdata/pbtest.proto000066400000000000000000000002321477351342400230160ustar00rootroot00000000000000syntax = "proto3"; package testdata; message Person { string name = 1; 
int32 age = 2; string address = 3; map children = 10; } nats.go-1.41.0/example_test.go000066400000000000000000000523301477351342400162040ustar00rootroot00000000000000// Copyright 2012-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package nats_test import ( "context" "fmt" "log" "net" "time" "github.com/nats-io/nats.go" ) // Shows different ways to create a Conn. func ExampleConnect() { nc, _ := nats.Connect("demo.nats.io") nc.Close() nc, _ = nats.Connect("nats://derek:secretpassword@demo.nats.io:4222") nc.Close() nc, _ = nats.Connect("tls://derek:secretpassword@demo.nats.io:4443") nc.Close() opts := nats.Options{ AllowReconnect: true, MaxReconnect: 10, ReconnectWait: 5 * time.Second, Timeout: 1 * time.Second, } nc, _ = opts.Connect() nc.Close() } type skipTLSDialer struct { dialer *net.Dialer skipTLS bool } func (sd *skipTLSDialer) Dial(network, address string) (net.Conn, error) { return sd.dialer.Dial(network, address) } func (sd *skipTLSDialer) SkipTLSHandshake() bool { return sd.skipTLS } func ExampleCustomDialer() { // Given the following CustomDialer implementation: // // type skipTLSDialer struct { // dialer *net.Dialer // skipTLS bool // } // // func (sd *skipTLSDialer) Dial(network, address string) (net.Conn, error) { // return sd.dialer.Dial(network, address) // } // // func (sd *skipTLSDialer) SkipTLSHandshake() bool { // return true // } // sd := &skipTLSDialer{dialer: &net.Dialer{Timeout: 2 * time.Second}, skipTLS: true} nc, _ := 
nats.Connect("demo.nats.io", nats.SetCustomDialer(sd)) defer nc.Close() } // This Example shows an asynchronous subscriber. func ExampleConn_Subscribe() { nc, _ := nats.Connect(nats.DefaultURL) defer nc.Close() nc.Subscribe("foo", func(m *nats.Msg) { fmt.Printf("Received a message: %s\n", string(m.Data)) }) } func ExampleConn_ForceReconnect() { nc, _ := nats.Connect(nats.DefaultURL) defer nc.Close() nc.Subscribe("foo", func(m *nats.Msg) { fmt.Printf("Received a message: %s\n", string(m.Data)) }) // Reconnect to the server. // the subscription will be recreated after the reconnect. nc.ForceReconnect() } // This Example shows a synchronous subscriber. func ExampleConn_SubscribeSync() { nc, _ := nats.Connect(nats.DefaultURL) defer nc.Close() sub, _ := nc.SubscribeSync("foo") m, err := sub.NextMsg(1 * time.Second) if err == nil { fmt.Printf("Received a message: %s\n", string(m.Data)) } else { fmt.Println("NextMsg timed out.") } } func ExampleSubscription_NextMsg() { nc, _ := nats.Connect(nats.DefaultURL) defer nc.Close() sub, _ := nc.SubscribeSync("foo") m, err := sub.NextMsg(1 * time.Second) if err == nil { fmt.Printf("Received a message: %s\n", string(m.Data)) } else { fmt.Println("NextMsg timed out.") } } func ExampleSubscription_Unsubscribe() { nc, _ := nats.Connect(nats.DefaultURL) defer nc.Close() sub, _ := nc.SubscribeSync("foo") // ... 
sub.Unsubscribe() } func ExampleConn_Publish() { nc, _ := nats.Connect(nats.DefaultURL) defer nc.Close() nc.Publish("foo", []byte("Hello World!")) } func ExampleConn_PublishMsg() { nc, _ := nats.Connect(nats.DefaultURL) defer nc.Close() msg := &nats.Msg{Subject: "foo", Reply: "bar", Data: []byte("Hello World!")} nc.PublishMsg(msg) } func ExampleConn_Flush() { nc, _ := nats.Connect(nats.DefaultURL) defer nc.Close() msg := &nats.Msg{Subject: "foo", Reply: "bar", Data: []byte("Hello World!")} for i := 0; i < 1000; i++ { nc.PublishMsg(msg) } err := nc.Flush() if err == nil { // Everything has been processed by the server for nc *Conn. } } func ExampleConn_FlushTimeout() { nc, _ := nats.Connect(nats.DefaultURL) defer nc.Close() msg := &nats.Msg{Subject: "foo", Reply: "bar", Data: []byte("Hello World!")} for i := 0; i < 1000; i++ { nc.PublishMsg(msg) } // Only wait for up to 1 second for Flush err := nc.FlushTimeout(1 * time.Second) if err == nil { // Everything has been processed by the server for nc *Conn. 
} } func ExampleConn_Request() { nc, _ := nats.Connect(nats.DefaultURL) defer nc.Close() nc.Subscribe("foo", func(m *nats.Msg) { nc.Publish(m.Reply, []byte("I will help you")) }) nc.Request("foo", []byte("help"), 50*time.Millisecond) } func ExampleConn_QueueSubscribe() { nc, _ := nats.Connect(nats.DefaultURL) defer nc.Close() received := 0 nc.QueueSubscribe("foo", "worker_group", func(_ *nats.Msg) { received++ }) } func ExampleSubscription_AutoUnsubscribe() { nc, _ := nats.Connect(nats.DefaultURL) defer nc.Close() received, wanted, total := 0, 10, 100 sub, _ := nc.Subscribe("foo", func(_ *nats.Msg) { received++ }) sub.AutoUnsubscribe(wanted) for i := 0; i < total; i++ { nc.Publish("foo", []byte("Hello")) } nc.Flush() fmt.Printf("Received = %d", received) } func ExampleConn_Close() { nc, _ := nats.Connect(nats.DefaultURL) nc.Close() } func ExampleJetStream() { nc, err := nats.Connect("localhost") if err != nil { log.Fatal(err) } // Use the JetStream context to produce and consumer messages // that have been persisted. js, err := nc.JetStream(nats.PublishAsyncMaxPending(256)) if err != nil { log.Fatal(err) } js.AddStream(&nats.StreamConfig{ Name: "FOO", Subjects: []string{"foo"}, }) js.Publish("foo", []byte("Hello JS!")) // Publish messages asynchronously. for i := 0; i < 500; i++ { js.PublishAsync("foo", []byte("Hello JS Async!")) } select { case <-js.PublishAsyncComplete(): case <-time.After(5 * time.Second): fmt.Println("Did not resolve in time") } // Create async consumer on subject 'foo'. Async subscribers // ack a message once exiting the callback. js.Subscribe("foo", func(msg *nats.Msg) { meta, _ := msg.Metadata() fmt.Printf("Stream Sequence : %v\n", meta.Sequence.Stream) fmt.Printf("Consumer Sequence: %v\n", meta.Sequence.Consumer) }) // Async subscriber with manual acks. js.Subscribe("foo", func(msg *nats.Msg) { msg.Ack() }, nats.ManualAck()) // Async queue subscription where members load balance the // received messages together. 
// If no consumer name is specified, either with nats.Bind() // or nats.Durable() options, the queue name is used as the // durable name (that is, as if you were passing the // nats.Durable() option. // It is recommended to use nats.Bind() or nats.Durable() // and preferably create the JetStream consumer beforehand // (using js.AddConsumer) so that the JS consumer is not // deleted on an Unsubscribe() or Drain() when the member // that created the consumer goes away first. // Check Godoc for the QueueSubscribe() API for more details. js.QueueSubscribe("foo", "group", func(msg *nats.Msg) { msg.Ack() }, nats.ManualAck()) // Subscriber to consume messages synchronously. sub, _ := js.SubscribeSync("foo") msg, _ := sub.NextMsg(2 * time.Second) msg.Ack() // We can add a member to the group, with this member using // the synchronous version of the QueueSubscribe. sub, _ = js.QueueSubscribeSync("foo", "group") msg, _ = sub.NextMsg(2 * time.Second) msg.Ack() // ChanSubscribe msgCh := make(chan *nats.Msg, 8192) sub, _ = js.ChanSubscribe("foo", msgCh) select { case msg := <-msgCh: fmt.Println("[Received]", msg) case <-time.After(1 * time.Second): } // Create Pull based consumer with maximum 128 inflight. sub, _ = js.PullSubscribe("foo", "wq", nats.PullMaxWaiting(128)) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() for { select { case <-ctx.Done(): return default: } msgs, _ := sub.Fetch(10, nats.Context(ctx)) for _, msg := range msgs { msg.Ack() } } } // A JetStream context can be configured with a default timeout using nats.MaxWait // or with a custom API prefix in case of using an imported JetStream from another account. 
func ExampleJSOpt() { nc, err := nats.Connect("localhost") if err != nil { log.Fatal(err) } // Use the JetStream context to manage streams and consumers (with nats.APIPrefix JSOpt) js, err := nc.JetStream(nats.APIPrefix("dlc"), nats.MaxWait(5*time.Second)) if err != nil { log.Fatal(err) } sub, _ := js.SubscribeSync("foo") js.Publish("foo", []byte("Hello JS!")) sub.NextMsg(2 * time.Second) } func ExampleJetStreamManager() { nc, _ := nats.Connect("localhost") js, _ := nc.JetStream() // Create a stream js.AddStream(&nats.StreamConfig{ Name: "FOO", Subjects: []string{"foo"}, MaxBytes: 1024, }) // Update a stream js.UpdateStream(&nats.StreamConfig{ Name: "FOO", MaxBytes: 2048, }) // Create a durable consumer js.AddConsumer("FOO", &nats.ConsumerConfig{ Durable: "BAR", }) // Get information about all streams (with Context JSOpt) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() for info := range js.StreamsInfo(nats.Context(ctx)) { fmt.Println("stream name:", info.Config.Name) } // Get information about all consumers (with MaxWait JSOpt) for info := range js.ConsumersInfo("FOO", nats.MaxWait(10*time.Second)) { fmt.Println("consumer name:", info.Name) } // Delete a consumer js.DeleteConsumer("FOO", "BAR") // Delete a stream js.DeleteStream("FOO") } // A JetStreamContext is the composition of a JetStream and JetStreamManagement interfaces. // In case of only requiring publishing/consuming messages, can create a context that // only uses the JetStream interface. func ExampleJetStreamContext() { nc, _ := nats.Connect("localhost") var js nats.JetStream var jsm nats.JetStreamManager var jsctx nats.JetStreamContext // JetStream that can publish/subscribe but cannot manage streams. js, _ = nc.JetStream() js.Publish("foo", []byte("hello")) // JetStream context that can manage streams/consumers but cannot produce messages. 
jsm, _ = nc.JetStream() jsm.AddStream(&nats.StreamConfig{Name: "FOO"}) // JetStream context that can both manage streams/consumers // as well as publish/subscribe. jsctx, _ = nc.JetStream() jsctx.AddStream(&nats.StreamConfig{Name: "BAR"}) jsctx.Publish("bar", []byte("hello world")) } func ExamplePubOpt() { nc, err := nats.Connect("localhost") if err != nil { log.Fatal(err) } // Create JetStream context to produce/consumer messages that will be persisted. js, err := nc.JetStream() if err != nil { log.Fatal(err) } // Create stream to persist messages published on 'foo'. js.AddStream(&nats.StreamConfig{ Name: "FOO", Subjects: []string{"foo"}, }) // Publish is synchronous by default, and waits for a PubAck response. js.Publish("foo", []byte("Hello JS!")) // Publish with a custom timeout. js.Publish("foo", []byte("Hello JS!"), nats.AckWait(500*time.Millisecond)) // Publish with a context. ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() js.Publish("foo", []byte("Hello JS!"), nats.Context(ctx)) // Publish and assert the expected stream name. js.Publish("foo", []byte("Hello JS!"), nats.ExpectStream("FOO")) // Publish and assert the last sequence. js.Publish("foo", []byte("Hello JS!"), nats.ExpectLastSequence(5)) // Publish and tag the message with an ID. js.Publish("foo", []byte("Hello JS!"), nats.MsgId("foo:6")) // Publish and assert the last msg ID. js.Publish("foo", []byte("Hello JS!"), nats.ExpectLastMsgId("foo:6")) } func ExampleSubOpt() { nc, err := nats.Connect("localhost") if err != nil { log.Fatal(err) } // Create JetStream context to produce/consumer messages that will be persisted. js, err := nc.JetStream() if err != nil { log.Fatal(err) } // Auto-ack each individual message. js.Subscribe("foo", func(msg *nats.Msg) { fmt.Printf("Received a message: %s\n", string(msg.Data)) }) // Auto-ack current sequence and all below. 
js.Subscribe("foo", func(msg *nats.Msg) { fmt.Printf("Received a message: %s\n", string(msg.Data)) }, nats.AckAll()) // Auto-ack each individual message. js.Subscribe("foo", func(msg *nats.Msg) { fmt.Printf("Received a message: %s\n", string(msg.Data)) }, nats.AckExplicit()) // Acks are not required. js.Subscribe("foo", func(msg *nats.Msg) { fmt.Printf("Received a message: %s\n", string(msg.Data)) }, nats.AckNone()) // Manually acknowledge messages. js.Subscribe("foo", func(msg *nats.Msg) { msg.Ack() }, nats.ManualAck()) // Bind to an existing stream. sub, _ := js.SubscribeSync("origin", nats.BindStream("m1")) msg, _ := sub.NextMsg(2 * time.Second) msg.Ack() // Deliver all messages from the beginning. js.Subscribe("foo", func(msg *nats.Msg) { fmt.Printf("Received a message: %s\n", string(msg.Data)) }, nats.DeliverAll()) // Deliver messages starting from the last one. js.Subscribe("foo", func(msg *nats.Msg) { fmt.Printf("Received a message: %s\n", string(msg.Data)) }, nats.DeliverLast()) // Deliver only new messages that arrive after subscription. js.Subscribe("foo", func(msg *nats.Msg) { fmt.Printf("Received a message: %s\n", string(msg.Data)) }, nats.DeliverNew()) // Create durable consumer FOO, if it doesn't exist. js.Subscribe("foo", func(msg *nats.Msg) { fmt.Printf("Received a message: %s\n", string(msg.Data)) }, nats.Durable("FOO")) // Create consumer on Foo with flow control and heartbeats. js.SubscribeSync("foo", // Redeliver after 30s nats.AckWait(30*time.Second), // Redeliver only once nats.MaxDeliver(1), // Activate Flow control algorithm from the server. nats.EnableFlowControl(), // Track heartbeats from the server for missed sequences. nats.IdleHeartbeat(500*time.Millisecond), ) // Set the allowable number of outstanding acks. js.Subscribe("foo", func(msg *nats.Msg) { fmt.Printf("Received a message: %s\n", string(msg.Data)) }, nats.MaxAckPending(5)) // Set the number of redeliveries for a message. 
js.Subscribe("foo", func(msg *nats.Msg) { fmt.Printf("Received a message: %s\n", string(msg.Data)) }, nats.MaxDeliver(5)) // Set the number the max inflight pull requests. js.Subscribe("foo", func(msg *nats.Msg) { fmt.Printf("Received a message: %s\n", string(msg.Data)) }, nats.PullMaxWaiting(5)) // Set the number the max inflight pull requests. js.Subscribe("foo", func(msg *nats.Msg) { fmt.Printf("Received a message: %s\n", string(msg.Data)) }, nats.PullMaxWaiting(5)) // Set the rate limit on a push consumer. js.Subscribe("foo", func(msg *nats.Msg) { fmt.Printf("Received a message: %s\n", string(msg.Data)) }, nats.RateLimit(1024)) // Replay messages at original speed, instead of as fast as possible. js.Subscribe("foo", func(msg *nats.Msg) { fmt.Printf("Received a message: %s\n", string(msg.Data)) }, nats.ReplayOriginal()) // Start delivering messages at a given sequence. js.Subscribe("foo", func(msg *nats.Msg) { fmt.Printf("Received a message: %s\n", string(msg.Data)) }, nats.StartSequence(10)) // Start delivering messages at a given time. js.Subscribe("foo", func(msg *nats.Msg) { fmt.Printf("Received a message: %s\n", string(msg.Data)) }, nats.StartTime(time.Now().Add(-2*time.Hour))) // Start delivering messages with delay based on BackOff array of time durations. js.Subscribe("foo", func(msg *nats.Msg) { fmt.Printf("Received a message: %s\n", string(msg.Data)) }, nats.ManualAck(), nats.MaxDeliver(2), nats.BackOff([]time.Duration{50 * time.Millisecond, 250 * time.Millisecond})) // Set consumer replicas count for a durable while subscribing. js.Subscribe("foo", func(msg *nats.Msg) { fmt.Printf("Received a message: %s\n", string(msg.Data)) }, nats.Durable("FOO"), nats.ConsumerReplicas(1)) // Force memory storage while subscribing. 
js.Subscribe("foo", func(msg *nats.Msg) { fmt.Printf("Received a message: %s\n", string(msg.Data)) }, nats.Durable("FOO"), nats.ConsumerMemoryStorage()) // Skip consumer lookup when using explicit consumer name js.Subscribe("foo", func(msg *nats.Msg) { fmt.Printf("Received a message: %s\n", string(msg.Data)) }, nats.Durable("FOO"), nats.SkipConsumerLookup()) // Use multiple subject filters. js.Subscribe("", func(msg *nats.Msg) { fmt.Printf("Received a message: %s\n", string(msg.Data)) }, nats.Durable("FOO"), nats.ConsumerFilterSubjects("foo", "bar"), nats.BindStream("test_stream")) } func ExampleMaxWait() { nc, _ := nats.Connect("localhost") // Set default timeout for JetStream API requests, // following requests will inherit this timeout. js, _ := nc.JetStream(nats.MaxWait(3 * time.Second)) // Set custom timeout for a JetStream API request. js.AddStream(&nats.StreamConfig{ Name: "FOO", Subjects: []string{"foo"}, }, nats.MaxWait(2*time.Second)) sub, _ := js.PullSubscribe("foo", "my-durable-name") // Fetch using the default timeout of 3 seconds. msgs, _ := sub.Fetch(1) // Set custom timeout for a pull batch request. msgs, _ = sub.Fetch(1, nats.MaxWait(2*time.Second)) for _, msg := range msgs { msg.Ack() } } func ExampleAckWait() { nc, _ := nats.Connect("localhost") js, _ := nc.JetStream() // Set custom timeout for a JetStream API request. js.AddStream(&nats.StreamConfig{ Name: "FOO", Subjects: []string{"foo"}, }) // Wait for an ack response for 2 seconds. js.Publish("foo", []byte("Hello JS!"), nats.AckWait(2*time.Second)) // Create consumer on 'foo' subject that waits for an ack for 10s, // after which the message will be delivered. sub, _ := js.SubscribeSync("foo", nats.AckWait(10*time.Second)) msg, _ := sub.NextMsg(2 * time.Second) // Wait for ack of ack for 2s. msg.AckSync(nats.AckWait(2 * time.Second)) } func ExampleMsg_AckSync() { nc, _ := nats.Connect("localhost") js, _ := nc.JetStream() // Set custom timeout for a JetStream API request. 
js.AddStream(&nats.StreamConfig{ Name: "FOO", Subjects: []string{"foo"}, }) sub, _ := js.SubscribeSync("foo") msg, _ := sub.NextMsg(2 * time.Second) // Wait for ack of an ack. msg.AckSync() } // When a message has been delivered by JetStream, it will be possible // to access some of its metadata such as sequence numbers. func ExampleMsg_Metadata() { nc, _ := nats.Connect("localhost") js, _ := nc.JetStream() // Set custom timeout for a JetStream API request. js.AddStream(&nats.StreamConfig{ Name: "FOO", Subjects: []string{"foo"}, }) js.Publish("foo", []byte("hello")) sub, _ := js.SubscribeSync("foo") msg, _ := sub.NextMsg(2 * time.Second) // meta, _ := msg.Metadata() // Stream and Consumer sequences. fmt.Printf("Stream seq: %s:%d, Consumer seq: %s:%d\n", meta.Stream, meta.Sequence.Stream, meta.Consumer, meta.Sequence.Consumer) fmt.Printf("Pending: %d\n", meta.NumPending) fmt.Printf("Pending: %d\n", meta.NumDelivered) } // AckOpt are the options that can be passed when acknowledge a message. func ExampleAckOpt() { nc, err := nats.Connect("localhost") if err != nil { log.Fatal(err) } // Create JetStream context to produce/consumer messages that will be persisted. js, err := nc.JetStream() if err != nil { log.Fatal(err) } // Create stream to persist messages published on 'foo'. js.AddStream(&nats.StreamConfig{ Name: "FOO", Subjects: []string{"foo"}, }) // Publish is synchronous by default, and waits for a PubAck response. js.Publish("foo", []byte("Hello JS!")) sub, _ := js.SubscribeSync("foo") msg, _ := sub.NextMsg(2 * time.Second) // Ack and wait for 2 seconds msg.InProgress(nats.AckWait(2)) // Using a context. ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() msg.Ack(nats.Context(ctx)) } func ExamplePullOpt() { nc, err := nats.Connect("localhost") if err != nil { log.Fatal(err) } // Create JetStream context to produce/consumer messages that will be persisted. 
js, err := nc.JetStream() if err != nil { log.Fatal(err) } // Create stream to persist messages published on 'foo'. js.AddStream(&nats.StreamConfig{ Name: "FOO", Subjects: []string{"foo"}, }) // Publish is synchronous by default, and waits for a PubAck response. js.Publish("foo", []byte("Hello JS!")) sub, _ := js.PullSubscribe("foo", "wq") // Pull one message, msgs, _ := sub.Fetch(1, nats.MaxWait(2*time.Second)) for _, msg := range msgs { msg.Ack() } // Using a context to timeout waiting for a message. ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() msgs, _ = sub.Fetch(1, nats.Context(ctx)) for _, msg := range msgs { msg.Ack() } } func ExampleContext() { nc, err := nats.Connect("localhost") if err != nil { log.Fatal(err) } js, _ := nc.JetStream() // Base context ctx, cancel := context.WithCancel(context.Background()) defer cancel() // nats.Context option implements context.Context interface, so can be used // to create a new context from top level one. nctx := nats.Context(ctx) // JetStreamManager functions all can use context. js.AddStream(&nats.StreamConfig{ Name: "FOO", Subjects: []string{"foo"}, }, nctx) // Custom context with timeout tctx, tcancel := context.WithTimeout(nctx, 2*time.Second) defer tcancel() // Set a timeout for publishing using context. deadlineCtx := nats.Context(tctx) js.Publish("foo", []byte("Hello JS!"), deadlineCtx) sub, _ := js.SubscribeSync("foo") msg, _ := sub.NextMsgWithContext(deadlineCtx) // Acks can also use a context to await for a response. 
msg.Ack(deadlineCtx) } nats.go-1.41.0/examples/000077500000000000000000000000001477351342400147765ustar00rootroot00000000000000nats.go-1.41.0/examples/jetstream/000077500000000000000000000000001477351342400167745ustar00rootroot00000000000000nats.go-1.41.0/examples/jetstream/js-consume/000077500000000000000000000000001477351342400210575ustar00rootroot00000000000000nats.go-1.41.0/examples/jetstream/js-consume/main.go000066400000000000000000000040411477351342400223310ustar00rootroot00000000000000// Copyright 2022-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package main import ( "context" "fmt" "log" "os" "os/signal" "syscall" "time" "github.com/nats-io/nats.go" "github.com/nats-io/nats.go/jetstream" ) func main() { ctx, cancel := context.WithTimeout(context.Background(), 60*time.Minute) defer cancel() nc, err := nats.Connect("nats://127.0.0.1:4222") if err != nil { log.Fatal(err) } js, err := jetstream.New(nc) if err != nil { log.Fatal(err) } s, err := js.CreateStream(ctx, jetstream.StreamConfig{ Name: "TEST_STREAM", Subjects: []string{"FOO.*"}, }) if err != nil { log.Fatal(err) } cons, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{ Durable: "TestConsumerConsume", AckPolicy: jetstream.AckExplicitPolicy, }) if err != nil { log.Fatal(err) } go endlessPublish(ctx, nc, js) cc, err := cons.Consume(func(msg jetstream.Msg) { fmt.Println(string(msg.Data())) msg.Ack() }, jetstream.ConsumeErrHandler(func(consumeCtx jetstream.ConsumeContext, err error) { fmt.Println(err) })) if err != nil { log.Fatal(err) } defer cc.Stop() sig := make(chan os.Signal, 1) signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM) <-sig } func endlessPublish(ctx context.Context, nc *nats.Conn, js jetstream.JetStream) { var i int for { time.Sleep(500 * time.Millisecond) if nc.Status() != nats.CONNECTED { continue } if _, err := js.Publish(ctx, "FOO.TEST1", []byte(fmt.Sprintf("msg %d", i))); err != nil { fmt.Println("pub error: ", err) } i++ } } nats.go-1.41.0/examples/jetstream/js-fetch/000077500000000000000000000000001477351342400204775ustar00rootroot00000000000000nats.go-1.41.0/examples/jetstream/js-fetch/main.go000066400000000000000000000037031477351342400217550ustar00rootroot00000000000000// Copyright 2020-2022 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "context" "fmt" "log" "time" "github.com/nats-io/nats.go" "github.com/nats-io/nats.go/jetstream" ) func main() { ctx, cancel := context.WithTimeout(context.Background(), 60*time.Minute) defer cancel() nc, err := nats.Connect("nats://127.0.0.1:4222") if err != nil { log.Fatal(err) } js, err := jetstream.New(nc) if err != nil { log.Fatal(err) } s, err := js.CreateStream(ctx, jetstream.StreamConfig{ Name: "TEST_STREAM", Subjects: []string{"FOO.*"}, }) if err != nil { log.Fatal(err) } cons, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{ Durable: "TestConsumerListener", AckPolicy: jetstream.AckExplicitPolicy, }) if err != nil { log.Fatal(err) } go endlessPublish(ctx, nc, js) for { msgs, err := cons.Fetch(100, jetstream.FetchMaxWait(1*time.Second)) if err != nil { fmt.Println(err) } for msg := range msgs.Messages() { fmt.Println(string(msg.Data())) msg.Ack() } if msgs.Error() != nil { fmt.Println("Error fetching messages: ", err) } } } func endlessPublish(ctx context.Context, nc *nats.Conn, js jetstream.JetStream) { var i int for { time.Sleep(500 * time.Millisecond) if nc.Status() != nats.CONNECTED { continue } if _, err := js.Publish(ctx, "FOO.TEST1", []byte(fmt.Sprintf("msg %d", i))); err != nil { fmt.Println("pub error: ", err) } i++ } } nats.go-1.41.0/examples/jetstream/js-messages/000077500000000000000000000000001477351342400212155ustar00rootroot00000000000000nats.go-1.41.0/examples/jetstream/js-messages/main.go000066400000000000000000000035761477351342400225030ustar00rootroot00000000000000// Copyright 2022-2023 The NATS 
Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "context" "fmt" "log" "time" "github.com/nats-io/nats.go" "github.com/nats-io/nats.go/jetstream" ) func main() { ctx, cancel := context.WithTimeout(context.Background(), 60*time.Minute) defer cancel() nc, err := nats.Connect("127.0.0.1:4222") if err != nil { log.Fatal(err) } js, err := jetstream.New(nc) if err != nil { log.Fatal(err) } s, err := js.CreateStream(ctx, jetstream.StreamConfig{ Name: "TEST_STREAM", Subjects: []string{"FOO.*"}, }) if err != nil { log.Fatal(err) } cons, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{ Durable: "TestConsumerMessages", AckPolicy: jetstream.AckExplicitPolicy, }) if err != nil { log.Fatal(err) } go endlessPublish(ctx, nc, js) it, err := cons.Messages(jetstream.PullMaxMessages(1)) if err != nil { log.Fatal(err) } for { msg, err := it.Next() if err != nil { fmt.Println("next err: ", err) } fmt.Println(string(msg.Data())) msg.Ack() } } func endlessPublish(ctx context.Context, nc *nats.Conn, js jetstream.JetStream) { var i int for { time.Sleep(500 * time.Millisecond) if nc.Status() != nats.CONNECTED { continue } if _, err := js.Publish(ctx, "FOO.TEST1", []byte(fmt.Sprintf("msg %d", i))); err != nil { fmt.Println("pub error: ", err) } i++ } } 
nats.go-1.41.0/examples/jetstream/js-next/000077500000000000000000000000001477351342400203645ustar00rootroot00000000000000nats.go-1.41.0/examples/jetstream/js-next/main.go000066400000000000000000000034501477351342400216410ustar00rootroot00000000000000// Copyright 2020-2022 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "context" "fmt" "log" "time" "github.com/nats-io/nats.go" "github.com/nats-io/nats.go/jetstream" ) func main() { ctx, cancel := context.WithTimeout(context.Background(), 60*time.Minute) defer cancel() nc, err := nats.Connect("nats://127.0.0.1:4222") if err != nil { log.Fatal(err) } js, err := jetstream.New(nc) if err != nil { log.Fatal(err) } s, err := js.CreateStream(ctx, jetstream.StreamConfig{ Name: "TEST_STREAM", Subjects: []string{"FOO.*"}, }) if err != nil { log.Fatal(err) } cons, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{ Durable: "TestConsumerListener", AckPolicy: jetstream.AckExplicitPolicy, }) if err != nil { log.Fatal(err) } go endlessPublish(ctx, nc, js) for { msg, err := cons.Next() if err != nil { fmt.Println(err) continue } fmt.Println(string(msg.Data())) msg.Ack() } } func endlessPublish(ctx context.Context, nc *nats.Conn, js jetstream.JetStream) { var i int for { time.Sleep(500 * time.Millisecond) if nc.Status() != nats.CONNECTED { continue } if _, err := js.Publish(ctx, "FOO.TEST1", []byte(fmt.Sprintf("msg %d", i))); err != nil { fmt.Println("pub error: ", err) } i++ } } 
nats.go-1.41.0/examples/jetstream/js-ordered-consume/000077500000000000000000000000001477351342400225015ustar00rootroot00000000000000nats.go-1.41.0/examples/jetstream/js-ordered-consume/main.go000066400000000000000000000035531477351342400237620ustar00rootroot00000000000000// Copyright 2022-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "context" "fmt" "log" "os" "os/signal" "syscall" "time" "github.com/nats-io/nats.go" "github.com/nats-io/nats.go/jetstream" ) func main() { ctx, cancel := context.WithTimeout(context.Background(), 60*time.Minute) defer cancel() nc, err := nats.Connect("nats://127.0.0.1:4222") if err != nil { log.Fatal(err) } js, err := jetstream.New(nc) if err != nil { log.Fatal(err) } s, err := js.CreateStream(ctx, jetstream.StreamConfig{ Name: "TEST_STREAM", Subjects: []string{"FOO.*"}, }) if err != nil { log.Fatal(err) } cons, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{ MaxResetAttempts: 5, }) if err != nil { log.Fatal(err) } go endlessPublish(ctx, nc, js) _, err = cons.Consume(func(msg jetstream.Msg) { fmt.Println(string(msg.Data())) msg.Ack() }) if err != nil { log.Fatal(err) } sig := make(chan os.Signal, 1) signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM) <-sig } func endlessPublish(ctx context.Context, nc *nats.Conn, js jetstream.JetStream) { var i int for { time.Sleep(500 * time.Millisecond) if nc.Status() != nats.CONNECTED { continue } if _, err := js.Publish(ctx, "FOO.TEST1", 
[]byte(fmt.Sprintf("msg %d", i))); err != nil { fmt.Println("pub error: ", err) } i++ } } nats.go-1.41.0/examples/jetstream/js-ordered-fetch/000077500000000000000000000000001477351342400221215ustar00rootroot00000000000000nats.go-1.41.0/examples/jetstream/js-ordered-fetch/main.go000066400000000000000000000035441477351342400234020ustar00rootroot00000000000000// Copyright 2022-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "context" "fmt" "log" "time" "github.com/nats-io/nats.go" "github.com/nats-io/nats.go/jetstream" ) func main() { ctx, cancel := context.WithTimeout(context.Background(), 60*time.Minute) defer cancel() nc, err := nats.Connect("nats://127.0.0.1:4222") if err != nil { log.Fatal(err) } js, err := jetstream.New(nc) if err != nil { log.Fatal(err) } s, err := js.CreateStream(ctx, jetstream.StreamConfig{ Name: "TEST_STREAM", Subjects: []string{"FOO.*"}, }) if err != nil { log.Fatal(err) } cons, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{ MaxResetAttempts: 5, }) if err != nil { log.Fatal(err) } go endlessPublish(ctx, nc, js) for { msgs, err := cons.Fetch(100) if err != nil { fmt.Println(err) } for msg := range msgs.Messages() { fmt.Println(string(msg.Data())) msg.Ack() } if msgs.Error() != nil { fmt.Println("Error fetching messages: ", err) } } } func endlessPublish(ctx context.Context, nc *nats.Conn, js jetstream.JetStream) { var i int for { time.Sleep(500 * time.Millisecond) if nc.Status() != 
nats.CONNECTED { continue } if _, err := js.Publish(ctx, "FOO.TEST1", []byte(fmt.Sprintf("msg %d", i))); err != nil { fmt.Println("pub error: ", err) } i++ } } nats.go-1.41.0/examples/jetstream/js-ordered-messages/000077500000000000000000000000001477351342400226375ustar00rootroot00000000000000nats.go-1.41.0/examples/jetstream/js-ordered-messages/main.go000066400000000000000000000034641477351342400241210ustar00rootroot00000000000000// Copyright 2022-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package main import ( "context" "fmt" "log" "time" "github.com/nats-io/nats.go" "github.com/nats-io/nats.go/jetstream" ) func main() { ctx, cancel := context.WithTimeout(context.Background(), 60*time.Minute) defer cancel() nc, err := nats.Connect("nats://127.0.0.1:4222") if err != nil { log.Fatal(err) } js, err := jetstream.New(nc) if err != nil { log.Fatal(err) } s, err := js.CreateStream(ctx, jetstream.StreamConfig{ Name: "TEST_STREAM", Subjects: []string{"FOO.*"}, }) if err != nil { log.Fatal(err) } cons, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{ MaxResetAttempts: 5, }) if err != nil { log.Fatal(err) } go endlessPublish(ctx, nc, js) it, err := cons.Messages() if err != nil { log.Fatal(err) } defer it.Stop() for { msg, err := it.Next() if err != nil { fmt.Println(err) } fmt.Println(string(msg.Data())) msg.Ack() } } func endlessPublish(ctx context.Context, nc *nats.Conn, js jetstream.JetStream) { var i int for { time.Sleep(500 * time.Millisecond) if nc.Status() != nats.CONNECTED { continue } if _, err := js.Publish(ctx, "FOO.TEST1", []byte(fmt.Sprintf("msg %d", i))); err != nil { fmt.Println("pub error: ", err) } i++ } } nats.go-1.41.0/examples/jetstream/js-parallel-consume/000077500000000000000000000000001477351342400226515ustar00rootroot00000000000000nats.go-1.41.0/examples/jetstream/js-parallel-consume/main.go000066400000000000000000000042511477351342400241260ustar00rootroot00000000000000// Copyright 2022-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package main import ( "context" "fmt" "log" "os" "os/signal" "syscall" "time" "github.com/nats-io/nats.go" "github.com/nats-io/nats.go/jetstream" ) func main() { ctx, cancel := context.WithTimeout(context.Background(), 60*time.Minute) defer cancel() nc, err := nats.Connect("nats://127.0.0.1:4222") if err != nil { log.Fatal(err) } js, err := jetstream.New(nc) if err != nil { log.Fatal(err) } s, err := js.CreateStream(ctx, jetstream.StreamConfig{ Name: "TEST_STREAM", Subjects: []string{"FOO.*"}, }) if err != nil { log.Fatal(err) } cons, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{ Durable: "TestConsumerParallelConsume", AckPolicy: jetstream.AckExplicitPolicy, }) if err != nil { log.Fatal(err) } go endlessPublish(ctx, nc, js) for i := 0; i < 5; i++ { cc, err := cons.Consume(func(consumeID int) jetstream.MessageHandler { return func(msg jetstream.Msg) { fmt.Printf("Received msg on consume %d\n", consumeID) msg.Ack() } }(i), jetstream.ConsumeErrHandler(func(consumeCtx jetstream.ConsumeContext, err error) { fmt.Println(err) })) if err != nil { log.Fatal(err) } defer cc.Stop() } sig := make(chan os.Signal, 1) signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM) <-sig } func endlessPublish(ctx context.Context, nc *nats.Conn, js jetstream.JetStream) { var i int for { time.Sleep(500 * time.Millisecond) if nc.Status() != nats.CONNECTED { continue } if _, err := js.Publish(ctx, "FOO.TEST1", []byte(fmt.Sprintf("msg %d", i))); err != nil { fmt.Println("pub error: ", err) } i++ } } nats.go-1.41.0/examples/nats-bench/000077500000000000000000000000001477351342400170205ustar00rootroot00000000000000nats.go-1.41.0/examples/nats-bench/main.go000066400000000000000000000112611477351342400202740ustar00rootroot00000000000000// Copyright 2015-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in 
compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "flag" "fmt" "log" "os" "sync" "time" "github.com/nats-io/nats.go" "github.com/nats-io/nats.go/bench" ) // Some sane defaults const ( DefaultNumMsgs = 100000 DefaultNumPubs = 1 DefaultNumSubs = 0 DefaultMessageSize = 128 ) func usage() { log.Printf("Usage: nats-bench [-s server (%s)] [--tls] [-np NUM_PUBLISHERS] [-ns NUM_SUBSCRIBERS] [-n NUM_MSGS] [-ms MESSAGE_SIZE] [-csv csvfile] [-creds file] [-nkey file] \n", nats.DefaultURL) flag.PrintDefaults() } func showUsageAndExit(exitcode int) { usage() os.Exit(exitcode) } var benchmark *bench.Benchmark func main() { var urls = flag.String("s", nats.DefaultURL, "The nats server URLs (separated by comma)") var tls = flag.Bool("tls", false, "Use TLS Secure Connection") var numPubs = flag.Int("np", DefaultNumPubs, "Number of Concurrent Publishers") var numSubs = flag.Int("ns", DefaultNumSubs, "Number of Concurrent Subscribers") var numMsgs = flag.Int("n", DefaultNumMsgs, "Number of Messages to Publish") var msgSize = flag.Int("ms", DefaultMessageSize, "Size of the message.") var csvFile = flag.String("csv", "", "Save bench data to csv file") var userCreds = flag.String("creds", "", "User Credentials File") var nkeyFile = flag.String("nkey", "", "NKey Seed File") var showHelp = flag.Bool("h", false, "Show help message") log.SetFlags(0) flag.Usage = usage flag.Parse() if *showHelp { showUsageAndExit(0) } args := flag.Args() if len(args) != 1 { showUsageAndExit(1) } if *numMsgs <= 0 { log.Fatal("Number of messages should be greater than zero.") } // Connect Options. 
opts := []nats.Option{nats.Name("NATS Benchmark")} if *userCreds != "" && *nkeyFile != "" { log.Fatal("specify -seed or -creds") } // Use UserCredentials if *userCreds != "" { opts = append(opts, nats.UserCredentials(*userCreds)) } // Use Nkey authentication. if *nkeyFile != "" { opt, err := nats.NkeyOptionFromSeed(*nkeyFile) if err != nil { log.Fatal(err) } opts = append(opts, opt) } // Use TLS specified if *tls { opts = append(opts, nats.Secure(nil)) } benchmark = bench.NewBenchmark("NATS", *numSubs, *numPubs) var startwg sync.WaitGroup var donewg sync.WaitGroup donewg.Add(*numPubs + *numSubs) // Run Subscribers first startwg.Add(*numSubs) for i := 0; i < *numSubs; i++ { nc, err := nats.Connect(*urls, opts...) if err != nil { log.Fatalf("Can't connect: %v\n", err) } defer nc.Close() go runSubscriber(nc, &startwg, &donewg, *numMsgs, *msgSize) } startwg.Wait() // Now Publishers startwg.Add(*numPubs) pubCounts := bench.MsgsPerClient(*numMsgs, *numPubs) for i := 0; i < *numPubs; i++ { nc, err := nats.Connect(*urls, opts...) 
if err != nil { log.Fatalf("Can't connect: %v\n", err) } defer nc.Close() go runPublisher(nc, &startwg, &donewg, pubCounts[i], *msgSize) } log.Printf("Starting benchmark [msgs=%d, msgsize=%d, pubs=%d, subs=%d]\n", *numMsgs, *msgSize, *numPubs, *numSubs) startwg.Wait() donewg.Wait() benchmark.Close() fmt.Print(benchmark.Report()) if len(*csvFile) > 0 { csv := benchmark.CSV() os.WriteFile(*csvFile, []byte(csv), 0644) fmt.Printf("Saved metric data in csv file %s\n", *csvFile) } } func runPublisher(nc *nats.Conn, startwg, donewg *sync.WaitGroup, numMsgs int, msgSize int) { startwg.Done() args := flag.Args() subj := args[0] var msg []byte if msgSize > 0 { msg = make([]byte, msgSize) } start := time.Now() for i := 0; i < numMsgs; i++ { nc.Publish(subj, msg) } nc.Flush() benchmark.AddPubSample(bench.NewSample(numMsgs, msgSize, start, time.Now(), nc)) donewg.Done() } func runSubscriber(nc *nats.Conn, startwg, donewg *sync.WaitGroup, numMsgs int, msgSize int) { args := flag.Args() subj := args[0] received := 0 ch := make(chan time.Time, 2) sub, _ := nc.Subscribe(subj, func(msg *nats.Msg) { received++ if received == 1 { ch <- time.Now() } if received >= numMsgs { ch <- time.Now() } }) sub.SetPendingLimits(-1, -1) nc.Flush() startwg.Done() start := <-ch end := <-ch benchmark.AddSubSample(bench.NewSample(numMsgs, msgSize, start, end, nc)) nc.Close() donewg.Done() } nats.go-1.41.0/examples/nats-echo/000077500000000000000000000000001477351342400166575ustar00rootroot00000000000000nats.go-1.41.0/examples/nats-echo/main.go000066400000000000000000000122551477351342400201370ustar00rootroot00000000000000// Copyright 2018-2022 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "encoding/json" "flag" "fmt" "io" "log" "net/http" "os" "os/signal" "runtime" "syscall" "time" "github.com/nats-io/nats.go" ) // NOTE: Can test with demo servers. // nats-echo -s demo.nats.io func usage() { log.Printf("Usage: nats-echo [-s server] [-creds file] [-t] \n") flag.PrintDefaults() } func showUsageAndExit(exitcode int) { usage() os.Exit(exitcode) } func printMsg(m *nats.Msg, i int) { log.Printf("[#%d] Echoing from [%s] to [%s]: %q", i, m.Subject, m.Reply, m.Data) } func printStatusMsg(m *nats.Msg, i int) { log.Printf("[#%d] Sending status from [%s] to [%s]: %q", i, m.Subject, m.Reply, m.Data) } type serviceStatus struct { Id string `json:"id"` Geo string `json:"geo"` } func main() { var urls = flag.String("s", nats.DefaultURL, "The nats server URLs (separated by comma)") var userCreds = flag.String("creds", "", "User Credentials File") var nkeyFile = flag.String("nkey", "", "NKey Seed File") var serviceId = flag.String("id", "NATS Echo Service", "Identifier for this service") var showTime = flag.Bool("t", false, "Display timestamps") var showHelp = flag.Bool("h", false, "Show help message") var geoloc = flag.Bool("geo", false, "Display geo location of echo service") var geo string = "unknown" log.SetFlags(0) flag.Usage = usage flag.Parse() if *showHelp { showUsageAndExit(0) } args := flag.Args() if len(args) != 1 { showUsageAndExit(1) } // Lookup geo if requested if *geoloc { geo = lookupGeo() } // Connect Options. 
opts := []nats.Option{nats.Name(*serviceId)} opts = setupConnOptions(opts) if *userCreds != "" && *nkeyFile != "" { log.Fatal("specify -seed or -creds") } // Use UserCredentials if *userCreds != "" { opts = append(opts, nats.UserCredentials(*userCreds)) } // Use Nkey authentication. if *nkeyFile != "" { opt, err := nats.NkeyOptionFromSeed(*nkeyFile) if err != nil { log.Fatal(err) } opts = append(opts, opt) } // Connect to NATS nc, err := nats.Connect(*urls, opts...) if err != nil { log.Fatal(err) } subj, iEcho, iStatus := args[0], 0, 0 statusSubj := subj + ".status" nc.QueueSubscribe(subj, "echo", func(msg *nats.Msg) { iEcho++ printMsg(msg, iEcho) if msg.Reply != "" { // Just echo back what they sent us. var payload []byte if geo != "unknown" { payload = []byte(fmt.Sprintf("[%s]: %q", geo, msg.Data)) } else { payload = msg.Data } nc.Publish(msg.Reply, payload) } }) nc.Subscribe(statusSubj, func(msg *nats.Msg) { iStatus++ printStatusMsg(msg, iStatus) if msg.Reply != "" { payload, _ := json.Marshal(&serviceStatus{Id: *serviceId, Geo: geo}) nc.Publish(msg.Reply, payload) } }) nc.Flush() if err := nc.LastError(); err != nil { log.Fatal(err) } log.Printf("Echo Service ID: [%s]", *serviceId) log.Printf("Echo Service listening on [%s]\n", subj) log.Printf("Echo Service (Status) listening on [%s]\n", statusSubj) // Now handle signal to terminate so we can drain on exit. 
c := make(chan os.Signal, 1) signal.Notify(c, syscall.SIGINT) go func() { // Wait for signal <-c log.Printf("") nc.Drain() }() if *showTime { log.SetFlags(log.LstdFlags) } runtime.Goexit() } func setupConnOptions(opts []nats.Option) []nats.Option { totalWait := 10 * time.Minute reconnectDelay := time.Second opts = append(opts, nats.ReconnectWait(reconnectDelay)) opts = append(opts, nats.MaxReconnects(int(totalWait/reconnectDelay))) opts = append(opts, nats.DisconnectErrHandler(func(nc *nats.Conn, err error) { if !nc.IsClosed() { log.Printf("Disconnected due to: %s, will attempt reconnects for %.0fm", err, totalWait.Minutes()) } })) opts = append(opts, nats.ReconnectHandler(func(nc *nats.Conn) { log.Printf("Reconnected [%s]", nc.ConnectedUrl()) })) opts = append(opts, nats.ClosedHandler(func(nc *nats.Conn) { if !nc.IsClosed() { log.Fatal("Exiting: no servers available") } else { log.Fatal("Exiting") } })) return opts } // We only want region, country type geo struct { // There are others.. Region string Country string } // lookup our current region and country.. func lookupGeo() string { c := &http.Client{Timeout: 2 * time.Second} url := os.Getenv("ECHO_SVC_GEO_URL") if len(url) == 0 { url = "https://ipapi.co/json" } resp, err := c.Get(url) if err != nil || resp == nil { log.Fatalf("Could not retrieve geo location data: %v", err) } defer resp.Body.Close() body, _ := io.ReadAll(resp.Body) g := geo{} if err := json.Unmarshal(body, &g); err != nil { log.Fatalf("Error unmarshalling geo: %v", err) } return g.Region + ", " + g.Country } nats.go-1.41.0/examples/nats-pub/000077500000000000000000000000001477351342400165275ustar00rootroot00000000000000nats.go-1.41.0/examples/nats-pub/main.go000066400000000000000000000055121477351342400200050ustar00rootroot00000000000000// Copyright 2012-2022 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "flag" "log" "os" "github.com/nats-io/nats.go" ) // NOTE: Can test with demo servers. // nats-pub -s demo.nats.io func usage() { log.Printf("Usage: nats-pub [-s server] [-creds file] [-nkey file] [-tlscert file] [-tlskey file] [-tlscacert file] \n") flag.PrintDefaults() } func showUsageAndExit(exitcode int) { usage() os.Exit(exitcode) } func main() { var urls = flag.String("s", nats.DefaultURL, "The nats server URLs (separated by comma)") var userCreds = flag.String("creds", "", "User Credentials File") var nkeyFile = flag.String("nkey", "", "NKey Seed File") var tlsClientCert = flag.String("tlscert", "", "TLS client certificate file") var tlsClientKey = flag.String("tlskey", "", "Private key file for client certificate") var tlsCACert = flag.String("tlscacert", "", "CA certificate to verify peer against") var reply = flag.String("reply", "", "Sets a specific reply subject") var showHelp = flag.Bool("h", false, "Show help message") log.SetFlags(0) flag.Usage = usage flag.Parse() if *showHelp { showUsageAndExit(0) } args := flag.Args() if len(args) != 2 { showUsageAndExit(1) } // Connect Options. 
opts := []nats.Option{nats.Name("NATS Sample Publisher")} if *userCreds != "" && *nkeyFile != "" { log.Fatal("specify -seed or -creds") } // Use UserCredentials if *userCreds != "" { opts = append(opts, nats.UserCredentials(*userCreds)) } // Use TLS client authentication if *tlsClientCert != "" && *tlsClientKey != "" { opts = append(opts, nats.ClientCert(*tlsClientCert, *tlsClientKey)) } // Use specific CA certificate if *tlsCACert != "" { opts = append(opts, nats.RootCAs(*tlsCACert)) } // Use Nkey authentication. if *nkeyFile != "" { opt, err := nats.NkeyOptionFromSeed(*nkeyFile) if err != nil { log.Fatal(err) } opts = append(opts, opt) } // Connect to NATS nc, err := nats.Connect(*urls, opts...) if err != nil { log.Fatal(err) } defer nc.Close() subj, msg := args[0], []byte(args[1]) if reply != nil && *reply != "" { nc.PublishRequest(subj, *reply, msg) } else { nc.Publish(subj, msg) } nc.Flush() if err := nc.LastError(); err != nil { log.Fatal(err) } else { log.Printf("Published [%s] : '%s'\n", subj, msg) } } nats.go-1.41.0/examples/nats-qsub/000077500000000000000000000000001477351342400167135ustar00rootroot00000000000000nats.go-1.41.0/examples/nats-qsub/main.go000066400000000000000000000066071477351342400201770ustar00rootroot00000000000000// Copyright 2012-2022 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "flag" "log" "os" "os/signal" "time" "github.com/nats-io/nats.go" ) // NOTE: Can test with demo servers. 
// nats-qsub -s demo.nats.io func usage() { log.Printf("Usage: nats-qsub [-s server] [-creds file] [-nkey file] [-t] \n") flag.PrintDefaults() } func showUsageAndExit(exitcode int) { usage() os.Exit(exitcode) } func printMsg(m *nats.Msg, i int) { log.Printf("[#%d] Received on [%s] Queue[%s] Pid[%d]: '%s'", i, m.Subject, m.Sub.Queue, os.Getpid(), string(m.Data)) } func main() { var urls = flag.String("s", nats.DefaultURL, "The nats server URLs (separated by comma)") var userCreds = flag.String("creds", "", "User Credentials File") var nkeyFile = flag.String("nkey", "", "NKey Seed File") var showTime = flag.Bool("t", false, "Display timestamps") var showHelp = flag.Bool("h", false, "Show help message") log.SetFlags(0) flag.Usage = usage flag.Parse() if *showHelp { showUsageAndExit(0) } args := flag.Args() if len(args) != 2 { showUsageAndExit(1) } // Connect Options. opts := []nats.Option{nats.Name("NATS Sample Queue Subscriber")} opts = setupConnOptions(opts) if *userCreds != "" && *nkeyFile != "" { log.Fatal("specify -seed or -creds") } // Use UserCredentials if *userCreds != "" { opts = append(opts, nats.UserCredentials(*userCreds)) } // Use Nkey authentication. if *nkeyFile != "" { opt, err := nats.NkeyOptionFromSeed(*nkeyFile) if err != nil { log.Fatal(err) } opts = append(opts, opt) } // Connect to NATS nc, err := nats.Connect(*urls, opts...) if err != nil { log.Fatal(err) } subj, queue, i := args[0], args[1], 0 nc.QueueSubscribe(subj, queue, func(msg *nats.Msg) { i++ printMsg(msg, i) }) nc.Flush() if err := nc.LastError(); err != nil { log.Fatal(err) } log.Printf("Listening on [%s], queue group [%s]", subj, queue) if *showTime { log.SetFlags(log.LstdFlags) } // Setup the interrupt handler to drain so we don't miss // requests when scaling down. 
c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt) <-c log.Println() log.Printf("Draining...") nc.Drain() log.Fatalf("Exiting") } func setupConnOptions(opts []nats.Option) []nats.Option { totalWait := 10 * time.Minute reconnectDelay := time.Second opts = append(opts, nats.ReconnectWait(reconnectDelay)) opts = append(opts, nats.MaxReconnects(int(totalWait/reconnectDelay))) opts = append(opts, nats.DisconnectErrHandler(func(nc *nats.Conn, err error) { log.Printf("Disconnected due to: %s, will attempt reconnects for %.0fm", err, totalWait.Minutes()) })) opts = append(opts, nats.ReconnectHandler(func(nc *nats.Conn) { log.Printf("Reconnected [%s]", nc.ConnectedUrl()) })) opts = append(opts, nats.ClosedHandler(func(nc *nats.Conn) { log.Fatalf("Exiting: %v", nc.LastError()) })) return opts } nats.go-1.41.0/examples/nats-req/000077500000000000000000000000001477351342400165305ustar00rootroot00000000000000nats.go-1.41.0/examples/nats-req/main.go000066400000000000000000000044261477351342400200110ustar00rootroot00000000000000// Copyright 2012-2022 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "flag" "log" "os" "time" "github.com/nats-io/nats.go" ) // NOTE: Can test with demo servers. 
// nats-req -s demo.nats.io func usage() { log.Printf("Usage: nats-req [-s server] [-creds file] [-nkey file] \n") flag.PrintDefaults() } func showUsageAndExit(exitcode int) { usage() os.Exit(exitcode) } func main() { var urls = flag.String("s", nats.DefaultURL, "The nats server URLs (separated by comma)") var userCreds = flag.String("creds", "", "User Credentials File") var nkeyFile = flag.String("nkey", "", "NKey Seed File") var showHelp = flag.Bool("h", false, "Show help message") log.SetFlags(0) flag.Usage = usage flag.Parse() if *showHelp { showUsageAndExit(0) } args := flag.Args() if len(args) < 2 { showUsageAndExit(1) } // Connect Options. opts := []nats.Option{nats.Name("NATS Sample Requestor")} if *userCreds != "" && *nkeyFile != "" { log.Fatal("specify -seed or -creds") } // Use UserCredentials if *userCreds != "" { opts = append(opts, nats.UserCredentials(*userCreds)) } // Use Nkey authentication. if *nkeyFile != "" { opt, err := nats.NkeyOptionFromSeed(*nkeyFile) if err != nil { log.Fatal(err) } opts = append(opts, opt) } // Connect to NATS nc, err := nats.Connect(*urls, opts...) if err != nil { log.Fatal(err) } defer nc.Close() subj, payload := args[0], []byte(args[1]) msg, err := nc.Request(subj, payload, 2*time.Second) if err != nil { if nc.LastError() != nil { log.Fatalf("%v for request", nc.LastError()) } log.Fatalf("%v for request", err) } log.Printf("Published [%s] : '%s'", subj, payload) log.Printf("Received [%v] : '%s'", msg.Subject, string(msg.Data)) } nats.go-1.41.0/examples/nats-rply/000077500000000000000000000000001477351342400167275ustar00rootroot00000000000000nats.go-1.41.0/examples/nats-rply/main.go000066400000000000000000000066651477351342400202170ustar00rootroot00000000000000// Copyright 2012-2022 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "flag" "log" "os" "os/signal" "time" "github.com/nats-io/nats.go" ) // NOTE: Can test with demo servers. // nats-rply -s demo.nats.io func usage() { log.Printf("Usage: nats-rply [-s server] [-creds file] [-nkey file] [-t] [-q queue] \n") flag.PrintDefaults() } func showUsageAndExit(exitcode int) { usage() os.Exit(exitcode) } func printMsg(m *nats.Msg, i int) { log.Printf("[#%d] Received on [%s]: '%s'\n", i, m.Subject, string(m.Data)) } func main() { var urls = flag.String("s", nats.DefaultURL, "The nats server URLs (separated by comma)") var userCreds = flag.String("creds", "", "User Credentials File") var nkeyFile = flag.String("nkey", "", "NKey Seed File") var showTime = flag.Bool("t", false, "Display timestamps") var queueName = flag.String("q", "NATS-RPLY-22", "Queue Group Name") var showHelp = flag.Bool("h", false, "Show help message") log.SetFlags(0) flag.Usage = usage flag.Parse() if *showHelp { showUsageAndExit(0) } args := flag.Args() if len(args) < 2 { showUsageAndExit(1) } // Connect Options. opts := []nats.Option{nats.Name("NATS Sample Responder")} opts = setupConnOptions(opts) if *userCreds != "" && *nkeyFile != "" { log.Fatal("specify -seed or -creds") } // Use UserCredentials if *userCreds != "" { opts = append(opts, nats.UserCredentials(*userCreds)) } // Use Nkey authentication. if *nkeyFile != "" { opt, err := nats.NkeyOptionFromSeed(*nkeyFile) if err != nil { log.Fatal(err) } opts = append(opts, opt) } // Connect to NATS nc, err := nats.Connect(*urls, opts...) 
if err != nil { log.Fatal(err) } subj, reply, i := args[0], args[1], 0 nc.QueueSubscribe(subj, *queueName, func(msg *nats.Msg) { i++ printMsg(msg, i) msg.Respond([]byte(reply)) }) nc.Flush() if err := nc.LastError(); err != nil { log.Fatal(err) } log.Printf("Listening on [%s]", subj) if *showTime { log.SetFlags(log.LstdFlags) } // Setup the interrupt handler to drain so we don't miss // requests when scaling down. c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt) <-c log.Println() log.Printf("Draining...") nc.Drain() log.Fatalf("Exiting") } func setupConnOptions(opts []nats.Option) []nats.Option { totalWait := 10 * time.Minute reconnectDelay := time.Second opts = append(opts, nats.ReconnectWait(reconnectDelay)) opts = append(opts, nats.MaxReconnects(int(totalWait/reconnectDelay))) opts = append(opts, nats.DisconnectErrHandler(func(nc *nats.Conn, err error) { log.Printf("Disconnected due to: %s, will attempt reconnects for %.0fm", err, totalWait.Minutes()) })) opts = append(opts, nats.ReconnectHandler(func(nc *nats.Conn) { log.Printf("Reconnected [%s]", nc.ConnectedUrl()) })) opts = append(opts, nats.ClosedHandler(func(nc *nats.Conn) { log.Fatalf("Exiting: %v", nc.LastError()) })) return opts } nats.go-1.41.0/examples/nats-sub/000077500000000000000000000000001477351342400165325ustar00rootroot00000000000000nats.go-1.41.0/examples/nats-sub/main.go000066400000000000000000000071571477351342400200170ustar00rootroot00000000000000// Copyright 2012-2022 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package main import ( "flag" "log" "os" "runtime" "time" "github.com/nats-io/nats.go" ) // NOTE: Can test with demo servers. // nats-sub -s demo.nats.io func usage() { log.Printf("Usage: nats-sub [-s server] [-creds file] [-nkey file] [-tlscert file] [-tlskey file] [-tlscacert file] [-t] \n") flag.PrintDefaults() } func showUsageAndExit(exitcode int) { usage() os.Exit(exitcode) } func printMsg(m *nats.Msg, i int) { log.Printf("[#%d] Received on [%s]: '%s'", i, m.Subject, string(m.Data)) } func main() { var urls = flag.String("s", nats.DefaultURL, "The nats server URLs (separated by comma)") var userCreds = flag.String("creds", "", "User Credentials File") var nkeyFile = flag.String("nkey", "", "NKey Seed File") var tlsClientCert = flag.String("tlscert", "", "TLS client certificate file") var tlsClientKey = flag.String("tlskey", "", "Private key file for client certificate") var tlsCACert = flag.String("tlscacert", "", "CA certificate to verify peer against") var showTime = flag.Bool("t", false, "Display timestamps") var showHelp = flag.Bool("h", false, "Show help message") log.SetFlags(0) flag.Usage = usage flag.Parse() if *showHelp { showUsageAndExit(0) } args := flag.Args() if len(args) != 1 { showUsageAndExit(1) } // Connect Options. opts := []nats.Option{nats.Name("NATS Sample Subscriber")} opts = setupConnOptions(opts) if *userCreds != "" && *nkeyFile != "" { log.Fatal("specify -seed or -creds") } // Use UserCredentials if *userCreds != "" { opts = append(opts, nats.UserCredentials(*userCreds)) } // Use TLS client authentication if *tlsClientCert != "" && *tlsClientKey != "" { opts = append(opts, nats.ClientCert(*tlsClientCert, *tlsClientKey)) } // Use specific CA certificate if *tlsCACert != "" { opts = append(opts, nats.RootCAs(*tlsCACert)) } // Use Nkey authentication. 
if *nkeyFile != "" { opt, err := nats.NkeyOptionFromSeed(*nkeyFile) if err != nil { log.Fatal(err) } opts = append(opts, opt) } // Connect to NATS nc, err := nats.Connect(*urls, opts...) if err != nil { log.Fatal(err) } subj, i := args[0], 0 nc.Subscribe(subj, func(msg *nats.Msg) { i += 1 printMsg(msg, i) }) nc.Flush() if err := nc.LastError(); err != nil { log.Fatal(err) } log.Printf("Listening on [%s]", subj) if *showTime { log.SetFlags(log.LstdFlags) } runtime.Goexit() } func setupConnOptions(opts []nats.Option) []nats.Option { totalWait := 10 * time.Minute reconnectDelay := time.Second opts = append(opts, nats.ReconnectWait(reconnectDelay)) opts = append(opts, nats.MaxReconnects(int(totalWait/reconnectDelay))) opts = append(opts, nats.DisconnectErrHandler(func(nc *nats.Conn, err error) { log.Printf("Disconnected due to:%s, will attempt reconnects for %.0fm", err, totalWait.Minutes()) })) opts = append(opts, nats.ReconnectHandler(func(nc *nats.Conn) { log.Printf("Reconnected [%s]", nc.ConnectedUrl()) })) opts = append(opts, nats.ClosedHandler(func(nc *nats.Conn) { log.Fatalf("Exiting: %v", nc.LastError()) })) return opts } nats.go-1.41.0/go.mod000066400000000000000000000004311477351342400142640ustar00rootroot00000000000000module github.com/nats-io/nats.go go 1.23.0 require ( github.com/klauspost/compress v1.18.0 github.com/nats-io/nkeys v0.4.9 github.com/nats-io/nuid v1.0.1 golang.org/x/text v0.23.0 ) require ( golang.org/x/crypto v0.31.0 // indirect golang.org/x/sys v0.28.0 // indirect ) nats.go-1.41.0/go.sum000066400000000000000000000017221477351342400143150ustar00rootroot00000000000000github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/nats-io/nkeys v0.4.9 h1:qe9Faq2Gxwi6RZnZMXfmGMZkg3afLLOtrU+gDZJ35b0= github.com/nats-io/nkeys v0.4.9/go.mod h1:jcMqs+FLG+W5YO36OX6wFIFcmpdAns+w1Wm6D3I/evE= github.com/nats-io/nuid v1.0.1 
h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= nats.go-1.41.0/go_test.mod000066400000000000000000000011711477351342400153250ustar00rootroot00000000000000module github.com/nats-io/nats.go go 1.23.0 require ( github.com/golang/protobuf v1.4.2 github.com/klauspost/compress v1.18.0 github.com/nats-io/jwt v1.2.2 github.com/nats-io/nats-server/v2 v2.11.0 github.com/nats-io/nkeys v0.4.10 github.com/nats-io/nuid v1.0.1 go.uber.org/goleak v1.3.0 golang.org/x/text v0.23.0 google.golang.org/protobuf v1.23.0 ) require ( github.com/google/go-tpm v0.9.3 // indirect github.com/minio/highwayhash v1.0.3 // indirect github.com/nats-io/jwt/v2 v2.7.3 // indirect golang.org/x/crypto v0.36.0 // indirect golang.org/x/sys v0.31.0 // indirect golang.org/x/time v0.11.0 // indirect ) nats.go-1.41.0/go_test.sum000066400000000000000000000132611477351342400153550ustar00rootroot00000000000000github.com/antithesishq/antithesis-sdk-go v0.4.3-default-no-op h1:+OSa/t11TFhqfrX0EOSqQBDJ0YlpmK0rDSiB19dg9M0= github.com/antithesishq/antithesis-sdk-go v0.4.3-default-no-op/go.mod h1:IUpT2DPAKh6i/YhSbt6Gl3v2yvUZjmKncl7U91fup7E= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod 
h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-tpm v0.9.3 h1:+yx0/anQuGzi+ssRqeD6WpXjW2L/V0dItUayO0i9sRc= github.com/google/go-tpm v0.9.3/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/minio/highwayhash v1.0.3 h1:kbnuUMoHYyVl7szWjSxJnxw11k2U709jqFPPmIUyD6Q= github.com/minio/highwayhash v1.0.3/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ= github.com/nats-io/jwt v1.2.2 h1:w3GMTO969dFg+UOKTmmyuu7IGdusK+7Ytlt//OYH/uU= github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q= github.com/nats-io/jwt/v2 v2.7.3 h1:6bNPK+FXgBeAqdj4cYQ0F8ViHRbi7woQLq4W29nUAzE= github.com/nats-io/jwt/v2 v2.7.3/go.mod h1:GvkcbHhKquj3pkioy5put1wvPxs78UlZ7D/pY+BgZk4= github.com/nats-io/nats-server/v2 v2.11.0 h1:fdwAT1d6DZW/4LUz5rkvQUe5leGEwjjOQYntzVRKvjE= github.com/nats-io/nats-server/v2 v2.11.0/go.mod h1:leXySghbdtXSUmWem8K9McnJ6xbJOb0t9+NQ5HTRZjI= 
github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= github.com/nats-io/nkeys v0.4.10 h1:glmRrpCmYLHByYcePvnTBEAwawwapjCPMjy2huw20wc= github.com/nats-io/nkeys v0.4.10/go.mod h1:OjRrnIKnWBFl+s4YK5ChQfvHP2fxqZexrKJoVVyWB3U= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.23.0 
h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= nats.go-1.41.0/internal/000077500000000000000000000000001477351342400147745ustar00rootroot00000000000000nats.go-1.41.0/internal/parser/000077500000000000000000000000001477351342400162705ustar00rootroot00000000000000nats.go-1.41.0/internal/parser/parse.go000066400000000000000000000061721477351342400177370ustar00rootroot00000000000000// Copyright 2020-2022 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package parser import ( "errors" "fmt" ) const ( AckDomainTokenPos = iota + 2 AckAccHashTokenPos AckStreamTokenPos AckConsumerTokenPos AckNumDeliveredTokenPos AckStreamSeqTokenPos AckConsumerSeqTokenPos AckTimestampSeqTokenPos AckNumPendingTokenPos ) var ErrInvalidSubjectFormat = errors.New("invalid format of ACK subject") // Quick parser for positive numbers in ack reply encoding. // NOTE: This parser does not detect uint64 overflow func ParseNum(d string) (n uint64) { if len(d) == 0 { return 0 } // ASCII numbers 0-9 const ( asciiZero = 48 asciiNine = 57 ) for _, dec := range d { if dec < asciiZero || dec > asciiNine { return 0 } n = n*10 + uint64(dec) - asciiZero } return } func GetMetadataFields(subject string) ([]string, error) { v1TokenCounts, v2TokenCounts := 9, 12 var start int tokens := make([]string, 0, v2TokenCounts) for i := 0; i < len(subject); i++ { if subject[i] == '.' { tokens = append(tokens, subject[start:i]) start = i + 1 } } tokens = append(tokens, subject[start:]) // // Newer server will include the domain name and account hash in the subject, // and a token at the end. // // Old subject was: // $JS.ACK....... // // New subject would be: // $JS.ACK.......... // // v1 has 9 tokens, v2 has 12, but we must not be strict on the 12th since // it may be removed in the future. Also, the library has no use for it. // The point is that a v2 ACK subject is valid if it has at least 11 tokens. 
// tokensLen := len(tokens) // If lower than 9 or more than 9 but less than 11, report an error if tokensLen < v1TokenCounts || (tokensLen > v1TokenCounts && tokensLen < v2TokenCounts-1) { return nil, ErrInvalidSubjectFormat } if tokens[0] != "$JS" || tokens[1] != "ACK" { return nil, fmt.Errorf("%w: subject should start with $JS.ACK", ErrInvalidSubjectFormat) } // For v1 style, we insert 2 empty tokens (domain and hash) so that the // rest of the library references known fields at a constant location. if tokensLen == v1TokenCounts { // Extend the array (we know the backend is big enough) tokens = append(tokens[:AckDomainTokenPos+2], tokens[AckDomainTokenPos:]...) // Clear the domain and hash tokens tokens[AckDomainTokenPos], tokens[AckAccHashTokenPos] = "", "" } else if tokens[AckDomainTokenPos] == "_" { // If domain is "_", replace with empty value. tokens[AckDomainTokenPos] = "" } return tokens, nil } nats.go-1.41.0/internal/parser/parse_test.go000066400000000000000000000077611477351342400210030ustar00rootroot00000000000000// Copyright 2020-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package parser import ( "errors" "math" "reflect" "strconv" "strings" "testing" ) func TestParseNum(t *testing.T) { tests := []struct { name string given string expected uint64 }{ { name: "parse 191817", given: "191817", expected: 191817, }, { name: "parse 0", given: "0", expected: 0, }, { name: "parse 00", given: "00", expected: 0, }, { name: "empty string", given: "", expected: 0, }, { name: "negative number", given: "-123", expected: 0, }, { name: "not a number", given: "abc", expected: 0, }, { name: "max uit64", given: strconv.FormatUint(math.MaxUint64, 10), expected: math.MaxUint64, }, { name: "max uit64 + 2, overflow", given: "18446744073709551617", expected: 1, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { res := ParseNum(test.given) if res != test.expected { t.Fatalf("Invalid result; want: %d; got: %d", test.expected, res) } }) } } func FuzzParseNum(f *testing.F) { testcases := []string{"191817", " ", "-123", "abc"} for _, tc := range testcases { f.Add(tc) } f.Fuzz(func(t *testing.T, given string) { given = strings.TrimLeft(given, "+") res := ParseNum(given) parsed, err := strconv.ParseUint(given, 10, 64) if err != nil && !errors.Is(err, strconv.ErrRange) { if res != 0 { t.Errorf("given: %s; expected: -1; got: %d; err: %v", given, res, err) } } else if err == nil && res != parsed { t.Errorf("given: %s; expected: %d; got: %d", given, parsed, res) } }) } func TestGetMetadataFields(t *testing.T) { tests := []struct { name string subject string expected []string withError error }{ { name: "parse v2 tokens", subject: "$JS.ACK.domain.hash-123.stream.cons.100.200.150.123456789.100.token", expected: []string{"$JS", "ACK", "domain", "hash-123", "stream", "cons", "100", "200", "150", "123456789", "100", "token"}, }, { name: "parse v2 tokens with underscore domain", subject: "$JS.ACK._.hash-123.stream.cons.100.200.150.123456789.100.token", expected: []string{"$JS", "ACK", "", "hash-123", "stream", "cons", "100", "200", "150", "123456789", 
"100", "token"}, }, { name: "parse v1 tokens", subject: "$JS.ACK.stream.cons.100.200.150.123456789.100", expected: []string{"$JS", "ACK", "", "", "stream", "cons", "100", "200", "150", "123456789", "100"}, }, { name: "invalid start of subject", subject: "$ABC.123.stream.cons.100.200.150.123456789.100", withError: ErrInvalidSubjectFormat, }, { name: "invalid subject length (10)", subject: "$JS.ACK.stream.cons.100.200.150.123456789.100.ABC", withError: ErrInvalidSubjectFormat, }, { name: "invalid subject length (5)", subject: "$JS.ACK.stream.cons.100", withError: ErrInvalidSubjectFormat, }, { name: "invalid subject ", subject: "", withError: ErrInvalidSubjectFormat, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { res, err := GetMetadataFields(test.subject) if test.withError != nil { if err == nil || !errors.Is(err, test.withError) { t.Fatalf("Expected error: %v; got: %v", test.withError, err) } return } if !reflect.DeepEqual(test.expected, res) { t.Fatalf("Invalid result; want: %v; got: %v", test.expected, res) } }) } } nats.go-1.41.0/internal/syncx/000077500000000000000000000000001477351342400161405ustar00rootroot00000000000000nats.go-1.41.0/internal/syncx/map.go000066400000000000000000000034441477351342400172510ustar00rootroot00000000000000// Copyright 2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package syncx import "sync" // Map is a type-safe wrapper around sync.Map. // It is safe for concurrent use. 
// The zero value of Map is an empty map ready to use. type Map[K comparable, V any] struct { m sync.Map } func (m *Map[K, V]) Load(key K) (V, bool) { v, ok := m.m.Load(key) if !ok { var empty V return empty, false } return v.(V), true } func (m *Map[K, V]) Store(key K, value V) { m.m.Store(key, value) } func (m *Map[K, V]) Delete(key K) { m.m.Delete(key) } func (m *Map[K, V]) Range(f func(key K, value V) bool) { m.m.Range(func(key, value any) bool { return f(key.(K), value.(V)) }) } func (m *Map[K, V]) LoadOrStore(key K, value V) (V, bool) { v, loaded := m.m.LoadOrStore(key, value) return v.(V), loaded } func (m *Map[K, V]) LoadAndDelete(key K) (V, bool) { v, ok := m.m.LoadAndDelete(key) if !ok { var empty V return empty, false } return v.(V), true } func (m *Map[K, V]) CompareAndSwap(key K, old, new V) bool { return m.m.CompareAndSwap(key, old, new) } func (m *Map[K, V]) CompareAndDelete(key K, value V) bool { return m.m.CompareAndDelete(key, value) } func (m *Map[K, V]) Swap(key K, value V) (V, bool) { previous, loaded := m.m.Swap(key, value) return previous.(V), loaded } nats.go-1.41.0/internal/syncx/map_test.go000066400000000000000000000066401477351342400203110ustar00rootroot00000000000000// Copyright 2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package syncx import ( "testing" ) func TestMapLoad(t *testing.T) { var m Map[int, string] m.Store(1, "one") v, ok := m.Load(1) if !ok || v != "one" { t.Errorf("Load(1) = %v, %v; want 'one', true", v, ok) } v, ok = m.Load(2) if ok || v != "" { t.Errorf("Load(2) = %v, %v; want '', false", v, ok) } } func TestMapStore(t *testing.T) { var m Map[int, string] m.Store(1, "one") v, ok := m.Load(1) if !ok || v != "one" { t.Errorf("Load(1) after Store(1, 'one') = %v, %v; want 'one', true", v, ok) } } func TestMapDelete(t *testing.T) { var m Map[int, string] m.Store(1, "one") m.Delete(1) v, ok := m.Load(1) if ok || v != "" { t.Errorf("Load(1) after Delete(1) = %v, %v; want '', false", v, ok) } } func TestMapRange(t *testing.T) { var m Map[int, string] m.Store(1, "one") m.Store(2, "two") var keys []int var values []string m.Range(func(key int, value string) bool { keys = append(keys, key) values = append(values, value) return true }) if len(keys) != 2 || len(values) != 2 { t.Errorf("Range() keys = %v, values = %v; want 2 keys and 2 values", keys, values) } } func TestMapLoadOrStore(t *testing.T) { var m Map[int, string] v, loaded := m.LoadOrStore(1, "one") if loaded || v != "one" { t.Errorf("LoadOrStore(1, 'one') = %v, %v; want 'one', false", v, loaded) } v, loaded = m.LoadOrStore(1, "uno") if !loaded || v != "one" { t.Errorf("LoadOrStore(1, 'uno') = %v, %v; want 'one', true", v, loaded) } } func TestMapLoadAndDelete(t *testing.T) { var m Map[int, string] m.Store(1, "one") v, ok := m.LoadAndDelete(1) if !ok || v != "one" { t.Errorf("LoadAndDelete(1) = %v, %v; want 'one', true", v, ok) } v, ok = m.Load(1) if ok || v != "" { t.Errorf("Load(1) after LoadAndDelete(1) = %v, %v; want '', false", v, ok) } // Test that LoadAndDelete on a missing key returns the zero value. 
v, ok = m.LoadAndDelete(2) if ok || v != "" { t.Errorf("LoadAndDelete(2) = %v, %v; want '', false", v, ok) } } func TestMapCompareAndSwap(t *testing.T) { var m Map[int, string] m.Store(1, "one") ok := m.CompareAndSwap(1, "one", "uno") if !ok { t.Errorf("CompareAndSwap(1, 'one', 'uno') = false; want true") } v, _ := m.Load(1) if v != "uno" { t.Errorf("Load(1) after CompareAndSwap = %v; want 'uno'", v) } } func TestMapCompareAndDelete(t *testing.T) { var m Map[int, string] m.Store(1, "one") ok := m.CompareAndDelete(1, "one") if !ok { t.Errorf("CompareAndDelete(1, 'one') = false; want true") } v, _ := m.Load(1) if v != "" { t.Errorf("Load(1) after CompareAndDelete = %v; want ''", v) } } func TestMapSwap(t *testing.T) { var m Map[int, string] m.Store(1, "one") v, loaded := m.Swap(1, "uno") if !loaded || v != "one" { t.Errorf("Swap(1, 'uno') = %v, %v; want 'one', true", v, loaded) } v, _ = m.Load(1) if v != "uno" { t.Errorf("Load(1) after Swap = %v; want 'uno'", v) } } nats.go-1.41.0/jetstream/000077500000000000000000000000001477351342400151565ustar00rootroot00000000000000nats.go-1.41.0/jetstream/README.md000066400000000000000000001025531477351342400164430ustar00rootroot00000000000000 # JetStream Simplified Client [![JetStream API Reference](https://pkg.go.dev/badge/github.com/nats-io/nats.go/jetstream.svg)](https://pkg.go.dev/github.com/nats-io/nats.go/jetstream) This doc covers the basic usage of the `jetstream` package in `nats.go` client. 
- [Overview](#overview) - [Basic usage](#basic-usage) - [Streams](#streams) - [Stream management (CRUD)](#stream-management-crud) - [Listing streams and stream names](#listing-streams-and-stream-names) - [Stream-specific operations](#stream-specific-operations) - [Consumers](#consumers) - [Consumers management](#consumers-management) - [Listing consumers and consumer names](#listing-consumers-and-consumer-names) - [Ordered consumers](#ordered-consumers) - [Receiving messages from the consumer](#receiving-messages-from-the-consumer) - [Single fetch](#single-fetch) - [Continuous polling](#continuous-polling) - [Using `Consume()` receive messages in a callback](#using-consume-receive-messages-in-a-callback) - [Using `Messages()` to iterate over incoming messages](#using-messages-to-iterate-over-incoming-messages) - [Publishing on stream](#publishing-on-stream) - [Synchronous publish](#synchronous-publish) - [Async publish](#async-publish) - [KeyValue Store](#keyvalue-store) - [Basic usage of KV bucket](#basic-usage-of-kv-bucket) - [Watching for changes on a bucket](#watching-for-changes-on-a-bucket) - [Additional operations on a bucket](#additional-operations-on-a-bucket) - [Object Store](#object-store) - [Basic usage of Object Store](#basic-usage-of-object-store) - [Watching for changes on a store](#watching-for-changes-on-a-store) - [Additional operations on a store](#additional-operations-on-a-store) - [Examples](#examples) ## Overview `jetstream` package is a new client API to interact with NATS JetStream, aiming to replace the JetStream client implementation from `nats` package. The main goal of this package is to provide a simple and clear way to interact with JetStream API. 
Key differences between `jetstream` and `nats` packages include: - Using smaller, simpler interfaces to manage streams and consumers - Using more granular and predictable approach to consuming messages from a stream, instead of relying on often complicated and unpredictable `Subscribe()` method (and all of its flavors) - Allowing the usage of pull consumers to continuously receive incoming messages (including ordered consumer functionality) - Separating JetStream context from core NATS `jetstream` package provides several ways of interacting with the API: - `JetStream` - top-level interface, used to create and manage streams, consumers and publishing messages - `Stream` - used to manage consumers for a specific stream, as well as performing stream-specific operations (purging, fetching and deleting messages by sequence number, fetching stream info) - `Consumer` - used to get information about a consumer as well as consuming messages - `Msg` - used for message-specific operations - reading data, headers and metadata, as well as performing various types of acknowledgements Additionally, `jetstream` exposes [KeyValue Store](#keyvalue-store) and [ObjectStore](#object-store) capabilities. KV and Object stores are abstraction layers on top of JetStream Streams, simplifying key value and large data storage on Streams. > __NOTE__: `jetstream` requires nats-server >= 2.9.0 to work correctly. 
## Basic usage ```go package main import ( "context" "fmt" "strconv" "time" "github.com/nats-io/nats.go" "github.com/nats-io/nats.go/jetstream" ) func main() { // In the `jetstream` package, almost all API calls rely on `context.Context` for timeout/cancellation handling ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() nc, _ := nats.Connect(nats.DefaultURL) // Create a JetStream management interface js, _ := jetstream.New(nc) // Create a stream s, _ := js.CreateStream(ctx, jetstream.StreamConfig{ Name: "ORDERS", Subjects: []string{"ORDERS.*"}, }) // Publish some messages for i := 0; i < 100; i++ { js.Publish(ctx, "ORDERS.new", []byte("hello message "+strconv.Itoa(i))) fmt.Printf("Published hello message %d\n", i) } // Create durable consumer c, _ := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{ Durable: "CONS", AckPolicy: jetstream.AckExplicitPolicy, }) // Get 10 messages from the consumer messageCounter := 0 msgs, err := c.Fetch(10) if err != nil { // handle error } for msg := range msgs.Messages() { msg.Ack() fmt.Printf("Received a JetStream message via fetch: %s\n", string(msg.Data())) messageCounter++ } fmt.Printf("received %d messages\n", messageCounter) if msgs.Error() != nil { fmt.Println("Error during Fetch(): ", msgs.Error()) } // Receive messages continuously in a callback cons, _ := c.Consume(func(msg jetstream.Msg) { msg.Ack() fmt.Printf("Received a JetStream message via callback: %s\n", string(msg.Data())) messageCounter++ }) defer cons.Stop() // Iterate over messages continuously it, _ := c.Messages() for i := 0; i < 10; i++ { msg, _ := it.Next() msg.Ack() fmt.Printf("Received a JetStream message via iterator: %s\n", string(msg.Data())) messageCounter++ } it.Stop() // block until all 100 published messages have been processed for messageCounter < 100 { time.Sleep(10 * time.Millisecond) } } ``` ## Streams `jetstream` provides methods to manage and list streams, as well as perform stream-specific 
operations (purging, fetching/deleting messages by sequence id) ### Stream management (CRUD) ```go js, _ := jetstream.New(nc) // create a stream (this is an idempotent operation) s, _ := js.CreateStream(ctx, jetstream.StreamConfig{ Name: "ORDERS", Subjects: []string{"ORDERS.*"}, }) // update a stream s, _ = js.UpdateStream(ctx, jetstream.StreamConfig{ Name: "ORDERS", Subjects: []string{"ORDERS.*"}, Description: "updated stream", }) // get stream handle s, _ = js.Stream(ctx, "ORDERS") // delete a stream js.DeleteStream(ctx, "ORDERS") ``` ### Listing streams and stream names ```go // list streams streams := js.ListStreams(ctx) for s := range streams.Info() { fmt.Println(s.Config.Name) } if streams.Err() != nil { fmt.Println("Unexpected error occurred") } // list stream names names := js.StreamNames(ctx) for name := range names.Name() { fmt.Println(name) } if names.Err() != nil { fmt.Println("Unexpected error occurred") } ``` ### Stream-specific operations Using `Stream` interface, it is also possible to: - Purge a stream ```go // remove all messages from a stream _ = s.Purge(ctx) // remove all messages from a stream that are stored on a specific subject _ = s.Purge(ctx, jetstream.WithPurgeSubject("ORDERS.new")) // remove all messages up to specified sequence number _ = s.Purge(ctx, jetstream.WithPurgeSequence(100)) // remove messages, but keep 10 newest _ = s.Purge(ctx, jetstream.WithPurgeKeep(10)) ``` - Get and messages from stream ```go // get message from stream with sequence number == 100 msg, _ := s.GetMsg(ctx, 100) // get last message from "ORDERS.new" subject msg, _ = s.GetLastMsgForSubject(ctx, "ORDERS.new") // delete a message with sequence number == 100 _ = s.DeleteMsg(ctx, 100) ``` - Get information about a stream ```go // Fetches latest stream info from server info, _ := s.Info(ctx) fmt.Println(info.Config.Name) // Returns the most recently fetched StreamInfo, without making an API call to the server cachedInfo := s.CachedInfo() 
fmt.Println(cachedInfo.Config.Name) ``` ## Consumers Only pull consumers are supported in `jetstream` package. However, unlike the JetStream API in `nats` package, pull consumers allow for continuous message retrieval (similarly to how `nats.Subscribe()` works). Because of that, push consumers can be easily replaced by pull consumers for most of the use cases. ### Consumers management CRUD operations on consumers can be achieved on 2 levels: - on `JetStream` interface ```go js, _ := jetstream.New(nc) // create a consumer (this is an idempotent operation) // an error will be returned if consumer already exists and has different configuration. cons, _ := js.CreateConsumer(ctx, "ORDERS", jetstream.ConsumerConfig{ Durable: "foo", AckPolicy: jetstream.AckExplicitPolicy, }) // create an ephemeral pull consumer by not providing `Durable` ephemeral, _ := js.CreateConsumer(ctx, "ORDERS", jetstream.ConsumerConfig{ AckPolicy: jetstream.AckExplicitPolicy, }) // consumer can also be created using CreateOrUpdateConsumer // this method will either create a consumer if it does not exist // or update existing consumer (if possible) cons2 := js.CreateOrUpdateConsumer(ctx, "ORDERS", jetstream.ConsumerConfig{ Name: "bar", }) // consumers can be updated // an error will be returned if consumer with given name does not exist // or an illegal property is to be updated (e.g. 
AckPolicy) updated, _ := js.UpdateConsumer(ctx, "ORDERS", jetstream.ConsumerConfig{ AckPolicy: jetstream.AckExplicitPolicy, Description: "updated consumer" }) // get consumer handle cons, _ = js.Consumer(ctx, "ORDERS", "foo") // delete a consumer js.DeleteConsumer(ctx, "ORDERS", "foo") ``` - on `Stream` interface ```go // Create a JetStream management interface js, _ := jetstream.New(nc) // get stream handle stream, _ := js.Stream(ctx, "ORDERS") // create consumer cons, _ := stream.CreateConsumer(ctx, jetstream.ConsumerConfig{ Durable: "foo", AckPolicy: jetstream.AckExplicitPolicy, }) // get consumer handle cons, _ = stream.Consumer(ctx, "ORDERS", "foo") // delete a consumer stream.DeleteConsumer(ctx, "foo") ``` `Consumer` interface, returned when creating/fetching consumers, allows fetching `ConsumerInfo`: ```go // Fetches latest consumer info from server info, _ := cons.Info(ctx) fmt.Println(info.Config.Durable) // Returns the most recently fetched ConsumerInfo, without making an API call to the server cachedInfo := cons.CachedInfo() fmt.Println(cachedInfo.Config.Durable) ``` ### Listing consumers and consumer names ```go // list consumers consumers := s.ListConsumers(ctx) for cons := range consumers.Info() { fmt.Println(cons.Name) } if consumers.Err() != nil { fmt.Println("Unexpected error occurred") } // list consumer names names := s.ConsumerNames(ctx) for name := range names.Name() { fmt.Println(name) } if names.Err() != nil { fmt.Println("Unexpected error occurred") } ``` ### Ordered consumers `jetstream`, in addition to basic named/ephemeral consumers, supports ordered consumer functionality. Ordered is strictly processing messages in the order that they were stored on the stream, providing a consistent and deterministic message ordering. It is also resilient to consumer deletion. Ordered consumers present the same set of message consumption methods as standard pull consumers. 
```go js, _ := jetstream.New(nc) // create a consumer (this is an idempotent operation) cons, _ := js.OrderedConsumer(ctx, "ORDERS", jetstream.OrderedConsumerConfig{ // Filter results from "ORDERS" stream by specific subject FilterSubjects: []{"ORDERS.A"}, }) ``` ### Receiving messages from the consumer The `Consumer` interface covers allows fetching messages on demand, with pre-defined batch size on bytes limit, or continuous push-like receiving of messages. #### __Single fetch__ This pattern pattern allows fetching a defined number of messages in a single RPC. - Using `Fetch` or `FetchBytes`, consumer will return up to the provided number of messages/bytes. By default, `Fetch()` will wait 30 seconds before timing out (this behavior can be configured using `FetchMaxWait()` option): ```go // receive up to 10 messages from the stream msgs, err := c.Fetch(10) if err != nil { // handle error } for msg := range msgs.Messages() { fmt.Printf("Received a JetStream message: %s\n", string(msg.Data())) } if msgs.Error() != nil { // handle error } // receive up to 1024 B of data msgs, err := c.FetchBytes(1024) if err != nil { // handle error } for msg := range msgs.Messages() { fmt.Printf("Received a JetStream message: %s\n", string(msg.Data())) } if msgs.Error() != nil { // handle error } ``` Similarly, `FetchNoWait()` can be used in order to only return messages from the stream available at the time of sending request: ```go // FetchNoWait will not wait for new messages if the whole batch is not available at the time of sending request. 
msgs, err := c.FetchNoWait(10) if err != nil { // handle error } for msg := range msgs.Messages() { fmt.Printf("Received a JetStream message: %s\n", string(msg.Data())) } if msgs.Error() != nil { // handle error } ``` > __Warning__: Both `Fetch()` and `FetchNoWait()` have worse performance when > used to continuously retrieve messages in comparison to `Messages()` or `Consume()` methods, as they do not perform any optimizations (pre-buffering) and new subscription is created for each execution. #### Continuous polling There are 2 ways to achieve push-like behavior using pull consumers in `jetstream` package. Both `Messages()` and `Consume()` methods perform similar optimizations and for most cases can be used interchangeably. There is an advantage of using `Messages()` instead of `Consume()` for work-queue scenarios, where messages should be fetched one by one, as it allows for finer control over fetching single messages on demand. Subject filtering is achieved by configuring a consumer with a `FilterSubject` value. ##### Using `Consume()` receive messages in a callback ```go cons, _ := js.CreateOrUpdateConsumer("ORDERS", jetstream.ConsumerConfig{ AckPolicy: jetstream.AckExplicitPolicy, // receive messages from ORDERS.A subject only FilterSubject: "ORDERS.A" })) consContext, _ := c.Consume(func(msg jetstream.Msg) { fmt.Printf("Received a JetStream message: %s\n", string(msg.Data())) }) defer consContext.Stop() ``` Similarly to `Messages()`, `Consume()` can be supplied with options to modify the behavior of a single pull request: - `PullMaxMessages(int)` - up to provided number of messages will be buffered - `PullMaxBytes(int)` - up to provided number of bytes will be buffered. This setting and `PullMaxMessages` are mutually exclusive. The value should be set to a high enough value to accommodate the largest message expected from the server. 
Note that it may not be sufficient to set this value to the maximum message size, as this setting controls the client buffer size, not the max bytes requested from the server within a single pull request. If the value is set too low, the consumer will stall and not be able to consume messages. - `PullExpiry(time.Duration)` - timeout on a single pull request to the server type PullThresholdMessages int - `PullThresholdMessages(int)` - amount of messages which triggers refilling the buffer - `PullThresholdBytes(int)` - amount of bytes which triggers refilling the buffer - `PullHeartbeat(time.Duration)` - idle heartbeat duration for a single pull request. An error will be triggered if at least 2 heartbeats are missed - `WithConsumeErrHandler(func (ConsumeContext, error))` - when used, sets a custom error handler on `Consume()`, allowing e.g. tracking missing heartbeats. - `PullMaxMessagesWithBytesLimit` - up to the provided number of messages will be buffered and a single fetch size will be limited to the provided value. This is an advanced option and should be used with caution. Most of the time, `PullMaxMessages` or `PullMaxBytes` should be used instead. Note that he byte limit should never be set to a value lower than the maximum message size that can be expected from the server. If the byte limit is lower than the maximum message size, the consumer will stall and not be able to consume messages. > __NOTE__: `Stop()` should always be called on `ConsumeContext` to avoid > leaking goroutines. ##### Using `Messages()` to iterate over incoming messages ```go iter, _ := cons.Messages() for { msg, err := iter.Next() // Next can return error, e.g. when iterator is closed or no heartbeats were received if err != nil { //handle error } fmt.Printf("Received a JetStream message: %s\n", string(msg.Data())) msg.Ack() } iter.Stop() ``` It can also be configured to only store up to defined number of messages/bytes in the buffer. 
```go // a maximum of 10 messages or 1024 bytes will be stored in memory (whichever is encountered first) iter, _ := cons.Messages(jetstream.PullMaxMessages(10), jetstream.PullMaxBytes(1024)) ``` `Messages()` exposes the following options: - `PullMaxMessages(int)` - up to provided number of messages will be buffered - `PullMaxBytes(int)` - up to provided number of bytes will be buffered. This setting and `PullMaxMessages` are mutually exclusive. The value should be set to a high enough value to accommodate the largest message expected from the server. Note that it may not be sufficient to set this value to the maximum message size, as this setting controls the client buffer size, not the max bytes requested from the server within a single pull request. If the value is set too low, the consumer will stall and not be able to consume messages. - `PullExpiry(time.Duration)` - timeout on a single pull request to the server - `PullThresholdMessages(int)` - amount of messages which triggers refilling the buffer - `PullThresholdBytes(int)` - amount of bytes which triggers refilling the buffer - `PullHeartbeat(time.Duration)` - idle heartbeat duration for a single pull request. An error will be triggered if at least 2 heartbeats are missed (unless `WithMessagesErrOnMissingHeartbeat(false)` is used) - `PullMaxMessagesWithBytesLimit` - up to the provided number of messages will be buffered and a single fetch size will be limited to the provided value. This is an advanced option and should be used with caution. Most of the time, `PullMaxMessages` or `PullMaxBytes` should be used instead. Note that the byte limit should never be set to a value lower than the maximum message size that can be expected from the server. If the byte limit is lower than the maximum message size, the consumer will stall and not be able to consume messages. 
##### Using `Messages()` to fetch single messages one by one When implementing work queue, it is possible to use `Messages()` in order to fetch messages from the server one-by-one, without optimizations and pre-buffering (to avoid redeliveries when processing messages at slow rate). ```go // PullMaxMessages determines how many messages will be sent to the client in a single pull request iter, _ := cons.Messages(jetstream.PullMaxMessages(1)) numWorkers := 5 sem := make(chan struct{}, numWorkers) for { sem <- struct{}{} go func() { defer func() { <-sem }() msg, err := iter.Next() if err != nil { // handle err } fmt.Printf("Processing msg: %s\n", string(msg.Data())) doWork() msg.Ack() }() } ``` ## Publishing on stream `JetStream` interface allows publishing messages on stream in 2 ways: ### __Synchronous publish__ ```go js, _ := jetstream.New(nc) // Publish message on subject ORDERS.new // Given subject has to belong to a stream ack, err := js.PublishMsg(ctx, &nats.Msg{ Data: []byte("hello"), Subject: "ORDERS.new", }) fmt.Printf("Published msg with sequence number %d on stream %q", ack.Sequence, ack.Stream) // A helper method accepting subject and data as parameters ack, err = js.Publish(ctx, "ORDERS.new", []byte("hello")) ``` Both `Publish()` and `PublishMsg()` can be supplied with options allowing setting various headers. Additionally, for `PublishMsg()` headers can be set directly on `nats.Msg`. 
```go // All 3 implementations work identically ack, err := js.PublishMsg(ctx, &nats.Msg{ Data: []byte("hello"), Subject: "ORDERS.new", Header: nats.Header{ "Nats-Msg-Id": []string{"id"}, }, }) ack, err = js.PublishMsg(ctx, &nats.Msg{ Data: []byte("hello"), Subject: "ORDERS.new", }, jetstream.WithMsgID("id")) ack, err = js.Publish(ctx, "ORDERS.new", []byte("hello"), jetstream.WithMsgID("id")) ``` ### __Async publish__ ```go js, _ := jetstream.New(nc) // publish message and do not wait for ack ackF, err := js.PublishMsgAsync(ctx, &nats.Msg{ Data: []byte("hello"), Subject: "ORDERS.new", }) // block and wait for ack select { case ack := <-ackF.Ok(): fmt.Printf("Published msg with sequence number %d on stream %q", ack.Sequence, ack.Stream) case err := <-ackF.Err(): fmt.Println(err) } // similarly to synchronous publish, there is a helper method accepting subject and data ackF, err = js.PublishAsync("ORDERS.new", []byte("hello")) ``` Just as for synchronous publish, `PublishAsync()` and `PublishMsgAsync()` accept options for setting headers. ## KeyValue Store JetStream KeyValue Stores offer a straightforward method for storing key-value pairs within JetStream. These stores are supported by a specially configured stream, designed to efficiently and compactly store these pairs. This structure ensures rapid and convenient access to the data. The KV Store, also known as a bucket, enables the execution of various operations: - create/update a value for a given key - get a value for a given key - delete a value for a given key - purge all values from a bucket - list all keys in a bucket - watch for changes on given key set or the whole bucket - retrieve history of changes for a given key ### Basic usage of KV bucket The most basic usage of KV bucket is to create or retrieve a bucket and perform basic CRUD operations on keys. ```go js, _ := jetstream.New(nc) ctx := context.Background() // Create a new bucket. 
Bucket name is required and has to be unique within a JetStream account. kv, _ := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "profiles"}) // Set a value for a given key // Put will either create or update a value for a given key kv.Put(ctx, "sue.color", []byte("blue")) // Get an entry for a given key // Entry contains key/value, but also metadata (revision, timestamp, etc.)) entry, _ := kv.Get(ctx, "sue.color") // Prints `sue.color @ 1 -> "blue"` fmt.Printf("%s @ %d -> %q\n", entry.Key(), entry.Revision(), string(entry.Value())) // Update a value for a given key // Update will fail if the key does not exist or the revision has changed kv.Update(ctx, "sue.color", []byte("red"), 1) // Create will fail if the key already exists _, err := kv.Create(ctx, "sue.color", []byte("purple")) fmt.Println(err) // prints `nats: key exists` // Delete a value for a given key. // Delete is not destructive, it will add a delete marker for a given key // and all previous revisions will still be available kv.Delete(ctx, "sue.color") // getting a deleted key will return an error _, err = kv.Get(ctx, "sue.color") fmt.Println(err) // prints `nats: key not found` // A bucket can be deleted once it is no longer needed js.DeleteKeyValue(ctx, "profiles") ``` ### Watching for changes on a bucket KV buckets support Watchers, which can be used to watch for changes on a given key or the whole bucket. Watcher will receive a notification on a channel when a change occurs. By default, watcher will return initial values for all matching keys. After sending all initial values, watcher will send nil on the channel to signal that all initial values have been sent and it will start sending updates when changes occur. Watcher supports several configuration options: - `IncludeHistory` will have the key watcher send all historical values for each key (up to KeyValueMaxHistory). - `IgnoreDeletes` will have the key watcher not pass any keys with delete markers. 
- `UpdatesOnly` will have the key watcher only pass updates on values (without values already present when starting). - `MetaOnly` will have the key watcher retrieve only the entry metadata, not the entry value. - `ResumeFromRevision` instructs the key watcher to resume from a specific revision number. ```go js, _ := jetstream.New(nc) ctx := context.Background() kv, _ := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "profiles"}) kv.Put(ctx, "sue.color", []byte("blue")) // A watcher can be created to watch for changes on a given key or the whole bucket // By default, watcher will return most recent values for all matching keys. // Watcher can be configured to only return updates by using jetstream.UpdatesOnly() option. watcher, _ := kv.Watch(ctx, "sue.*") defer watcher.Stop() kv.Put(ctx, "sue.age", []byte("43")) kv.Put(ctx, "sue.color", []byte("red")) // First, the watcher sends most recent values for all matching keys. // In this case, it will send a single entry for `sue.color`. entry := <-watcher.Updates() // Prints `sue.color @ 1 -> "blue"` fmt.Printf("%s @ %d -> %q\n", entry.Key(), entry.Revision(), string(entry.Value())) // After all current values have been sent, watcher will send nil on the channel. entry = <-watcher.Updates() if entry != nil { fmt.Println("Unexpected entry received") } // After that, watcher will send updates when changes occur // In this case, it will send an entry for `sue.color` and `sue.age`. 
entry = <-watcher.Updates() // Prints `sue.age @ 2 -> "43"` fmt.Printf("%s @ %d -> %q\n", entry.Key(), entry.Revision(), string(entry.Value())) entry = <-watcher.Updates() // Prints `sue.color @ 3 -> "red"` fmt.Printf("%s @ %d -> %q\n", entry.Key(), entry.Revision(), string(entry.Value())) ``` ### Additional operations on a bucket In addition to basic CRUD operations and watching for changes, KV buckets support several additional operations: - `ListKeys` will return all keys in a bucket ```go js, _ := jetstream.New(nc) ctx := context.Background() kv, _ := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "profiles"}) kv.Put(ctx, "sue.color", []byte("blue")) kv.Put(ctx, "sue.age", []byte("43")) kv.Put(ctx, "bucket", []byte("profiles")) keys, _ := kv.ListKeys(ctx) // Prints all 3 keys for key := range keys.Keys() { fmt.Println(key) } ``` - `Purge` and `PurgeDeletes` for removing all keys from a bucket ```go js, _ := jetstream.New(nc) ctx := context.Background() kv, _ := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "profiles"}) kv.Put(ctx, "sue.color", []byte("blue")) kv.Put(ctx, "sue.age", []byte("43")) kv.Put(ctx, "bucket", []byte("profiles")) // Purge will remove all keys from a bucket. // The latest revision of each key will be kept // with a delete marker, all previous revisions will be removed // permanently. kv.Purge(ctx) // PurgeDeletes will remove all keys from a bucket // with a delete marker. 
kv.PurgeDeletes(ctx) ``` - `Status` will return the current status of a bucket ```go js, _ := jetstream.New(nc) ctx := context.Background() kv, _ := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "profiles"}) kv.Put(ctx, "sue.color", []byte("blue")) kv.Put(ctx, "sue.age", []byte("43")) kv.Put(ctx, "bucket", []byte("profiles")) status, _ := kv.Status(ctx) fmt.Println(status.Bucket()) // prints `profiles` fmt.Println(status.Values()) // prints `3` fmt.Println(status.Bytes()) // prints the size of all values in bytes ``` ## Object Store JetStream Object Stores offer a straightforward method for storing large objects within JetStream. These stores are backed by a specially configured stream, designed to efficiently and compactly store these objects. The Object Store, also known as a bucket, enables the execution of various operations: - create/update an object - get an object - delete an object - list all objects in a bucket - watch for changes on objects in a bucket - create links to other objects or other buckets ### Basic usage of Object Store The most basic usage of Object bucket is to create or retrieve a bucket and perform basic CRUD operations on objects. ```go js, _ := jetstream.New(nc) ctx := context.Background() // Create a new bucket. Bucket name is required and has to be unique within a JetStream account. os, _ := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "configs"}) config1 := bytes.NewBufferString("first config") // Put an object in a bucket. Put expects object metadata and a reader // to read the object data from. os.Put(ctx, jetstream.ObjectMeta{Name: "config-1"}, config1) // Objects can also be created using various helper methods // 1. As raw strings os.PutString(ctx, "config-2", "second config") // 2. As raw bytes os.PutBytes(ctx, "config-3", []byte("third config")) // 3. 
As a file os.PutFile(ctx, "config-4.txt") // Get an object // Get returns a reader and object info // Similar to Put, Get can also be used with helper methods // to retrieve object data as a string, bytes or to save it to a file object, _ := os.Get(ctx, "config-1") data, _ := io.ReadAll(object) info, _ := object.Info() // Prints `configs.config-1 -> "first config"` fmt.Printf("%s.%s -> %q\n", info.Bucket, info.Name, string(data)) // Delete an object. // Delete will remove object data from stream, but object metadata will be kept // with a delete marker. os.Delete(ctx, "config-1") // getting a deleted object will return an error _, err := os.Get(ctx, "config-1") fmt.Println(err) // prints `nats: object not found` // A bucket can be deleted once it is no longer needed js.DeleteObjectStore(ctx, "configs") ``` ### Watching for changes on a store Object Stores support Watchers, which can be used to watch for changes on objects in a given bucket. Watcher will receive a notification on a channel when a change occurs. By default, watcher will return latest information for all objects in a bucket. After sending all initial values, watcher will send nil on the channel to signal that all initial values have been sent and it will start sending updates when changes occur. >__NOTE:__ Watchers do not retrieve values for objects, only metadata (containing >information such as object name, bucket name, object size etc.). If object data >is required, `Get` method should be used. Watcher supports several configuration options: - `IncludeHistory` will have the watcher send historical updates for each object. - `IgnoreDeletes` will have the watcher not pass any objects with delete markers. - `UpdatesOnly` will have the watcher only pass updates on objects (without objects already present when starting). 
```go js, _ := jetstream.New(nc) ctx := context.Background() os, _ := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "configs"}) os.PutString(ctx, "config-1", "first config") // By default, watcher will return most recent values for all objects in a bucket. // Watcher can be configured to only return updates by using jetstream.UpdatesOnly() option. watcher, _ := os.Watch(ctx) defer watcher.Stop() // create a second object os.PutString(ctx, "config-2", "second config") // update metadata of the first object os.UpdateMeta(ctx, "config-1", jetstream.ObjectMeta{Name: "config-1", Description: "updated config"}) // First, the watcher sends most recent values for all matching objects. // In this case, it will send a single entry for `config-1`. object := <-watcher.Updates() // Prints `configs.config-1 -> ""` fmt.Printf("%s.%s -> %q\n", object.Bucket, object.Name, object.Description) // After all current values have been sent, watcher will send nil on the channel. object = <-watcher.Updates() if object != nil { fmt.Println("Unexpected object received") } // After that, watcher will send updates when changes occur // In this case, it will send an entry for `config-2` and `config-1`. object = <-watcher.Updates() // Prints `configs.config-2 -> ""` fmt.Printf("%s.%s -> %q\n", object.Bucket, object.Name, object.Description) object = <-watcher.Updates() // Prints `configs.config-1 -> "updated config"` fmt.Printf("%s.%s -> %q\n", object.Bucket, object.Name, object.Description) ``` ### Additional operations on a store In addition to basic CRUD operations and watching for changes, Object Stores support several additional operations: - `UpdateMeta` for updating object metadata, such as name, description, etc. ```go js, _ := jetstream.New(nc) ctx := context.Background() os, _ := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "configs"}) os.PutString(ctx, "config", "data") // update metadata of the object to e.g. 
add a description os.UpdateMeta(ctx, "config", jetstream.ObjectMeta{Name: "config", Description: "this is a config"}) // object can be moved under a new name (unless it already exists) os.UpdateMeta(ctx, "config", jetstream.ObjectMeta{Name: "config-1", Description: "updated config"}) ``` - `List` for listing information about all objects in a bucket: ```go js, _ := jetstream.New(nc) ctx := context.Background() os, _ := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "configs"}) os.PutString(ctx, "config-1", "cfg1") os.PutString(ctx, "config-2", "cfg1") os.PutString(ctx, "config-3", "cfg1") // List will return information about all objects in a bucket objects, _ := os.List(ctx) // Prints all 3 objects for _, object := range objects { fmt.Println(object.Name) } ``` - `Status` will return the current status of a bucket ```go js, _ := jetstream.New(nc) ctx := context.Background() os, _ := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "configs"}) os.PutString(ctx, "config-1", "cfg1") os.PutString(ctx, "config-2", "cfg1") os.PutString(ctx, "config-3", "cfg1") status, _ := os.Status(ctx) fmt.Println(status.Bucket()) // prints `configs` fmt.Println(status.Size()) // prints the size of the bucket in bytes ``` ## Examples You can find more examples of `jetstream` usage [here](https://github.com/nats-io/nats.go/tree/main/examples/jetstream). nats.go-1.41.0/jetstream/api.go000066400000000000000000000112701477351342400162570ustar00rootroot00000000000000// Copyright 2022-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package jetstream import ( "context" "encoding/json" "strings" ) type ( apiResponse struct { Type string `json:"type"` Error *APIError `json:"error,omitempty"` } // apiPaged includes variables used to create paged responses from the JSON API apiPaged struct { Total int `json:"total"` Offset int `json:"offset"` Limit int `json:"limit"` } ) // Request API subjects for JetStream. const ( // DefaultAPIPrefix is the default prefix for the JetStream API. DefaultAPIPrefix = "$JS.API." // jsDomainT is used to create JetStream API prefix by specifying only Domain jsDomainT = "$JS.%s.API." // jsExtDomainT is used to create a StreamSource External APIPrefix jsExtDomainT = "$JS.%s.API" // apiAccountInfo is for obtaining general information about JetStream. apiAccountInfo = "INFO" // apiConsumerCreateT is used to create consumers. apiConsumerCreateT = "CONSUMER.CREATE.%s.%s" // apiConsumerCreateT is used to create consumers. // it accepts stream name, consumer name and filter subject apiConsumerCreateWithFilterSubjectT = "CONSUMER.CREATE.%s.%s.%s" // apiConsumerInfoT is used to create consumers. apiConsumerInfoT = "CONSUMER.INFO.%s.%s" // apiRequestNextT is the prefix for the request next message(s) for a consumer in worker/pull mode. apiRequestNextT = "CONSUMER.MSG.NEXT.%s.%s" // apiConsumerDeleteT is used to delete consumers. apiConsumerDeleteT = "CONSUMER.DELETE.%s.%s" // apiConsumerPauseT is used to pause a consumer. apiConsumerPauseT = "CONSUMER.PAUSE.%s.%s" // apiConsumerListT is used to return all detailed consumer information apiConsumerListT = "CONSUMER.LIST.%s" // apiConsumerNamesT is used to return a list with all consumer names for the stream. apiConsumerNamesT = "CONSUMER.NAMES.%s" // apiStreams can lookup a stream by subject. apiStreams = "STREAM.NAMES" // apiStreamCreateT is the endpoint to create new streams. 
apiStreamCreateT = "STREAM.CREATE.%s" // apiStreamInfoT is the endpoint to get information on a stream. apiStreamInfoT = "STREAM.INFO.%s" // apiStreamUpdateT is the endpoint to update existing streams. apiStreamUpdateT = "STREAM.UPDATE.%s" // apiStreamDeleteT is the endpoint to delete streams. apiStreamDeleteT = "STREAM.DELETE.%s" // apiStreamPurgeT is the endpoint to purge streams. apiStreamPurgeT = "STREAM.PURGE.%s" // apiStreamListT is the endpoint that will return all detailed stream information apiStreamListT = "STREAM.LIST" // apiMsgGetT is the endpoint to get a message. apiMsgGetT = "STREAM.MSG.GET.%s" // apiMsgGetT is the endpoint to perform a direct get of a message. apiDirectMsgGetT = "DIRECT.GET.%s" // apiDirectMsgGetLastBySubjectT is the endpoint to perform a direct get of a message by subject. apiDirectMsgGetLastBySubjectT = "DIRECT.GET.%s.%s" // apiMsgDeleteT is the endpoint to remove a message. apiMsgDeleteT = "STREAM.MSG.DELETE.%s" // apiConsumerUnpinT is the endpoint to unpin a consumer. apiConsumerUnpinT = "CONSUMER.UNPIN.%s.%s" ) func (js *jetStream) apiRequestJSON(ctx context.Context, subject string, resp any, data ...[]byte) (*jetStreamMsg, error) { jsMsg, err := js.apiRequest(ctx, subject, data...) 
if err != nil { return nil, err } if err := json.Unmarshal(jsMsg.Data(), resp); err != nil { return nil, err } return jsMsg, nil } // a RequestWithContext with tracing via TraceCB func (js *jetStream) apiRequest(ctx context.Context, subj string, data ...[]byte) (*jetStreamMsg, error) { subj = js.apiSubject(subj) var req []byte if len(data) > 0 { req = data[0] } if js.opts.clientTrace != nil { ctrace := js.opts.clientTrace if ctrace.RequestSent != nil { ctrace.RequestSent(subj, req) } } resp, err := js.conn.RequestWithContext(ctx, subj, req) if err != nil { return nil, err } if js.opts.clientTrace != nil { ctrace := js.opts.clientTrace if ctrace.ResponseReceived != nil { ctrace.ResponseReceived(subj, resp.Data, resp.Header) } } return js.toJSMsg(resp), nil } func (js *jetStream) apiSubject(subj string) string { if js.opts.apiPrefix == "" { return subj } var b strings.Builder b.WriteString(js.opts.apiPrefix) b.WriteString(subj) return b.String() } nats.go-1.41.0/jetstream/consumer.go000066400000000000000000000324151477351342400173450ustar00rootroot00000000000000// Copyright 2022-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package jetstream import ( "context" "crypto/sha256" "encoding/json" "fmt" "strings" "time" "github.com/nats-io/nats.go/internal/syncx" "github.com/nats-io/nuid" ) type ( // Consumer contains methods for fetching/processing messages from a stream, // as well as fetching consumer info. 
// // This package provides two implementations of Consumer interface: // // - Standard named/ephemeral pull consumers. These consumers are created using // CreateConsumer method on Stream or JetStream interface. They can be // explicitly configured (using [ConsumerConfig]) and managed by the user, // either from this package or externally. // // - Ordered consumers. These consumers are created using OrderedConsumer // method on Stream or JetStream interface. They are managed by the library // and provide a simple way to consume messages from a stream. Ordered // consumers are ephemeral in-memory pull consumers and are resilient to // deletes and restarts. They provide limited configuration options // using [OrderedConsumerConfig]. // // Consumer provides method for optimized continuous consumption of messages // using Consume and Messages methods, as well as simple one-off messages // retrieval using Fetch and Next methods. Consumer interface { // Fetch is used to retrieve up to a provided number of messages from a // stream. This method will send a single request and deliver either all // requested messages unless time out is met earlier. Fetch timeout // defaults to 30 seconds and can be configured using FetchMaxWait // option. // // By default, Fetch uses a 5s idle heartbeat for requests longer than // 10 seconds. For shorter requests, the idle heartbeat is disabled. // This can be configured using FetchHeartbeat option. If a client does // not receive a heartbeat message from a stream for more than 2 times // the idle heartbeat setting, Fetch will return [ErrNoHeartbeat]. // // Fetch is non-blocking and returns MessageBatch, exposing a channel // for delivered messages. // // Messages channel is always closed, thus it is safe to range over it // without additional checks. After the channel is closed, // MessageBatch.Error() should be checked to see if there was an error // during message delivery (e.g. missing heartbeat). 
Fetch(batch int, opts ...FetchOpt) (MessageBatch, error) // FetchBytes is used to retrieve up to a provided bytes from the // stream. This method will send a single request and deliver the // provided number of bytes unless time out is met earlier. FetchBytes // timeout defaults to 30 seconds and can be configured using // FetchMaxWait option. // // By default, FetchBytes uses a 5s idle heartbeat for requests longer than // 10 seconds. For shorter requests, the idle heartbeat is disabled. // This can be configured using FetchHeartbeat option. If a client does // not receive a heartbeat message from a stream for more than 2 times // the idle heartbeat setting, Fetch will return ErrNoHeartbeat. // // FetchBytes is non-blocking and returns MessageBatch, exposing a channel // for delivered messages. // // Messages channel is always closed, thus it is safe to range over it // without additional checks. After the channel is closed, // MessageBatch.Error() should be checked to see if there was an error // during message delivery (e.g. missing heartbeat). FetchBytes(maxBytes int, opts ...FetchOpt) (MessageBatch, error) // FetchNoWait is used to retrieve up to a provided number of messages // from a stream. Unlike Fetch, FetchNoWait will only deliver messages // that are currently available in the stream and will not wait for new // messages to arrive, even if batch size is not met. // // FetchNoWait is non-blocking and returns MessageBatch, exposing a // channel for delivered messages. // // Messages channel is always closed, thus it is safe to range over it // without additional checks. After the channel is closed, // MessageBatch.Error() should be checked to see if there was an error // during message delivery (e.g. missing heartbeat). FetchNoWait(batch int) (MessageBatch, error) // Consume will continuously receive messages and handle them // with the provided callback function. 
Consume can be configured using // PullConsumeOpt options: // // - Error handling and monitoring can be configured using ConsumeErrHandler // option, which provides information about errors encountered during // consumption (both transient and terminal) // - Consume can be configured to stop after a certain number of // messages is received using StopAfter option. // - Consume can be optimized for throughput or memory usage using // PullExpiry, PullMaxMessages, PullMaxBytes and PullHeartbeat options. // Unless there is a specific use case, these options should not be used. // // Consume returns a ConsumeContext, which can be used to stop or drain // the consumer. Consume(handler MessageHandler, opts ...PullConsumeOpt) (ConsumeContext, error) // Messages returns MessagesContext, allowing continuously iterating // over messages on a stream. Messages can be configured using // PullMessagesOpt options: // // - Messages can be optimized for throughput or memory usage using // PullExpiry, PullMaxMessages, PullMaxBytes and PullHeartbeat options. // Unless there is a specific use case, these options should not be used. // - WithMessagesErrOnMissingHeartbeat can be used to enable/disable // erroring out on MessagesContext.Next when a heartbeat is missing. // This option is enabled by default. Messages(opts ...PullMessagesOpt) (MessagesContext, error) // Next is used to retrieve the next message from the consumer. This // method will block until the message is retrieved or timeout is // reached. Next(opts ...FetchOpt) (Msg, error) // Info fetches current ConsumerInfo from the server. Info(context.Context) (*ConsumerInfo, error) // CachedInfo returns ConsumerInfo currently cached on this consumer. // This method does not perform any network requests. The cached // ConsumerInfo is updated on every call to Info and Update. 
CachedInfo() *ConsumerInfo } createConsumerRequest struct { Stream string `json:"stream_name"` Config *ConsumerConfig `json:"config"` Action string `json:"action"` } ) // Info fetches current ConsumerInfo from the server. func (p *pullConsumer) Info(ctx context.Context) (*ConsumerInfo, error) { ctx, cancel := p.js.wrapContextWithoutDeadline(ctx) if cancel != nil { defer cancel() } infoSubject := fmt.Sprintf(apiConsumerInfoT, p.stream, p.name) var resp consumerInfoResponse if _, err := p.js.apiRequestJSON(ctx, infoSubject, &resp); err != nil { return nil, err } if resp.Error != nil { if resp.Error.ErrorCode == JSErrCodeConsumerNotFound { return nil, ErrConsumerNotFound } return nil, resp.Error } if resp.Error == nil && resp.ConsumerInfo == nil { return nil, ErrConsumerNotFound } p.info = resp.ConsumerInfo return resp.ConsumerInfo, nil } // CachedInfo returns ConsumerInfo currently cached on this consumer. // This method does not perform any network requests. The cached // ConsumerInfo is updated on every call to Info and Update. 
func (p *pullConsumer) CachedInfo() *ConsumerInfo { return p.info } func upsertConsumer(ctx context.Context, js *jetStream, stream string, cfg ConsumerConfig, action string) (Consumer, error) { ctx, cancel := js.wrapContextWithoutDeadline(ctx) if cancel != nil { defer cancel() } req := createConsumerRequest{ Stream: stream, Config: &cfg, Action: action, } reqJSON, err := json.Marshal(req) if err != nil { return nil, err } consumerName := cfg.Name if consumerName == "" { if cfg.Durable != "" { consumerName = cfg.Durable } else { consumerName = generateConsName() } } if err := validateConsumerName(consumerName); err != nil { return nil, err } var ccSubj string if cfg.FilterSubject != "" && len(cfg.FilterSubjects) == 0 { if err := validateSubject(cfg.FilterSubject); err != nil { return nil, err } ccSubj = fmt.Sprintf(apiConsumerCreateWithFilterSubjectT, stream, consumerName, cfg.FilterSubject) } else { ccSubj = fmt.Sprintf(apiConsumerCreateT, stream, consumerName) } var resp consumerInfoResponse if _, err := js.apiRequestJSON(ctx, ccSubj, &resp, reqJSON); err != nil { return nil, err } if resp.Error != nil { if resp.Error.ErrorCode == JSErrCodeStreamNotFound { return nil, ErrStreamNotFound } return nil, resp.Error } // check whether multiple filter subjects (if used) are reflected in the returned ConsumerInfo if len(cfg.FilterSubjects) != 0 && len(resp.Config.FilterSubjects) == 0 { return nil, ErrConsumerMultipleFilterSubjectsNotSupported } return &pullConsumer{ js: js, stream: stream, name: resp.Name, durable: cfg.Durable != "", info: resp.ConsumerInfo, subs: syncx.Map[string, *pullSubscription]{}, }, nil } const ( consumerActionCreate = "create" consumerActionUpdate = "update" consumerActionCreateOrUpdate = "" ) func generateConsName() string { name := nuid.Next() sha := sha256.New() sha.Write([]byte(name)) b := sha.Sum(nil) for i := 0; i < 8; i++ { b[i] = rdigits[int(b[i]%base)] } return string(b[:8]) } func getConsumer(ctx context.Context, js *jetStream, stream, 
name string) (Consumer, error) { ctx, cancel := js.wrapContextWithoutDeadline(ctx) if cancel != nil { defer cancel() } if err := validateConsumerName(name); err != nil { return nil, err } infoSubject := fmt.Sprintf(apiConsumerInfoT, stream, name) var resp consumerInfoResponse if _, err := js.apiRequestJSON(ctx, infoSubject, &resp); err != nil { return nil, err } if resp.Error != nil { if resp.Error.ErrorCode == JSErrCodeConsumerNotFound { return nil, ErrConsumerNotFound } return nil, resp.Error } if resp.Error == nil && resp.ConsumerInfo == nil { return nil, ErrConsumerNotFound } cons := &pullConsumer{ js: js, stream: stream, name: name, durable: resp.Config.Durable != "", info: resp.ConsumerInfo, subs: syncx.Map[string, *pullSubscription]{}, } return cons, nil } func deleteConsumer(ctx context.Context, js *jetStream, stream, consumer string) error { ctx, cancel := js.wrapContextWithoutDeadline(ctx) if cancel != nil { defer cancel() } if err := validateConsumerName(consumer); err != nil { return err } deleteSubject := fmt.Sprintf(apiConsumerDeleteT, stream, consumer) var resp consumerDeleteResponse if _, err := js.apiRequestJSON(ctx, deleteSubject, &resp); err != nil { return err } if resp.Error != nil { if resp.Error.ErrorCode == JSErrCodeConsumerNotFound { return ErrConsumerNotFound } return resp.Error } return nil } func pauseConsumer(ctx context.Context, js *jetStream, stream, consumer string, pauseUntil *time.Time) (*ConsumerPauseResponse, error) { ctx, cancel := js.wrapContextWithoutDeadline(ctx) if cancel != nil { defer cancel() } if err := validateConsumerName(consumer); err != nil { return nil, err } subject := fmt.Sprintf(apiConsumerPauseT, stream, consumer) var resp consumerPauseApiResponse req, err := json.Marshal(consumerPauseRequest{ PauseUntil: pauseUntil, }) if err != nil { return nil, err } if _, err := js.apiRequestJSON(ctx, subject, &resp, req); err != nil { return nil, err } if resp.Error != nil { if resp.Error.ErrorCode == 
JSErrCodeConsumerNotFound { return nil, ErrConsumerNotFound } return nil, resp.Error } return &ConsumerPauseResponse{ Paused: resp.Paused, PauseUntil: resp.PauseUntil, PauseRemaining: resp.PauseRemaining, }, nil } func resumeConsumer(ctx context.Context, js *jetStream, stream, consumer string) (*ConsumerPauseResponse, error) { return pauseConsumer(ctx, js, stream, consumer, nil) } func validateConsumerName(dur string) error { if dur == "" { return fmt.Errorf("%w: '%s'", ErrInvalidConsumerName, "name is required") } if strings.ContainsAny(dur, ">*. /\\") { return fmt.Errorf("%w: '%s'", ErrInvalidConsumerName, dur) } return nil } func unpinConsumer(ctx context.Context, js *jetStream, stream, consumer, group string) error { ctx, cancel := js.wrapContextWithoutDeadline(ctx) if cancel != nil { defer cancel() } if err := validateConsumerName(consumer); err != nil { return err } unpinSubject := fmt.Sprintf(apiConsumerUnpinT, stream, consumer) var req = consumerUnpinRequest{ Group: group, } reqJSON, err := json.Marshal(req) if err != nil { return err } var resp apiResponse if _, err := js.apiRequestJSON(ctx, unpinSubject, &resp, reqJSON); err != nil { return err } if resp.Error != nil { if resp.Error.ErrorCode == JSErrCodeConsumerNotFound { return ErrConsumerNotFound } return resp.Error } return nil } nats.go-1.41.0/jetstream/consumer_config.go000066400000000000000000000460221477351342400206710ustar00rootroot00000000000000// Copyright 2022-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package jetstream import ( "encoding/json" "fmt" "time" ) type ( // ConsumerInfo is the detailed information about a JetStream consumer. ConsumerInfo struct { // Stream specifies the name of the stream that the consumer is bound // to. Stream string `json:"stream_name"` // Name represents the unique identifier for the consumer. This can be // either set explicitly by the client or generated automatically if not // set. Name string `json:"name"` // Created is the timestamp when the consumer was created. Created time.Time `json:"created"` // Config contains the configuration settings of the consumer, set when // creating or updating the consumer. Config ConsumerConfig `json:"config"` // Delivered holds information about the most recently delivered // message, including its sequence numbers and timestamp. Delivered SequenceInfo `json:"delivered"` // AckFloor indicates the message before the first unacknowledged // message. AckFloor SequenceInfo `json:"ack_floor"` // NumAckPending is the number of messages that have been delivered but // not yet acknowledged. NumAckPending int `json:"num_ack_pending"` // NumRedelivered counts the number of messages that have been // redelivered and not yet acknowledged. Each message is counted only // once, even if it has been redelivered multiple times. This count is // reset when the message is eventually acknowledged. NumRedelivered int `json:"num_redelivered"` // NumWaiting is the count of active pull requests. It is only relevant // for pull-based consumers. NumWaiting int `json:"num_waiting"` // NumPending is the number of messages that match the consumer's // filter, but have not been delivered yet. NumPending uint64 `json:"num_pending"` // Cluster contains information about the cluster to which this consumer // belongs (if applicable). 
Cluster *ClusterInfo `json:"cluster,omitempty"` // PushBound indicates whether at least one subscription exists for the // delivery subject of this consumer. This is only applicable to // push-based consumers. PushBound bool `json:"push_bound,omitempty"` // TimeStamp indicates when the info was gathered by the server. TimeStamp time.Time `json:"ts"` // PriorityGroups contains the information about the currently defined priority groups PriorityGroups []PriorityGroupState `json:"priority_groups,omitempty"` // Paused indicates whether the consumer is paused. Paused bool `json:"paused,omitempty"` // PauseRemaining contains the amount of time left until the consumer // unpauses. It will only be non-zero if the consumer is currently paused. PauseRemaining time.Duration `json:"pause_remaining,omitempty"` } PriorityGroupState struct { // Group this status is for. Group string `json:"group"` // PinnedClientID is the generated ID of the pinned client. PinnedClientID string `json:"pinned_client_id,omitempty"` // PinnedTS is the timestamp when the client was pinned. PinnedTS time.Time `json:"pinned_ts,omitempty"` } // ConsumerConfig is the configuration of a JetStream consumer. ConsumerConfig struct { // Name is an optional name for the consumer. If not set, one is // generated automatically. // // Name cannot contain whitespace, ., *, >, path separators (forward or // backwards slash), and non-printable characters. Name string `json:"name,omitempty"` // Durable is an optional durable name for the consumer. If both Durable // and Name are set, they have to be equal. Unless InactiveThreshold is set, a // durable consumer will not be cleaned up automatically. // // Durable cannot contain whitespace, ., *, >, path separators (forward or // backwards slash), and non-printable characters. Durable string `json:"durable_name,omitempty"` // Description provides an optional description of the consumer. 
Description string `json:"description,omitempty"` // DeliverPolicy defines from which point to start delivering messages // from the stream. Defaults to DeliverAllPolicy. DeliverPolicy DeliverPolicy `json:"deliver_policy"` // OptStartSeq is an optional sequence number from which to start // message delivery. Only applicable when DeliverPolicy is set to // DeliverByStartSequencePolicy. OptStartSeq uint64 `json:"opt_start_seq,omitempty"` // OptStartTime is an optional time from which to start message // delivery. Only applicable when DeliverPolicy is set to // DeliverByStartTimePolicy. OptStartTime *time.Time `json:"opt_start_time,omitempty"` // AckPolicy defines the acknowledgement policy for the consumer. // Defaults to AckExplicitPolicy. AckPolicy AckPolicy `json:"ack_policy"` // AckWait defines how long the server will wait for an acknowledgement // before resending a message. If not set, server default is 30 seconds. AckWait time.Duration `json:"ack_wait,omitempty"` // MaxDeliver defines the maximum number of delivery attempts for a // message. Applies to any message that is re-sent due to ack policy. // If not set, server default is -1 (unlimited). MaxDeliver int `json:"max_deliver,omitempty"` // BackOff specifies the optional back-off intervals for retrying // message delivery after a failed acknowledgement. It overrides // AckWait. // // BackOff only applies to messages not acknowledged in specified time, // not messages that were nack'ed. // // The number of intervals specified must be lower or equal to // MaxDeliver. If the number of intervals is lower, the last interval is // used for all remaining attempts. BackOff []time.Duration `json:"backoff,omitempty"` // FilterSubject can be used to filter messages delivered from the // stream. FilterSubject is exclusive with FilterSubjects. FilterSubject string `json:"filter_subject,omitempty"` // ReplayPolicy defines the rate at which messages are sent to the // consumer. 
If ReplayOriginalPolicy is set, messages are sent in the // same intervals in which they were stored on stream. This can be used // e.g. to simulate production traffic in development environments. If // ReplayInstantPolicy is set, messages are sent as fast as possible. // Defaults to ReplayInstantPolicy. ReplayPolicy ReplayPolicy `json:"replay_policy"` // RateLimit specifies an optional maximum rate of message delivery in // bits per second. RateLimit uint64 `json:"rate_limit_bps,omitempty"` // SampleFrequency is an optional frequency for sampling how often // acknowledgements are sampled for observability. See // https://docs.nats.io/running-a-nats-service/nats_admin/monitoring/monitoring_jetstream SampleFrequency string `json:"sample_freq,omitempty"` // MaxWaiting is a maximum number of pull requests waiting to be // fulfilled. If not set, this will inherit settings from stream's // ConsumerLimits or (if those are not set) from account settings. If // neither are set, server default is 512. MaxWaiting int `json:"max_waiting,omitempty"` // MaxAckPending is a maximum number of outstanding unacknowledged // messages. Once this limit is reached, the server will suspend sending // messages to the consumer. If not set, server default is 1000. // Set to -1 for unlimited. MaxAckPending int `json:"max_ack_pending,omitempty"` // HeadersOnly indicates whether only headers of messages should be sent // (and no payload). Defaults to false. HeadersOnly bool `json:"headers_only,omitempty"` // MaxRequestBatch is the optional maximum batch size a single pull // request can make. When set with MaxRequestMaxBytes, the batch size // will be constrained by whichever limit is hit first. MaxRequestBatch int `json:"max_batch,omitempty"` // MaxRequestExpires is the maximum duration a single pull request will // wait for messages to be available to pull. 
MaxRequestExpires time.Duration `json:"max_expires,omitempty"` // MaxRequestMaxBytes is the optional maximum total bytes that can be // requested in a given batch. When set with MaxRequestBatch, the batch // size will be constrained by whichever limit is hit first. MaxRequestMaxBytes int `json:"max_bytes,omitempty"` // InactiveThreshold is a duration which instructs the server to clean // up the consumer if it has been inactive for the specified duration. // Durable consumers will not be cleaned up by default, but if // InactiveThreshold is set, they will be. If not set, this will inherit // settings from stream's ConsumerLimits. If neither are set, server // default is 5 seconds. // // A consumer is considered inactive there are not pull requests // received by the server (for pull consumers), or no interest detected // on deliver subject (for push consumers), not if there are no // messages to be delivered. InactiveThreshold time.Duration `json:"inactive_threshold,omitempty"` // Replicas the number of replicas for the consumer's state. By default, // consumers inherit the number of replicas from the stream. Replicas int `json:"num_replicas"` // MemoryStorage is a flag to force the consumer to use memory storage // rather than inherit the storage type from the stream. MemoryStorage bool `json:"mem_storage,omitempty"` // FilterSubjects allows filtering messages from a stream by subject. // This field is exclusive with FilterSubject. Requires nats-server // v2.10.0 or later. FilterSubjects []string `json:"filter_subjects,omitempty"` // Metadata is a set of application-defined key-value pairs for // associating metadata on the consumer. This feature requires // nats-server v2.10.0 or later. Metadata map[string]string `json:"metadata,omitempty"` // PauseUntil is for suspending the consumer until the deadline. PauseUntil *time.Time `json:"pause_until,omitempty"` // PriorityPolicy represents he priority policy the consumer is set to. 
// Requires nats-server v2.11.0 or later. PriorityPolicy PriorityPolicy `json:"priority_policy,omitempty"` // PinnedTTL represents the time after which the client will be unpinned // if no new pull requests are sent.Used with PriorityPolicyPinned. // Requires nats-server v2.11.0 or later. PinnedTTL time.Duration `json:"priority_timeout,omitempty"` // PriorityGroups is a list of priority groups this consumer supports. PriorityGroups []string `json:"priority_groups,omitempty"` } // OrderedConsumerConfig is the configuration of an ordered JetStream // consumer. For more information, see [Ordered Consumers] in README // // [Ordered Consumers]: https://github.com/nats-io/nats.go/blob/main/jetstream/README.md#ordered-consumers OrderedConsumerConfig struct { // FilterSubjects allows filtering messages from a stream by subject. // This field is exclusive with FilterSubject. Requires nats-server // v2.10.0 or later. FilterSubjects []string `json:"filter_subjects,omitempty"` // DeliverPolicy defines from which point to start delivering messages // from the stream. Defaults to DeliverAllPolicy. DeliverPolicy DeliverPolicy `json:"deliver_policy"` // OptStartSeq is an optional sequence number from which to start // message delivery. Only applicable when DeliverPolicy is set to // DeliverByStartSequencePolicy. OptStartSeq uint64 `json:"opt_start_seq,omitempty"` // OptStartTime is an optional time from which to start message // delivery. Only applicable when DeliverPolicy is set to // DeliverByStartTimePolicy. OptStartTime *time.Time `json:"opt_start_time,omitempty"` // ReplayPolicy defines the rate at which messages are sent to the // consumer. If ReplayOriginalPolicy is set, messages are sent in the // same intervals in which they were stored on stream. This can be used // e.g. to simulate production traffic in development environments. If // ReplayInstantPolicy is set, messages are sent as fast as possible. // Defaults to ReplayInstantPolicy. 
ReplayPolicy ReplayPolicy `json:"replay_policy"` // InactiveThreshold is a duration which instructs the server to clean // up the consumer if it has been inactive for the specified duration. // Defaults to 5m. InactiveThreshold time.Duration `json:"inactive_threshold,omitempty"` // HeadersOnly indicates whether only headers of messages should be sent // (and no payload). Defaults to false. HeadersOnly bool `json:"headers_only,omitempty"` // Maximum number of attempts for the consumer to be recreated in a // single recreation cycle. Defaults to unlimited. MaxResetAttempts int // Metadata is a set of application-defined key-value pairs for // associating metadata on the consumer. This feature requires // nats-server v2.10.0 or later. Metadata map[string]string `json:"metadata,omitempty"` } // DeliverPolicy determines from which point to start delivering messages. DeliverPolicy int // AckPolicy determines how the consumer should acknowledge delivered // messages. AckPolicy int // ReplayPolicy determines how the consumer should replay messages it // already has queued in the stream. ReplayPolicy int // SequenceInfo has both the consumer and the stream sequence and last // activity. SequenceInfo struct { Consumer uint64 `json:"consumer_seq"` Stream uint64 `json:"stream_seq"` Last *time.Time `json:"last_active,omitempty"` } // PriorityPolicy determines the priority policy the consumer is set to. PriorityPolicy int ) const ( // PriorityPolicyNone is the default priority policy. PriorityPolicyNone PriorityPolicy = iota // PriorityPolicyPinned is the priority policy that pins a consumer to a // specific client. PriorityPolicyPinned // PriorityPolicyOverflow is the priority policy that allows for // restricting when a consumer will receive messages based on the number of // pending messages or acks. 
PriorityPolicyOverflow ) func (p *PriorityPolicy) UnmarshalJSON(data []byte) error { switch string(data) { case jsonString(""): *p = PriorityPolicyNone case jsonString("pinned_client"): *p = PriorityPolicyPinned case jsonString("overflow"): *p = PriorityPolicyOverflow default: return fmt.Errorf("nats: can not unmarshal %q", data) } return nil } func (p PriorityPolicy) MarshalJSON() ([]byte, error) { switch p { case PriorityPolicyNone: return json.Marshal("") case PriorityPolicyPinned: return json.Marshal("pinned_client") case PriorityPolicyOverflow: return json.Marshal("overflow") } return nil, fmt.Errorf("nats: unknown priority policy %v", p) } const ( // DeliverAllPolicy starts delivering messages from the very beginning of a // stream. This is the default. DeliverAllPolicy DeliverPolicy = iota // DeliverLastPolicy will start the consumer with the last sequence // received. DeliverLastPolicy // DeliverNewPolicy will only deliver new messages that are sent after the // consumer is created. DeliverNewPolicy // DeliverByStartSequencePolicy will deliver messages starting from a given // sequence configured with OptStartSeq in ConsumerConfig. DeliverByStartSequencePolicy // DeliverByStartTimePolicy will deliver messages starting from a given time // configured with OptStartTime in ConsumerConfig. DeliverByStartTimePolicy // DeliverLastPerSubjectPolicy will start the consumer with the last message // for all subjects received. 
DeliverLastPerSubjectPolicy ) func (p *DeliverPolicy) UnmarshalJSON(data []byte) error { switch string(data) { case jsonString("all"), jsonString("undefined"): *p = DeliverAllPolicy case jsonString("last"): *p = DeliverLastPolicy case jsonString("new"): *p = DeliverNewPolicy case jsonString("by_start_sequence"): *p = DeliverByStartSequencePolicy case jsonString("by_start_time"): *p = DeliverByStartTimePolicy case jsonString("last_per_subject"): *p = DeliverLastPerSubjectPolicy default: return fmt.Errorf("nats: can not unmarshal %q", data) } return nil } func (p DeliverPolicy) MarshalJSON() ([]byte, error) { switch p { case DeliverAllPolicy: return json.Marshal("all") case DeliverLastPolicy: return json.Marshal("last") case DeliverNewPolicy: return json.Marshal("new") case DeliverByStartSequencePolicy: return json.Marshal("by_start_sequence") case DeliverByStartTimePolicy: return json.Marshal("by_start_time") case DeliverLastPerSubjectPolicy: return json.Marshal("last_per_subject") } return nil, fmt.Errorf("nats: unknown deliver policy %v", p) } func (p DeliverPolicy) String() string { switch p { case DeliverAllPolicy: return "all" case DeliverLastPolicy: return "last" case DeliverNewPolicy: return "new" case DeliverByStartSequencePolicy: return "by_start_sequence" case DeliverByStartTimePolicy: return "by_start_time" case DeliverLastPerSubjectPolicy: return "last_per_subject" } return "" } const ( // AckExplicitPolicy requires ack or nack for all messages. AckExplicitPolicy AckPolicy = iota // AckAllPolicy when acking a sequence number, this implicitly acks all // sequences below this one as well. AckAllPolicy // AckNonePolicy requires no acks for delivered messages. 
AckNonePolicy ) func (p *AckPolicy) UnmarshalJSON(data []byte) error { switch string(data) { case jsonString("none"): *p = AckNonePolicy case jsonString("all"): *p = AckAllPolicy case jsonString("explicit"): *p = AckExplicitPolicy default: return fmt.Errorf("nats: can not unmarshal %q", data) } return nil } func (p AckPolicy) MarshalJSON() ([]byte, error) { switch p { case AckNonePolicy: return json.Marshal("none") case AckAllPolicy: return json.Marshal("all") case AckExplicitPolicy: return json.Marshal("explicit") } return nil, fmt.Errorf("nats: unknown acknowledgement policy %v", p) } func (p AckPolicy) String() string { switch p { case AckNonePolicy: return "AckNone" case AckAllPolicy: return "AckAll" case AckExplicitPolicy: return "AckExplicit" } return "Unknown AckPolicy" } const ( // ReplayInstantPolicy will replay messages as fast as possible. ReplayInstantPolicy ReplayPolicy = iota // ReplayOriginalPolicy will maintain the same timing as the messages were // received. ReplayOriginalPolicy ) func (p *ReplayPolicy) UnmarshalJSON(data []byte) error { switch string(data) { case jsonString("instant"): *p = ReplayInstantPolicy case jsonString("original"): *p = ReplayOriginalPolicy default: return fmt.Errorf("nats: can not unmarshal %q", data) } return nil } func (p ReplayPolicy) MarshalJSON() ([]byte, error) { switch p { case ReplayOriginalPolicy: return json.Marshal("original") case ReplayInstantPolicy: return json.Marshal("instant") } return nil, fmt.Errorf("nats: unknown replay policy %v", p) } func (p ReplayPolicy) String() string { switch p { case ReplayOriginalPolicy: return "original" case ReplayInstantPolicy: return "instant" } return "" } nats.go-1.41.0/jetstream/errors.go000066400000000000000000000475041477351342400170330ustar00rootroot00000000000000// Copyright 2022-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package jetstream import ( "errors" "fmt" ) type ( // JetStreamError is an error result that happens when using JetStream. // In case of client-side error, [APIError] returns nil. JetStreamError interface { APIError() *APIError error } jsError struct { apiErr *APIError message string } // APIError is included in all API responses if there was an error. APIError struct { Code int `json:"code"` ErrorCode ErrorCode `json:"err_code"` Description string `json:"description,omitempty"` } // ErrorCode represents error_code returned in response from JetStream API. ErrorCode uint16 ) const ( JSErrCodeJetStreamNotEnabledForAccount ErrorCode = 10039 JSErrCodeJetStreamNotEnabled ErrorCode = 10076 JSErrCodeStreamNotFound ErrorCode = 10059 JSErrCodeStreamNameInUse ErrorCode = 10058 JSErrCodeConsumerCreate ErrorCode = 10012 JSErrCodeConsumerNotFound ErrorCode = 10014 JSErrCodeConsumerNameExists ErrorCode = 10013 JSErrCodeConsumerAlreadyExists ErrorCode = 10105 JSErrCodeConsumerExists ErrorCode = 10148 JSErrCodeDuplicateFilterSubjects ErrorCode = 10136 JSErrCodeOverlappingFilterSubjects ErrorCode = 10138 JSErrCodeConsumerEmptyFilter ErrorCode = 10139 JSErrCodeConsumerDoesNotExist ErrorCode = 10149 JSErrCodeMessageNotFound ErrorCode = 10037 JSErrCodeBadRequest ErrorCode = 10003 JSErrCodeStreamWrongLastSequence ErrorCode = 10071 ) var ( // JetStream API errors // ErrJetStreamNotEnabled is an error returned when JetStream is not // enabled. // // Note: This error will not be returned in clustered mode, even if each // server in the cluster does not have JetStream enabled. 
In clustered mode, // requests will time out instead. ErrJetStreamNotEnabled JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeJetStreamNotEnabled, Description: "jetstream not enabled", Code: 503}} // ErrJetStreamNotEnabledForAccount is an error returned when JetStream is // not enabled for an account. ErrJetStreamNotEnabledForAccount JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeJetStreamNotEnabledForAccount, Description: "jetstream not enabled for account", Code: 503}} // ErrStreamNotFound is an error returned when stream with given name does // not exist. ErrStreamNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamNotFound, Description: "stream not found", Code: 404}} // ErrStreamNameAlreadyInUse is returned when a stream with given name // already exists and has a different configuration. ErrStreamNameAlreadyInUse JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamNameInUse, Description: "stream name already in use", Code: 400}} // ErrStreamSubjectTransformNotSupported is returned when the connected // nats-server version does not support setting the stream subject // transform. If this error is returned when executing CreateStream(), the // stream with invalid configuration was already created in the server. ErrStreamSubjectTransformNotSupported JetStreamError = &jsError{message: "stream subject transformation not supported by nats-server"} // ErrStreamSourceSubjectTransformNotSupported is returned when the // connected nats-server version does not support setting the stream source // subject transform. If this error is returned when executing // CreateStream(), the stream with invalid configuration was already created // in the server. 
ErrStreamSourceSubjectTransformNotSupported JetStreamError = &jsError{message: "stream subject transformation not supported by nats-server"} // ErrStreamSourceNotSupported is returned when the connected nats-server // version does not support setting the stream sources. If this error is // returned when executing CreateStream(), the stream with invalid // configuration was already created in the server. ErrStreamSourceNotSupported JetStreamError = &jsError{message: "stream sourcing is not supported by nats-server"} // ErrStreamSourceMultipleFilterSubjectsNotSupported is returned when the // connected nats-server version does not support setting the stream // sources. If this error is returned when executing CreateStream(), the // stream with invalid configuration was already created in the server. ErrStreamSourceMultipleFilterSubjectsNotSupported JetStreamError = &jsError{message: "stream sourcing with multiple subject filters not supported by nats-server"} // ErrConsumerNotFound is an error returned when consumer with given name // does not exist. ErrConsumerNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerNotFound, Description: "consumer not found", Code: 404}} // ErrConsumerExists is returned when attempting to create a consumer with // CreateConsumer but a consumer with given name already exists. ErrConsumerExists JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerExists, Description: "consumer already exists", Code: 400}} // ErrConsumerNameExists is returned when attempting to update a consumer // with UpdateConsumer but a consumer with given name does not exist. ErrConsumerDoesNotExist JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerDoesNotExist, Description: "consumer does not exist", Code: 400}} // ErrMsgNotFound is returned when message with provided sequence number // does not exist. 
ErrMsgNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeMessageNotFound, Description: "message not found", Code: 404}} // ErrBadRequest is returned when invalid request is sent to JetStream API. ErrBadRequest JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeBadRequest, Description: "bad request", Code: 400}} // ErrConsumerCreate is returned when nats-server reports error when // creating consumer (e.g. illegal update). ErrConsumerCreate JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerCreate, Description: "could not create consumer", Code: 500}} // ErrDuplicateFilterSubjects is returned when both FilterSubject and // FilterSubjects are specified when creating consumer. ErrDuplicateFilterSubjects JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeDuplicateFilterSubjects, Description: "consumer cannot have both FilterSubject and FilterSubjects specified", Code: 500}} // ErrDuplicateFilterSubjects is returned when filter subjects overlap when // creating consumer. ErrOverlappingFilterSubjects JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeOverlappingFilterSubjects, Description: "consumer subject filters cannot overlap", Code: 500}} // ErrEmptyFilter is returned when a filter in FilterSubjects is empty. ErrEmptyFilter JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerEmptyFilter, Description: "consumer filter in FilterSubjects cannot be empty", Code: 500}} // Client errors // ErrConsumerMultipleFilterSubjectsNotSupported is returned when the // connected nats-server version does not support setting multiple filter // subjects with filter_subjects field. If this error is returned when // executing AddConsumer(), the consumer with invalid configuration was // already created in the server. 
ErrConsumerMultipleFilterSubjectsNotSupported JetStreamError = &jsError{message: "multiple consumer filter subjects not supported by nats-server"} // ErrConsumerNotFound is an error returned when consumer with given name // does not exist. ErrConsumerNameAlreadyInUse JetStreamError = &jsError{message: "consumer name already in use"} // ErrInvalidJSAck is returned when JetStream ack from message publish is // invalid. ErrInvalidJSAck JetStreamError = &jsError{message: "invalid jetstream publish response"} // ErrStreamNameRequired is returned when the provided stream name is empty. ErrStreamNameRequired JetStreamError = &jsError{message: "stream name is required"} // ErrMsgAlreadyAckd is returned when attempting to acknowledge message more // than once. ErrMsgAlreadyAckd JetStreamError = &jsError{message: "message was already acknowledged"} // ErrNoStreamResponse is returned when there is no response from stream // (e.g. no responders error). ErrNoStreamResponse JetStreamError = &jsError{message: "no response from stream"} // ErrNotJSMessage is returned when attempting to get metadata from non // JetStream message. ErrNotJSMessage JetStreamError = &jsError{message: "not a jetstream message"} // ErrInvalidStreamName is returned when the provided stream name is invalid // (contains '.'). ErrInvalidStreamName JetStreamError = &jsError{message: "invalid stream name"} // ErrInvalidSubject is returned when the provided subject name is invalid. ErrInvalidSubject JetStreamError = &jsError{message: "invalid subject name"} // ErrInvalidConsumerName is returned when the provided consumer name is // invalid (contains '.'). ErrInvalidConsumerName JetStreamError = &jsError{message: "invalid consumer name"} // ErrNoMessages is returned when no messages are currently available for a // consumer. 
ErrNoMessages JetStreamError = &jsError{message: "no messages"} // ErrPinIDMismatch is returned when Pin ID sent in the request does not match // the currently pinned consumer subscriber ID on the server. ErrPinIDMismatch JetStreamError = &jsError{message: "pin ID mismatch"} // ErrMaxBytesExceeded is returned when a message would exceed MaxBytes set // on a pull request. ErrMaxBytesExceeded JetStreamError = &jsError{message: "message size exceeds max bytes"} // ErrBatchCompleted is returned when a fetch request sent the whole batch, // but there are still bytes left. This is applicable only when MaxBytes is // set on a pull request. ErrBatchCompleted JetStreamError = &jsError{message: "batch completed"} // ErrConsumerDeleted is returned when attempting to send pull request to a // consumer which does not exist. ErrConsumerDeleted JetStreamError = &jsError{message: "consumer deleted"} // ErrConsumerLeadershipChanged is returned when pending requests are no // longer valid after leadership has changed. ErrConsumerLeadershipChanged JetStreamError = &jsError{message: "leadership change"} // ErrHandlerRequired is returned when no handler func is provided in // Stream(). ErrHandlerRequired JetStreamError = &jsError{message: "handler cannot be empty"} // ErrEndOfData is returned when iterating over paged API from JetStream // reaches end of data. ErrEndOfData JetStreamError = &jsError{message: "end of data reached"} // ErrNoHeartbeat is received when no message is received in IdleHeartbeat // time (if set). ErrNoHeartbeat JetStreamError = &jsError{message: "no heartbeat received"} // ErrConsumerHasActiveSubscription is returned when a consumer is already // subscribed to a stream. ErrConsumerHasActiveSubscription JetStreamError = &jsError{message: "consumer has active subscription"} // ErrMsgNotBound is returned when given message is not bound to any // subscription. 
ErrMsgNotBound JetStreamError = &jsError{message: "message is not bound to subscription/connection"} // ErrMsgNoReply is returned when attempting to reply to a message without a // reply subject. ErrMsgNoReply JetStreamError = &jsError{message: "message does not have a reply"} // ErrMsgDeleteUnsuccessful is returned when an attempt to delete a message // is unsuccessful. ErrMsgDeleteUnsuccessful JetStreamError = &jsError{message: "message deletion unsuccessful"} // ErrAsyncPublishReplySubjectSet is returned when reply subject is set on // async message publish. ErrAsyncPublishReplySubjectSet JetStreamError = &jsError{message: "reply subject should be empty"} // ErrTooManyStalledMsgs is returned when too many outstanding async // messages are waiting for ack. ErrTooManyStalledMsgs JetStreamError = &jsError{message: "stalled with too many outstanding async published messages"} // ErrInvalidOption is returned when there is a collision between options. ErrInvalidOption JetStreamError = &jsError{message: "invalid jetstream option"} // ErrMsgIteratorClosed is returned when attempting to get message from a // closed iterator. ErrMsgIteratorClosed JetStreamError = &jsError{message: "messages iterator closed"} // ErrOrderedConsumerReset is returned when resetting ordered consumer fails // due to too many attempts. ErrOrderedConsumerReset JetStreamError = &jsError{message: "recreating ordered consumer"} // ErrOrderConsumerUsedAsFetch is returned when ordered consumer was already // used to process messages using Fetch (or FetchBytes). ErrOrderConsumerUsedAsFetch JetStreamError = &jsError{message: "ordered consumer initialized as fetch"} // ErrOrderConsumerUsedAsConsume is returned when ordered consumer was // already used to process messages using Consume or Messages. 
ErrOrderConsumerUsedAsConsume JetStreamError = &jsError{message: "ordered consumer initialized as consume"} // ErrOrderedConsumerConcurrentRequests is returned when attempting to run // concurrent operations on ordered consumers. ErrOrderedConsumerConcurrentRequests JetStreamError = &jsError{message: "cannot run concurrent processing using ordered consumer"} // ErrOrderedConsumerNotCreated is returned when trying to get consumer info // of an ordered consumer which was not yet created. ErrOrderedConsumerNotCreated JetStreamError = &jsError{message: "consumer instance not yet created"} // ErrJetStreamPublisherClosed is returned for each unfinished ack future when JetStream.Cleanup is called. ErrJetStreamPublisherClosed JetStreamError = &jsError{message: "jetstream context closed"} // ErrAsyncPublishTimeout is returned when waiting for ack on async publish ErrAsyncPublishTimeout JetStreamError = &jsError{message: "timeout waiting for ack"} // KeyValue Errors // ErrKeyExists is returned when attempting to create a key that already // exists. ErrKeyExists JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamWrongLastSequence, Code: 400}, message: "key exists"} // ErrKeyValueConfigRequired is returned when attempting to create a bucket // without a config. ErrKeyValueConfigRequired JetStreamError = &jsError{message: "config required"} // ErrInvalidBucketName is returned when attempting to create a bucket with // an invalid name. ErrInvalidBucketName JetStreamError = &jsError{message: "invalid bucket name"} // ErrInvalidKey is returned when attempting to create a key with an invalid // name. ErrInvalidKey JetStreamError = &jsError{message: "invalid key"} // ErrBucketExists is returned when attempting to create a bucket that // already exists and has a different configuration. ErrBucketExists JetStreamError = &jsError{message: "bucket name already in use"} // ErrBucketNotFound is returned when attempting to access a bucket that // does not exist. 
	ErrBucketNotFound JetStreamError = &jsError{message: "bucket not found"}

	// ErrBadBucket is returned when attempting to access a bucket that is not a
	// key-value store.
	ErrBadBucket JetStreamError = &jsError{message: "bucket not valid key-value store"}

	// ErrKeyNotFound is returned when attempting to access a key that does not
	// exist.
	ErrKeyNotFound JetStreamError = &jsError{message: "key not found"}

	// ErrKeyDeleted is returned when attempting to access a key that was
	// deleted.
	ErrKeyDeleted JetStreamError = &jsError{message: "key was deleted"}

	// ErrHistoryTooLarge is returned when the provided history limit is larger
	// than 64. (Comment previously misspelled the identifier as
	// "ErrHistoryToLarge".)
	ErrHistoryTooLarge JetStreamError = &jsError{message: "history limited to a max of 64"}

	// ErrNoKeysFound is returned when no keys are found.
	ErrNoKeysFound JetStreamError = &jsError{message: "no keys found"}

	// ErrObjectConfigRequired is returned when attempting to create an object
	// without a config.
	ErrObjectConfigRequired JetStreamError = &jsError{message: "object-store config required"}

	// ErrBadObjectMeta is returned when the meta information of an object is
	// invalid.
	ErrBadObjectMeta JetStreamError = &jsError{message: "object-store meta information invalid"}

	// ErrObjectNotFound is returned when an object is not found.
	ErrObjectNotFound JetStreamError = &jsError{message: "object not found"}

	// ErrInvalidStoreName is returned when the name of an object-store is
	// invalid.
	ErrInvalidStoreName JetStreamError = &jsError{message: "invalid object-store name"}

	// ErrDigestMismatch is returned when the digests of an object do not match.
	ErrDigestMismatch JetStreamError = &jsError{message: "received a corrupt object, digests do not match"}

	// ErrInvalidDigestFormat is returned when the digest hash of an object has
	// an invalid format.
	ErrInvalidDigestFormat JetStreamError = &jsError{message: "object digest hash has invalid format"}

	// ErrNoObjectsFound is returned when no objects are found.
ErrNoObjectsFound JetStreamError = &jsError{message: "no objects found"} // ErrObjectAlreadyExists is returned when an object with the same name // already exists. ErrObjectAlreadyExists JetStreamError = &jsError{message: "an object already exists with that name"} // ErrNameRequired is returned when a name is required. ErrNameRequired JetStreamError = &jsError{message: "name is required"} // ErrLinkNotAllowed is returned when a link cannot be set when putting the // object in a bucket. ErrLinkNotAllowed JetStreamError = &jsError{message: "link cannot be set when putting the object in bucket"} // ErrObjectRequired is returned when an object is required. ErrObjectRequired = &jsError{message: "object required"} // ErrNoLinkToDeleted is returned when it is not allowed to link to a // deleted object. ErrNoLinkToDeleted JetStreamError = &jsError{message: "not allowed to link to a deleted object"} // ErrNoLinkToLink is returned when it is not allowed to link to another // link. ErrNoLinkToLink JetStreamError = &jsError{message: "not allowed to link to another link"} // ErrCantGetBucket is returned when an invalid Get is attempted on an // object that is a link to a bucket. ErrCantGetBucket JetStreamError = &jsError{message: "invalid Get, object is a link to a bucket"} // ErrBucketRequired is returned when a bucket is required. ErrBucketRequired JetStreamError = &jsError{message: "bucket required"} // ErrBucketMalformed is returned when a bucket is malformed. ErrBucketMalformed JetStreamError = &jsError{message: "bucket malformed"} // ErrUpdateMetaDeleted is returned when the meta information of a deleted // object cannot be updated. ErrUpdateMetaDeleted JetStreamError = &jsError{message: "cannot update meta for a deleted object"} ) // Error prints the JetStream API error code and description. 
func (e *APIError) Error() string { return fmt.Sprintf("nats: API error: code=%d err_code=%d description=%s", e.Code, e.ErrorCode, e.Description) } // APIError implements the JetStreamError interface. func (e *APIError) APIError() *APIError { return e } // Is matches against an APIError. func (e *APIError) Is(err error) bool { if e == nil { return false } // Extract internal APIError to match against. var aerr *APIError ok := errors.As(err, &aerr) if !ok { return ok } return e.ErrorCode == aerr.ErrorCode } func (err *jsError) APIError() *APIError { return err.apiErr } func (err *jsError) Error() string { if err.apiErr != nil && err.apiErr.Description != "" { return err.apiErr.Error() } return fmt.Sprintf("nats: %s", err.message) } func (err *jsError) Unwrap() error { // Allow matching to embedded APIError in case there is one. if err.apiErr == nil { return nil } return err.apiErr } nats.go-1.41.0/jetstream/jetstream.go000066400000000000000000001074671477351342400175220ustar00rootroot00000000000000// Copyright 2022-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package jetstream import ( "context" "encoding/json" "errors" "fmt" "regexp" "strings" "time" "github.com/nats-io/nats.go" "github.com/nats-io/nuid" ) type ( // JetStream is the top-level interface for interacting with JetStream. // The capabilities of JetStream include: // // - Publishing messages to a stream using [Publisher]. // - Managing streams using [StreamManager]. 
// - Managing consumers using [StreamConsumerManager]. Those are the same // methods as on [Stream], but are available as a shortcut to a consumer // bypassing stream lookup. // - Managing KeyValue stores using [KeyValueManager]. // - Managing Object Stores using [ObjectStoreManager]. // // JetStream can be created using [New], [NewWithAPIPrefix] or // [NewWithDomain] methods. JetStream interface { // AccountInfo fetches account information from the server, containing details // about the account associated with this JetStream connection. If account is // not enabled for JetStream, ErrJetStreamNotEnabledForAccount is returned. If // the server does not have JetStream enabled, ErrJetStreamNotEnabled is // returned. AccountInfo(ctx context.Context) (*AccountInfo, error) // Conn returns the underlying NATS connection. Conn() *nats.Conn // Options returns read-only JetStreamOptions used // when making requests to JetStream. Options() JetStreamOptions StreamConsumerManager StreamManager Publisher KeyValueManager ObjectStoreManager } // Publisher provides methods for publishing messages to a stream. // It is available as a part of [JetStream] interface. // The behavior of Publisher can be customized using [PublishOpt] options. Publisher interface { // Publish performs a synchronous publish to a stream and waits for ack // from server. It accepts subject name (which must be bound to a stream) // and message payload. Publish(ctx context.Context, subject string, payload []byte, opts ...PublishOpt) (*PubAck, error) // PublishMsg performs a synchronous publish to a stream and waits for // ack from server. It accepts subject name (which must be bound to a // stream) and nats.Message. PublishMsg(ctx context.Context, msg *nats.Msg, opts ...PublishOpt) (*PubAck, error) // PublishAsync performs a publish to a stream and returns // [PubAckFuture] interface, not blocking while waiting for an // acknowledgement. 
		// It accepts subject name (which must be bound to a stream) and
		// message payload.
		//
		// PublishAsync does not guarantee that the message has been
		// received by the server. It only guarantees that the message has been
		// sent to the server and thus messages can be stored in the stream
		// out of order in case of retries.
		PublishAsync(subject string, payload []byte, opts ...PublishOpt) (PubAckFuture, error)

		// PublishMsgAsync performs a publish to a stream and returns
		// [PubAckFuture] interface, not blocking while waiting for an
		// acknowledgement. It accepts subject name (which must
		// be bound to a stream) and nats.Message.
		//
		// PublishMsgAsync does not guarantee that the message has been
		// received by the server. It only guarantees that the message has been
		// sent to the server and thus messages can be stored in the stream
		// out of order in case of retries.
		// (The sentences of this comment were previously scrambled; restored
		// to match the PublishAsync documentation above.)
		PublishMsgAsync(msg *nats.Msg, opts ...PublishOpt) (PubAckFuture, error)

		// PublishAsyncPending returns the number of async publishes outstanding
		// for this context. An outstanding publish is one that has been
		// sent by the publisher but has not yet received an ack.
		PublishAsyncPending() int

		// PublishAsyncComplete returns a channel that will be closed when all
		// outstanding asynchronously published messages are acknowledged by the
		// server.
		PublishAsyncComplete() <-chan struct{}

		// CleanupPublisher will cleanup the publishing side of JetStreamContext.
		//
		// This will unsubscribe from the internal reply subject if needed.
		// All pending async publishes will fail with ErrJetStreamPublisherClosed
		// (the original comment referenced a non-existent ErrJetStreamContextClosed).
		//
		// If an error handler was provided, it will be called for each pending async
		// publish and PublishAsyncComplete will be closed.
		//
		// After completing JetStreamContext is still usable - internal subscription
		// will be recreated on next publish, but the acks from previous publishes will
		// be lost.
		CleanupPublisher()
	}

	// StreamManager provides CRUD API for managing streams.
It is available as // a part of [JetStream] interface. CreateStream, UpdateStream, // CreateOrUpdateStream and Stream methods return a [Stream] interface, allowing // to operate on a stream. StreamManager interface { // CreateStream creates a new stream with given config and returns an // interface to operate on it. If stream with given name already exists // and its configuration differs from the provided one, // ErrStreamNameAlreadyInUse is returned. CreateStream(ctx context.Context, cfg StreamConfig) (Stream, error) // UpdateStream updates an existing stream. If stream does not exist, // ErrStreamNotFound is returned. UpdateStream(ctx context.Context, cfg StreamConfig) (Stream, error) // CreateOrUpdateStream creates a stream with given config. If stream // already exists, it will be updated (if possible). CreateOrUpdateStream(ctx context.Context, cfg StreamConfig) (Stream, error) // Stream fetches [StreamInfo] and returns a [Stream] interface for a given stream name. // If stream does not exist, ErrStreamNotFound is returned. Stream(ctx context.Context, stream string) (Stream, error) // StreamNameBySubject returns a stream name stream listening on given // subject. If no stream is bound to given subject, ErrStreamNotFound // is returned. StreamNameBySubject(ctx context.Context, subject string) (string, error) // DeleteStream removes a stream with given name. If stream does not // exist, ErrStreamNotFound is returned. DeleteStream(ctx context.Context, stream string) error // ListStreams returns StreamInfoLister, enabling iterating over a // channel of stream infos. ListStreams(context.Context, ...StreamListOpt) StreamInfoLister // StreamNames returns a StreamNameLister, enabling iterating over a // channel of stream names. StreamNames(context.Context, ...StreamListOpt) StreamNameLister } // StreamConsumerManager provides CRUD API for managing consumers. It is // available as a part of [JetStream] interface. 
This is an alternative to // [Stream] interface, allowing to bypass stream lookup. CreateConsumer, // UpdateConsumer, CreateOrUpdateConsumer and Consumer methods return a // [Consumer] interface, allowing to operate on a consumer (e.g. consume // messages). StreamConsumerManager interface { // CreateOrUpdateConsumer creates a consumer on a given stream with // given config. If consumer already exists, it will be updated (if // possible). Consumer interface is returned, allowing to operate on a // consumer (e.g. fetch messages). CreateOrUpdateConsumer(ctx context.Context, stream string, cfg ConsumerConfig) (Consumer, error) // CreateConsumer creates a consumer on a given stream with given // config. If consumer already exists and the provided configuration // differs from its configuration, ErrConsumerExists is returned. If the // provided configuration is the same as the existing consumer, the // existing consumer is returned. Consumer interface is returned, // allowing to operate on a consumer (e.g. fetch messages). CreateConsumer(ctx context.Context, stream string, cfg ConsumerConfig) (Consumer, error) // UpdateConsumer updates an existing consumer. If consumer does not // exist, ErrConsumerDoesNotExist is returned. Consumer interface is // returned, allowing to operate on a consumer (e.g. fetch messages). UpdateConsumer(ctx context.Context, stream string, cfg ConsumerConfig) (Consumer, error) // OrderedConsumer returns an OrderedConsumer instance. OrderedConsumer // are managed by the library and provide a simple way to consume // messages from a stream. Ordered consumers are ephemeral in-memory // pull consumers and are resilient to deletes and restarts. OrderedConsumer(ctx context.Context, stream string, cfg OrderedConsumerConfig) (Consumer, error) // Consumer returns an interface to an existing consumer, allowing processing // of messages. If consumer does not exist, ErrConsumerNotFound is // returned. 
Consumer(ctx context.Context, stream string, consumer string) (Consumer, error) // DeleteConsumer removes a consumer with given name from a stream. // If consumer does not exist, ErrConsumerNotFound is returned. DeleteConsumer(ctx context.Context, stream string, consumer string) error // PauseConsumer pauses a consumer until the given time. PauseConsumer(ctx context.Context, stream string, consumer string, pauseUntil time.Time) (*ConsumerPauseResponse, error) // ResumeConsumer resumes a paused consumer. ResumeConsumer(ctx context.Context, stream string, consumer string) (*ConsumerPauseResponse, error) } // StreamListOpt is a functional option for [StreamManager.ListStreams] and // [StreamManager.StreamNames] methods. StreamListOpt func(*streamsRequest) error // AccountInfo contains information about the JetStream usage from the // current account. AccountInfo struct { // Tier is the current account usage tier. Tier // Domain is the domain name associated with this account. Domain string `json:"domain"` // API is the API usage statistics for this account. API APIStats `json:"api"` // Tiers is the list of available tiers for this account. Tiers map[string]Tier `json:"tiers"` } // Tier represents a JetStream account usage tier. Tier struct { // Memory is the memory storage being used for Stream Message storage. Memory uint64 `json:"memory"` // Store is the disk storage being used for Stream Message storage. Store uint64 `json:"storage"` // ReservedMemory is the number of bytes reserved for memory usage by // this account on the server ReservedMemory uint64 `json:"reserved_memory"` // ReservedStore is the number of bytes reserved for disk usage by this // account on the server ReservedStore uint64 `json:"reserved_storage"` // Streams is the number of streams currently defined for this account. Streams int `json:"streams"` // Consumers is the number of consumers currently defined for this // account. 
Consumers int `json:"consumers"` // Limits are the JetStream limits for this account. Limits AccountLimits `json:"limits"` } // APIStats reports on API calls to JetStream for this account. APIStats struct { // Total is the total number of API calls. Total uint64 `json:"total"` // Errors is the total number of API errors. Errors uint64 `json:"errors"` } // AccountLimits includes the JetStream limits of the current account. AccountLimits struct { // MaxMemory is the maximum amount of memory available for this account. MaxMemory int64 `json:"max_memory"` // MaxStore is the maximum amount of disk storage available for this // account. MaxStore int64 `json:"max_storage"` // MaxStreams is the maximum number of streams allowed for this account. MaxStreams int `json:"max_streams"` // MaxConsumers is the maximum number of consumers allowed for this // account. MaxConsumers int `json:"max_consumers"` } jetStream struct { conn *nats.Conn opts JetStreamOptions publisher *jetStreamClient } // JetStreamOpt is a functional option for [New], [NewWithAPIPrefix] and // [NewWithDomain] methods. JetStreamOpt func(*JetStreamOptions) error // JetStreamOptions are used to configure JetStream. JetStreamOptions struct { // APIPrefix is the prefix used for JetStream API requests. APIPrefix string // Domain is the domain name token used when sending JetStream requests. Domain string // DefaultTimeout is the default timeout used for JetStream API requests. // This applies when the context passed to JetStream methods does not have // a deadline set. DefaultTimeout time.Duration publisherOpts asyncPublisherOpts // this is the actual prefix used in the API requests // it is either APIPrefix or a domain specific prefix apiPrefix string replyPrefix string replyPrefixLen int clientTrace *ClientTrace } // ClientTrace can be used to trace API interactions for [JetStream]. ClientTrace struct { // RequestSent is called when an API request is sent to the server. 
RequestSent func(subj string, payload []byte) // ResponseReceived is called when a response is received from the // server. ResponseReceived func(subj string, payload []byte, hdr nats.Header) } streamInfoResponse struct { apiResponse apiPaged *StreamInfo } accountInfoResponse struct { apiResponse AccountInfo } streamDeleteResponse struct { apiResponse Success bool `json:"success,omitempty"` } // StreamInfoLister is used to iterate over a channel of stream infos. // Err method can be used to check for errors encountered during iteration. // Info channel is always closed and therefore can be used in a range loop. StreamInfoLister interface { Info() <-chan *StreamInfo Err() error } // StreamNameLister is used to iterate over a channel of stream names. // Err method can be used to check for errors encountered during iteration. // Name channel is always closed and therefore can be used in a range loop. StreamNameLister interface { Name() <-chan string Err() error } apiPagedRequest struct { Offset int `json:"offset"` } streamLister struct { js *jetStream offset int pageInfo *apiPaged streams chan *StreamInfo names chan string err error } streamListResponse struct { apiResponse apiPaged Streams []*StreamInfo `json:"streams"` } streamNamesResponse struct { apiResponse apiPaged Streams []string `json:"streams"` } streamsRequest struct { apiPagedRequest Subject string `json:"subject,omitempty"` } ) // defaultAPITimeout is used if context.Background() or context.TODO() is passed to API calls. const defaultAPITimeout = 5 * time.Second var subjectRegexp = regexp.MustCompile(`^[^ >]*[>]?$`) // New returns a new JetStream instance. // It uses default API prefix ($JS.API) for JetStream API requests. // If a custom API prefix is required, use [NewWithAPIPrefix] or [NewWithDomain]. // // Available options: // - [WithClientTrace] - enables request/response tracing. // - [WithPublishAsyncErrHandler] - sets error handler for async message publish. 
// - [WithPublishAsyncMaxPending] - sets the maximum outstanding async publishes // that can be inflight at one time. func New(nc *nats.Conn, opts ...JetStreamOpt) (JetStream, error) { jsOpts := JetStreamOptions{ apiPrefix: DefaultAPIPrefix, publisherOpts: asyncPublisherOpts{ maxpa: defaultAsyncPubAckInflight, }, DefaultTimeout: defaultAPITimeout, } setReplyPrefix(nc, &jsOpts) for _, opt := range opts { if err := opt(&jsOpts); err != nil { return nil, err } } js := &jetStream{ conn: nc, opts: jsOpts, publisher: &jetStreamClient{asyncPublisherOpts: jsOpts.publisherOpts}, } return js, nil } const ( // defaultAsyncPubAckInflight is the number of async pub acks inflight. defaultAsyncPubAckInflight = 4000 ) func setReplyPrefix(nc *nats.Conn, jsOpts *JetStreamOptions) { jsOpts.replyPrefix = nats.InboxPrefix if nc.Opts.InboxPrefix != "" { jsOpts.replyPrefix = nc.Opts.InboxPrefix + "." } // Add 1 for the dot separator. jsOpts.replyPrefixLen = len(jsOpts.replyPrefix) + aReplyTokensize + 1 } // NewWithAPIPrefix returns a new JetStream instance and sets the API prefix to be used in requests to JetStream API. // The API prefix will be used in API requests to JetStream, e.g. .STREAM.INFO.. // // Available options: // - [WithClientTrace] - enables request/response tracing. // - [WithPublishAsyncErrHandler] - sets error handler for async message publish. // - [WithPublishAsyncMaxPending] - sets the maximum outstanding async publishes // that can be inflight at one time. 
func NewWithAPIPrefix(nc *nats.Conn, apiPrefix string, opts ...JetStreamOpt) (JetStream, error) { jsOpts := JetStreamOptions{ publisherOpts: asyncPublisherOpts{ maxpa: defaultAsyncPubAckInflight, }, APIPrefix: apiPrefix, DefaultTimeout: defaultAPITimeout, } setReplyPrefix(nc, &jsOpts) for _, opt := range opts { if err := opt(&jsOpts); err != nil { return nil, err } } if apiPrefix == "" { return nil, errors.New("API prefix cannot be empty") } if !strings.HasSuffix(apiPrefix, ".") { jsOpts.apiPrefix = fmt.Sprintf("%s.", apiPrefix) } else { jsOpts.apiPrefix = apiPrefix } js := &jetStream{ conn: nc, opts: jsOpts, publisher: &jetStreamClient{asyncPublisherOpts: jsOpts.publisherOpts}, } return js, nil } // NewWithDomain returns a new JetStream instance and sets the domain name token used when sending JetStream requests. // The domain name token will be used in API requests to JetStream, e.g. $JS..API.STREAM.INFO.. // // Available options: // - [WithClientTrace] - enables request/response tracing. // - [WithPublishAsyncErrHandler] - sets error handler for async message publish. // - [WithPublishAsyncMaxPending] - sets the maximum outstanding async publishes // that can be inflight at one time. func NewWithDomain(nc *nats.Conn, domain string, opts ...JetStreamOpt) (JetStream, error) { jsOpts := JetStreamOptions{ publisherOpts: asyncPublisherOpts{ maxpa: defaultAsyncPubAckInflight, }, Domain: domain, DefaultTimeout: defaultAPITimeout, } setReplyPrefix(nc, &jsOpts) for _, opt := range opts { if err := opt(&jsOpts); err != nil { return nil, err } } if domain == "" { return nil, errors.New("domain cannot be empty") } jsOpts.apiPrefix = fmt.Sprintf(jsDomainT, domain) js := &jetStream{ conn: nc, opts: jsOpts, publisher: &jetStreamClient{asyncPublisherOpts: jsOpts.publisherOpts}, } return js, nil } // Conn returns the underlying NATS connection. 
func (js *jetStream) Conn() *nats.Conn { return js.conn } func (js *jetStream) Options() JetStreamOptions { return js.opts } // CreateStream creates a new stream with given config and returns an // interface to operate on it. If stream with given name already exists, // ErrStreamNameAlreadyInUse is returned. func (js *jetStream) CreateStream(ctx context.Context, cfg StreamConfig) (Stream, error) { if err := validateStreamName(cfg.Name); err != nil { return nil, err } ctx, cancel := js.wrapContextWithoutDeadline(ctx) if cancel != nil { defer cancel() } ncfg := cfg // If we have a mirror and an external domain, convert to ext.APIPrefix. if ncfg.Mirror != nil && ncfg.Mirror.Domain != "" { // Copy so we do not change the caller's version. ncfg.Mirror = ncfg.Mirror.copy() if err := ncfg.Mirror.convertDomain(); err != nil { return nil, err } } // Check sources for the same. if len(ncfg.Sources) > 0 { ncfg.Sources = append([]*StreamSource(nil), ncfg.Sources...) for i, ss := range ncfg.Sources { if ss.Domain != "" { ncfg.Sources[i] = ss.copy() if err := ncfg.Sources[i].convertDomain(); err != nil { return nil, err } } } } req, err := json.Marshal(ncfg) if err != nil { return nil, err } createSubject := fmt.Sprintf(apiStreamCreateT, cfg.Name) var resp streamInfoResponse if _, err = js.apiRequestJSON(ctx, createSubject, &resp, req); err != nil { return nil, err } if resp.Error != nil { if resp.Error.ErrorCode == JSErrCodeStreamNameInUse { return nil, ErrStreamNameAlreadyInUse } return nil, resp.Error } // check that input subject transform (if used) is reflected in the returned StreamInfo if cfg.SubjectTransform != nil && resp.StreamInfo.Config.SubjectTransform == nil { return nil, ErrStreamSubjectTransformNotSupported } if len(cfg.Sources) != 0 { if len(cfg.Sources) != len(resp.Config.Sources) { return nil, ErrStreamSourceNotSupported } for i := range cfg.Sources { if len(cfg.Sources[i].SubjectTransforms) != 0 && len(resp.Sources[i].SubjectTransforms) == 0 { return nil, 
ErrStreamSourceMultipleFilterSubjectsNotSupported } } } return &stream{ js: js, name: cfg.Name, info: resp.StreamInfo, }, nil } // If we have a Domain, convert to the appropriate ext.APIPrefix. // This will change the stream source, so should be a copy passed in. func (ss *StreamSource) convertDomain() error { if ss.Domain == "" { return nil } if ss.External != nil { return errors.New("nats: domain and external are both set") } ss.External = &ExternalStream{APIPrefix: fmt.Sprintf(jsExtDomainT, ss.Domain)} return nil } // Helper for copying when we do not want to change user's version. func (ss *StreamSource) copy() *StreamSource { nss := *ss // Check pointers if ss.OptStartTime != nil { t := *ss.OptStartTime nss.OptStartTime = &t } if ss.External != nil { ext := *ss.External nss.External = &ext } return &nss } // UpdateStream updates an existing stream. If stream does not exist, // ErrStreamNotFound is returned. func (js *jetStream) UpdateStream(ctx context.Context, cfg StreamConfig) (Stream, error) { if err := validateStreamName(cfg.Name); err != nil { return nil, err } ctx, cancel := js.wrapContextWithoutDeadline(ctx) if cancel != nil { defer cancel() } req, err := json.Marshal(cfg) if err != nil { return nil, err } updateSubject := fmt.Sprintf(apiStreamUpdateT, cfg.Name) var resp streamInfoResponse if _, err = js.apiRequestJSON(ctx, updateSubject, &resp, req); err != nil { return nil, err } if resp.Error != nil { if resp.Error.ErrorCode == JSErrCodeStreamNotFound { return nil, ErrStreamNotFound } return nil, resp.Error } // check that input subject transform (if used) is reflected in the returned StreamInfo if cfg.SubjectTransform != nil && resp.StreamInfo.Config.SubjectTransform == nil { return nil, ErrStreamSubjectTransformNotSupported } if len(cfg.Sources) != 0 { if len(cfg.Sources) != len(resp.Config.Sources) { return nil, ErrStreamSourceNotSupported } for i := range cfg.Sources { if len(cfg.Sources[i].SubjectTransforms) != 0 && 
len(resp.Sources[i].SubjectTransforms) == 0 { return nil, ErrStreamSourceMultipleFilterSubjectsNotSupported } } } return &stream{ js: js, name: cfg.Name, info: resp.StreamInfo, }, nil } // CreateOrUpdateStream creates a stream with given config. If stream // already exists, it will be updated (if possible). func (js *jetStream) CreateOrUpdateStream(ctx context.Context, cfg StreamConfig) (Stream, error) { s, err := js.UpdateStream(ctx, cfg) if err != nil { if !errors.Is(err, ErrStreamNotFound) { return nil, err } return js.CreateStream(ctx, cfg) } return s, nil } // Stream fetches [StreamInfo] and returns a [Stream] interface for a given stream name. // If stream does not exist, ErrStreamNotFound is returned. func (js *jetStream) Stream(ctx context.Context, name string) (Stream, error) { if err := validateStreamName(name); err != nil { return nil, err } ctx, cancel := js.wrapContextWithoutDeadline(ctx) if cancel != nil { defer cancel() } infoSubject := fmt.Sprintf(apiStreamInfoT, name) var resp streamInfoResponse if _, err := js.apiRequestJSON(ctx, infoSubject, &resp); err != nil { return nil, err } if resp.Error != nil { if resp.Error.ErrorCode == JSErrCodeStreamNotFound { return nil, ErrStreamNotFound } return nil, resp.Error } return &stream{ js: js, name: name, info: resp.StreamInfo, }, nil } // DeleteStream removes a stream with given name func (js *jetStream) DeleteStream(ctx context.Context, name string) error { if err := validateStreamName(name); err != nil { return err } ctx, cancel := js.wrapContextWithoutDeadline(ctx) if cancel != nil { defer cancel() } deleteSubject := fmt.Sprintf(apiStreamDeleteT, name) var resp streamDeleteResponse if _, err := js.apiRequestJSON(ctx, deleteSubject, &resp); err != nil { return err } if resp.Error != nil { if resp.Error.ErrorCode == JSErrCodeStreamNotFound { return ErrStreamNotFound } return resp.Error } return nil } // CreateOrUpdateConsumer creates a consumer on a given stream with // given config. 
If consumer already exists, it will be updated (if // possible). Consumer interface is returned, allowing to operate on a // consumer (e.g. fetch messages). func (js *jetStream) CreateOrUpdateConsumer(ctx context.Context, stream string, cfg ConsumerConfig) (Consumer, error) { if err := validateStreamName(stream); err != nil { return nil, err } return upsertConsumer(ctx, js, stream, cfg, consumerActionCreateOrUpdate) } // CreateConsumer creates a consumer on a given stream with given // config. If consumer already exists and the provided configuration // differs from its configuration, ErrConsumerExists is returned. If the // provided configuration is the same as the existing consumer, the // existing consumer is returned. Consumer interface is returned, // allowing to operate on a consumer (e.g. fetch messages). func (js *jetStream) CreateConsumer(ctx context.Context, stream string, cfg ConsumerConfig) (Consumer, error) { if err := validateStreamName(stream); err != nil { return nil, err } return upsertConsumer(ctx, js, stream, cfg, consumerActionCreate) } // UpdateConsumer updates an existing consumer. If consumer does not // exist, ErrConsumerDoesNotExist is returned. Consumer interface is // returned, allowing to operate on a consumer (e.g. fetch messages). func (js *jetStream) UpdateConsumer(ctx context.Context, stream string, cfg ConsumerConfig) (Consumer, error) { if err := validateStreamName(stream); err != nil { return nil, err } return upsertConsumer(ctx, js, stream, cfg, consumerActionUpdate) } // OrderedConsumer returns an OrderedConsumer instance. OrderedConsumer // are managed by the library and provide a simple way to consume // messages from a stream. Ordered consumers are ephemeral in-memory // pull consumers and are resilient to deletes and restarts. 
func (js *jetStream) OrderedConsumer(ctx context.Context, stream string, cfg OrderedConsumerConfig) (Consumer, error) { if err := validateStreamName(stream); err != nil { return nil, err } oc := &orderedConsumer{ js: js, cfg: &cfg, stream: stream, namePrefix: nuid.Next(), doReset: make(chan struct{}, 1), } consCfg := oc.getConsumerConfig() cons, err := js.CreateOrUpdateConsumer(ctx, stream, *consCfg) if err != nil { return nil, err } oc.currentConsumer = cons.(*pullConsumer) return oc, nil } // Consumer returns an interface to an existing consumer, allowing processing // of messages. If consumer does not exist, ErrConsumerNotFound is // returned. func (js *jetStream) Consumer(ctx context.Context, stream string, name string) (Consumer, error) { if err := validateStreamName(stream); err != nil { return nil, err } return getConsumer(ctx, js, stream, name) } // DeleteConsumer removes a consumer with given name from a stream. // If consumer does not exist, ErrConsumerNotFound is returned. func (js *jetStream) DeleteConsumer(ctx context.Context, stream string, name string) error { if err := validateStreamName(stream); err != nil { return err } return deleteConsumer(ctx, js, stream, name) } func (js *jetStream) PauseConsumer(ctx context.Context, stream string, consumer string, pauseUntil time.Time) (*ConsumerPauseResponse, error) { if err := validateStreamName(stream); err != nil { return nil, err } return pauseConsumer(ctx, js, stream, consumer, &pauseUntil) } func (js *jetStream) ResumeConsumer(ctx context.Context, stream string, consumer string) (*ConsumerPauseResponse, error) { if err := validateStreamName(stream); err != nil { return nil, err } return resumeConsumer(ctx, js, stream, consumer) } func validateStreamName(stream string) error { if stream == "" { return ErrStreamNameRequired } if strings.ContainsAny(stream, ">*. 
/\\") { return fmt.Errorf("%w: '%s'", ErrInvalidStreamName, stream) } return nil } func validateSubject(subject string) error { if subject == "" { return fmt.Errorf("%w: %s", ErrInvalidSubject, "subject cannot be empty") } if subject[0] == '.' || subject[len(subject)-1] == '.' || !subjectRegexp.MatchString(subject) { return fmt.Errorf("%w: %s", ErrInvalidSubject, subject) } return nil } // AccountInfo fetches account information from the server, containing details // about the account associated with this JetStream connection. If account is // not enabled for JetStream, ErrJetStreamNotEnabledForAccount is returned. // // If the server does not have JetStream enabled, ErrJetStreamNotEnabled is // returned (for a single server setup). For clustered topologies, AccountInfo // will time out. func (js *jetStream) AccountInfo(ctx context.Context) (*AccountInfo, error) { ctx, cancel := js.wrapContextWithoutDeadline(ctx) if cancel != nil { defer cancel() } var resp accountInfoResponse if _, err := js.apiRequestJSON(ctx, apiAccountInfo, &resp); err != nil { if errors.Is(err, nats.ErrNoResponders) { return nil, ErrJetStreamNotEnabled } return nil, err } if resp.Error != nil { if resp.Error.ErrorCode == JSErrCodeJetStreamNotEnabledForAccount { return nil, ErrJetStreamNotEnabledForAccount } if resp.Error.ErrorCode == JSErrCodeJetStreamNotEnabled { return nil, ErrJetStreamNotEnabled } return nil, resp.Error } return &resp.AccountInfo, nil } // ListStreams returns StreamInfoLister, enabling iterating over a // channel of stream infos. 
func (js *jetStream) ListStreams(ctx context.Context, opts ...StreamListOpt) StreamInfoLister { l := &streamLister{ js: js, streams: make(chan *StreamInfo), } var streamsReq streamsRequest for _, opt := range opts { if err := opt(&streamsReq); err != nil { l.err = err close(l.streams) return l } } go func() { defer close(l.streams) ctx, cancel := js.wrapContextWithoutDeadline(ctx) if cancel != nil { defer cancel() } for { page, err := l.streamInfos(ctx, streamsReq) if err != nil && !errors.Is(err, ErrEndOfData) { l.err = err return } for _, info := range page { select { case l.streams <- info: case <-ctx.Done(): l.err = ctx.Err() return } } if errors.Is(err, ErrEndOfData) { return } } }() return l } // Info returns a channel allowing retrieval of stream infos returned by [ListStreams] func (s *streamLister) Info() <-chan *StreamInfo { return s.streams } // Err returns an error channel which will be populated with error from [ListStreams] or [StreamNames] request func (s *streamLister) Err() error { return s.err } // StreamNames returns a StreamNameLister, enabling iterating over a // channel of stream names. func (js *jetStream) StreamNames(ctx context.Context, opts ...StreamListOpt) StreamNameLister { l := &streamLister{ js: js, names: make(chan string), } var streamsReq streamsRequest for _, opt := range opts { if err := opt(&streamsReq); err != nil { l.err = err close(l.names) return l } } go func() { ctx, cancel := js.wrapContextWithoutDeadline(ctx) if cancel != nil { defer cancel() } defer close(l.names) for { page, err := l.streamNames(ctx, streamsReq) if err != nil && !errors.Is(err, ErrEndOfData) { l.err = err return } for _, info := range page { select { case l.names <- info: case <-ctx.Done(): l.err = ctx.Err() return } } if errors.Is(err, ErrEndOfData) { return } } }() return l } // StreamNameBySubject returns a stream name stream listening on given // subject. If no stream is bound to given subject, ErrStreamNotFound // is returned. 
func (js *jetStream) StreamNameBySubject(ctx context.Context, subject string) (string, error) { ctx, cancel := js.wrapContextWithoutDeadline(ctx) if cancel != nil { defer cancel() } if err := validateSubject(subject); err != nil { return "", err } r := &streamsRequest{Subject: subject} req, err := json.Marshal(r) if err != nil { return "", err } var resp streamNamesResponse _, err = js.apiRequestJSON(ctx, apiStreams, &resp, req) if err != nil { return "", err } if resp.Error != nil { return "", resp.Error } if len(resp.Streams) == 0 { return "", ErrStreamNotFound } return resp.Streams[0], nil } // Name returns a channel allowing retrieval of stream names returned by [StreamNames] func (s *streamLister) Name() <-chan string { return s.names } // infos fetches the next [StreamInfo] page func (s *streamLister) streamInfos(ctx context.Context, streamsReq streamsRequest) ([]*StreamInfo, error) { if s.pageInfo != nil && s.offset >= s.pageInfo.Total { return nil, ErrEndOfData } req := streamsRequest{ apiPagedRequest: apiPagedRequest{ Offset: s.offset, }, Subject: streamsReq.Subject, } reqJSON, err := json.Marshal(req) if err != nil { return nil, err } var resp streamListResponse _, err = s.js.apiRequestJSON(ctx, apiStreamListT, &resp, reqJSON) if err != nil { return nil, err } if resp.Error != nil { return nil, resp.Error } s.pageInfo = &resp.apiPaged s.offset += len(resp.Streams) return resp.Streams, nil } // streamNames fetches the next stream names page func (s *streamLister) streamNames(ctx context.Context, streamsReq streamsRequest) ([]string, error) { if s.pageInfo != nil && s.offset >= s.pageInfo.Total { return nil, ErrEndOfData } req := streamsRequest{ apiPagedRequest: apiPagedRequest{ Offset: s.offset, }, Subject: streamsReq.Subject, } reqJSON, err := json.Marshal(req) if err != nil { return nil, err } var resp streamNamesResponse _, err = s.js.apiRequestJSON(ctx, apiStreams, &resp, reqJSON) if err != nil { return nil, err } if resp.Error != nil { return nil, 
resp.Error } s.pageInfo = &resp.apiPaged s.offset += len(resp.Streams) return resp.Streams, nil } // wrapContextWithoutDeadline wraps context without deadline with default timeout. // If deadline is already set, it will be returned as is, and cancel() will be nil. // Caller should check if cancel() is nil before calling it. func (js *jetStream) wrapContextWithoutDeadline(ctx context.Context) (context.Context, context.CancelFunc) { if _, ok := ctx.Deadline(); ok { return ctx, nil } return context.WithTimeout(ctx, js.opts.DefaultTimeout) } // CleanupPublisher will cleanup the publishing side of JetStreamContext. // // This will unsubscribe from the internal reply subject if needed. // All pending async publishes will fail with ErrJetStreamContextClosed. // // If an error handler was provided, it will be called for each pending async // publish and PublishAsyncComplete will be closed. // // After completing JetStreamContext is still usable - internal subscription // will be recreated on next publish, but the acks from previous publishes will // be lost. 
func (js *jetStream) CleanupPublisher() { js.cleanupReplySub() js.publisher.Lock() errCb := js.publisher.aecb for id, paf := range js.publisher.acks { paf.err = ErrJetStreamPublisherClosed if paf.errCh != nil { paf.errCh <- paf.err } if errCb != nil { // call error handler after releasing the mutex to avoid contention defer errCb(js, paf.msg, ErrJetStreamPublisherClosed) } delete(js.publisher.acks, id) } if js.publisher.doneCh != nil { close(js.publisher.doneCh) js.publisher.doneCh = nil } js.publisher.Unlock() } func (js *jetStream) cleanupReplySub() { if js.publisher == nil { return } js.publisher.Lock() if js.publisher.replySub != nil { js.publisher.replySub.Unsubscribe() js.publisher.replySub = nil } if js.publisher.connStatusCh != nil { close(js.publisher.connStatusCh) js.publisher.connStatusCh = nil } js.publisher.Unlock() } nats.go-1.41.0/jetstream/jetstream_options.go000066400000000000000000000511401477351342400212570ustar00rootroot00000000000000// Copyright 2022-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package jetstream import ( "fmt" "time" ) type pullOptFunc func(*consumeOpts) error func (fn pullOptFunc) configureConsume(opts *consumeOpts) error { return fn(opts) } func (fn pullOptFunc) configureMessages(opts *consumeOpts) error { return fn(opts) } // WithClientTrace enables request/response API calls tracing. 
func WithClientTrace(ct *ClientTrace) JetStreamOpt { return func(opts *JetStreamOptions) error { opts.clientTrace = ct return nil } } // WithPublishAsyncErrHandler sets error handler for async message publish. func WithPublishAsyncErrHandler(cb MsgErrHandler) JetStreamOpt { return func(opts *JetStreamOptions) error { opts.publisherOpts.aecb = cb return nil } } // WithPublishAsyncMaxPending sets the maximum outstanding async publishes that // can be inflight at one time. func WithPublishAsyncMaxPending(max int) JetStreamOpt { return func(opts *JetStreamOptions) error { if max < 1 { return fmt.Errorf("%w: max ack pending should be >= 1", ErrInvalidOption) } opts.publisherOpts.maxpa = max return nil } } // WithPublishAsyncTimeout sets the timeout for async message publish. // If not provided, timeout is disabled. func WithPublishAsyncTimeout(dur time.Duration) JetStreamOpt { return func(opts *JetStreamOptions) error { opts.publisherOpts.ackTimeout = dur return nil } } // WithDefaultTimeout sets the default timeout for JetStream API requests. // It is used when context used for the request does not have a deadline set. // If not provided, a default of 5 seconds will be used. 
func WithDefaultTimeout(timeout time.Duration) JetStreamOpt { return func(opts *JetStreamOptions) error { if timeout <= 0 { return fmt.Errorf("%w: timeout value must be greater than 0", ErrInvalidOption) } opts.DefaultTimeout = timeout return nil } } // WithPurgeSubject sets a specific subject for which messages on a stream will // be purged func WithPurgeSubject(subject string) StreamPurgeOpt { return func(req *StreamPurgeRequest) error { req.Subject = subject return nil } } // WithPurgeSequence is used to set a specific sequence number up to which (but // not including) messages will be purged from a stream Can be combined with // [WithPurgeSubject] option, but not with [WithPurgeKeep] func WithPurgeSequence(sequence uint64) StreamPurgeOpt { return func(req *StreamPurgeRequest) error { if req.Keep != 0 { return fmt.Errorf("%w: both 'keep' and 'sequence' cannot be provided in purge request", ErrInvalidOption) } req.Sequence = sequence return nil } } // WithPurgeKeep sets the number of messages to be kept in the stream after // purge. Can be combined with [WithPurgeSubject] option, but not with // [WithPurgeSequence] func WithPurgeKeep(keep uint64) StreamPurgeOpt { return func(req *StreamPurgeRequest) error { if req.Sequence != 0 { return fmt.Errorf("%w: both 'keep' and 'sequence' cannot be provided in purge request", ErrInvalidOption) } req.Keep = keep return nil } } // WithGetMsgSubject sets the stream subject from which the message should be // retrieved. Server will return a first message with a seq >= to the input seq // that has the specified subject. func WithGetMsgSubject(subject string) GetMsgOpt { return func(req *apiMsgGetRequest) error { req.NextFor = subject return nil } } // PullMaxMessages limits the number of messages to be buffered in the client. // If not provided, a default of 500 messages will be used. // This option is exclusive with PullMaxBytes. 
// // PullMaxMessages implements both PullConsumeOpt and PullMessagesOpt, allowing // it to configure Consumer.Consume and Consumer.Messages. type PullMaxMessages int func (max PullMaxMessages) configureConsume(opts *consumeOpts) error { if max <= 0 { return fmt.Errorf("%w: maxMessages size must be at least 1", ErrInvalidOption) } opts.MaxMessages = int(max) return nil } func (max PullMaxMessages) configureMessages(opts *consumeOpts) error { if max <= 0 { return fmt.Errorf("%w: maxMessages size must be at least 1", ErrInvalidOption) } opts.MaxMessages = int(max) return nil } type pullMaxMessagesWithBytesLimit struct { maxMessages int maxBytes int } // PullMaxMessagesWithBytesLimit limits the number of messages to be buffered // in the client. Additionally, it sets the maximum size a single fetch request // can have. Note that this will not limit the total size of messages buffered // in the client, but rather can serve as a way to limit what nats server will // have to internally buffer for a single fetch request. // // The byte limit should never be set to a value lower than the maximum message // size that can be expected from the server. If the byte limit is lower than // the maximum message size, the consumer will stall and not be able to consume // messages. // // This is an advanced option and should be used with caution. Most users should // use [PullMaxMessages] or [PullMaxBytes] instead. // // PullMaxMessagesWithBytesLimit implements both PullConsumeOpt and // PullMessagesOpt, allowing it to configure Consumer.Consume and Consumer.Messages. 
func PullMaxMessagesWithBytesLimit(maxMessages, byteLimit int) pullMaxMessagesWithBytesLimit { return pullMaxMessagesWithBytesLimit{maxMessages, byteLimit} } func (m pullMaxMessagesWithBytesLimit) configureConsume(opts *consumeOpts) error { if m.maxMessages <= 0 { return fmt.Errorf("%w: maxMessages size must be at least 1", ErrInvalidOption) } if m.maxBytes <= 0 { return fmt.Errorf("%w: maxBytes size must be at least 1", ErrInvalidOption) } if opts.MaxMessages > 0 { return fmt.Errorf("%w: maxMessages already set", ErrInvalidOption) } opts.MaxMessages = m.maxMessages opts.MaxBytes = m.maxBytes opts.LimitSize = true return nil } func (m pullMaxMessagesWithBytesLimit) configureMessages(opts *consumeOpts) error { if m.maxMessages <= 0 { return fmt.Errorf("%w: maxMessages size must be at least 1", ErrInvalidOption) } if m.maxBytes <= 0 { return fmt.Errorf("%w: maxBytes size must be at least 1", ErrInvalidOption) } if opts.MaxMessages > 0 { return fmt.Errorf("%w: maxMessages already set", ErrInvalidOption) } opts.MaxMessages = m.maxMessages opts.MaxBytes = m.maxBytes opts.LimitSize = true return nil } // PullExpiry sets timeout on a single pull request, waiting until at least one // message is available. // If not provided, a default of 30 seconds will be used. // // PullExpiry implements both PullConsumeOpt and PullMessagesOpt, allowing // it to configure Consumer.Consume and Consumer.Messages. type PullExpiry time.Duration func (exp PullExpiry) configureConsume(opts *consumeOpts) error { expiry := time.Duration(exp) if expiry < time.Second { return fmt.Errorf("%w: expires value must be at least 1s", ErrInvalidOption) } opts.Expires = expiry return nil } func (exp PullExpiry) configureMessages(opts *consumeOpts) error { expiry := time.Duration(exp) if expiry < time.Second { return fmt.Errorf("%w: expires value must be at least 1s", ErrInvalidOption) } opts.Expires = expiry return nil } // PullMaxBytes limits the number of bytes to be buffered in the client. 
// If not provided, the limit is not set (max messages will be used instead). // This option is exclusive with PullMaxMessages. // // The value should be set to a high enough value to accommodate the largest // message expected from the server. Note that it may not be sufficient to set // this value to the maximum message size, as this setting controls the client // buffer size, not the max bytes requested from the server within a single pull // request. If the value is set too low, the consumer will stall and not be able // to consume messages. // // PullMaxBytes implements both PullConsumeOpt and PullMessagesOpt, allowing // it to configure Consumer.Consume and Consumer.Messages. type PullMaxBytes int func (max PullMaxBytes) configureConsume(opts *consumeOpts) error { if max <= 0 { return fmt.Errorf("%w: max bytes must be greater then 0", ErrInvalidOption) } opts.MaxBytes = int(max) return nil } func (max PullMaxBytes) configureMessages(opts *consumeOpts) error { if max <= 0 { return fmt.Errorf("%w: max bytes must be greater then 0", ErrInvalidOption) } opts.MaxBytes = int(max) return nil } // PullThresholdMessages sets the message count on which consuming will trigger // new pull request to the server. Defaults to 50% of MaxMessages. // // PullThresholdMessages implements both PullConsumeOpt and PullMessagesOpt, // allowing it to configure Consumer.Consume and Consumer.Messages. type PullThresholdMessages int func (t PullThresholdMessages) configureConsume(opts *consumeOpts) error { opts.ThresholdMessages = int(t) return nil } func (t PullThresholdMessages) configureMessages(opts *consumeOpts) error { opts.ThresholdMessages = int(t) return nil } // PullThresholdBytes sets the byte count on which consuming will trigger // new pull request to the server. Defaults to 50% of MaxBytes (if set). // // PullThresholdBytes implements both PullConsumeOpt and PullMessagesOpt, // allowing it to configure Consumer.Consume and Consumer.Messages. 
type PullThresholdBytes int func (t PullThresholdBytes) configureConsume(opts *consumeOpts) error { opts.ThresholdBytes = int(t) return nil } func (t PullThresholdBytes) configureMessages(opts *consumeOpts) error { opts.ThresholdBytes = int(t) return nil } // PullMinPending sets the minimum number of messages that should be pending for // a consumer with PriorityPolicyOverflow to be considered for delivery. // If provided, PullPriorityGroup must be set as well and the consumer has to have // PriorityPolicy set to PriorityPolicyOverflow. // // PullMinPending implements both PullConsumeOpt and PullMessagesOpt, allowing // it to configure Consumer.Consume and Consumer.Messages. type PullMinPending int func (min PullMinPending) configureConsume(opts *consumeOpts) error { if min < 1 { return fmt.Errorf("%w: min pending should be more than 0", ErrInvalidOption) } opts.MinPending = int64(min) return nil } func (min PullMinPending) configureMessages(opts *consumeOpts) error { if min < 1 { return fmt.Errorf("%w: min pending should be more than 0", ErrInvalidOption) } opts.MinPending = int64(min) return nil } // PullMinAckPending sets the minimum number of pending acks that should be // present for a consumer with PriorityPolicyOverflow to be considered for // delivery. If provided, PullPriorityGroup must be set as well and the consumer // has to have PriorityPolicy set to PriorityPolicyOverflow. // // PullMinAckPending implements both PullConsumeOpt and PullMessagesOpt, allowing // it to configure Consumer.Consume and Consumer.Messages. 
type PullMinAckPending int func (min PullMinAckPending) configureConsume(opts *consumeOpts) error { if min < 1 { return fmt.Errorf("%w: min pending should be more than 0", ErrInvalidOption) } opts.MinAckPending = int64(min) return nil } func (min PullMinAckPending) configureMessages(opts *consumeOpts) error { if min < 1 { return fmt.Errorf("%w: min pending should be more than 0", ErrInvalidOption) } opts.MinAckPending = int64(min) return nil } // PullPriorityGroup sets the priority group for a consumer. // It has to match one of the priority groups set on the consumer. // // PullPriorityGroup implements both PullConsumeOpt and PullMessagesOpt, allowing // it to configure Consumer.Consume and Consumer.Messages. type PullPriorityGroup string func (g PullPriorityGroup) configureConsume(opts *consumeOpts) error { opts.Group = string(g) return nil } func (g PullPriorityGroup) configureMessages(opts *consumeOpts) error { opts.Group = string(g) return nil } // PullHeartbeat sets the idle heartbeat duration for a pull subscription // If a client does not receive a heartbeat message from a stream for more // than the idle heartbeat setting, the subscription will be removed // and error will be passed to the message handler. // If not provided, a default PullExpiry / 2 will be used (capped at 30 seconds) // // PullHeartbeat implements both PullConsumeOpt and PullMessagesOpt, allowing // it to configure Consumer.Consume and Consumer.Messages. 
type PullHeartbeat time.Duration func (hb PullHeartbeat) configureConsume(opts *consumeOpts) error { hbTime := time.Duration(hb) if hbTime < 500*time.Millisecond || hbTime > 30*time.Second { return fmt.Errorf("%w: idle_heartbeat value must be within 500ms-30s range", ErrInvalidOption) } opts.Heartbeat = hbTime return nil } func (hb PullHeartbeat) configureMessages(opts *consumeOpts) error { hbTime := time.Duration(hb) if hbTime < 500*time.Millisecond || hbTime > 30*time.Second { return fmt.Errorf("%w: idle_heartbeat value must be within 500ms-30s range", ErrInvalidOption) } opts.Heartbeat = hbTime return nil } // StopAfter sets the number of messages after which the consumer is // automatically stopped and no more messages are pulled from the server. // // StopAfter implements both PullConsumeOpt and PullMessagesOpt, allowing // it to configure Consumer.Consume and Consumer.Messages. type StopAfter int func (nMsgs StopAfter) configureConsume(opts *consumeOpts) error { if nMsgs <= 0 { return fmt.Errorf("%w: auto stop after value cannot be less than 1", ErrInvalidOption) } opts.StopAfter = int(nMsgs) return nil } func (nMsgs StopAfter) configureMessages(opts *consumeOpts) error { if nMsgs <= 0 { return fmt.Errorf("%w: auto stop after value cannot be less than 1", ErrInvalidOption) } opts.StopAfter = int(nMsgs) return nil } // ConsumeErrHandler sets custom error handler invoked when an error was // encountered while consuming messages It will be invoked for both terminal // (Consumer Deleted, invalid request body) and non-terminal (e.g. missing // heartbeats) errors. func ConsumeErrHandler(cb ConsumeErrHandlerFunc) PullConsumeOpt { return pullOptFunc(func(cfg *consumeOpts) error { cfg.ErrHandler = cb return nil }) } // WithMessagesErrOnMissingHeartbeat sets whether a missing heartbeat error // should be reported when calling [MessagesContext.Next] (Default: true). 
func WithMessagesErrOnMissingHeartbeat(hbErr bool) PullMessagesOpt { return pullOptFunc(func(cfg *consumeOpts) error { cfg.ReportMissingHeartbeats = hbErr return nil }) } // FetchMinPending sets the minimum number of messages that should be pending for // a consumer with PriorityPolicyOverflow to be considered for delivery. // If provided, FetchPriorityGroup must be set as well and the consumer has to have // PriorityPolicy set to PriorityPolicyOverflow. func FetchMinPending(min int64) FetchOpt { return func(req *pullRequest) error { if min < 1 { return fmt.Errorf("%w: min pending should be more than 0", ErrInvalidOption) } req.MinPending = min return nil } } // FetchMinAckPending sets the minimum number of pending acks that should be // present for a consumer with PriorityPolicyOverflow to be considered for // delivery. If provided, FetchPriorityGroup must be set as well and the consumer // has to have PriorityPolicy set to PriorityPolicyOverflow. func FetchMinAckPending(min int64) FetchOpt { return func(req *pullRequest) error { if min < 1 { return fmt.Errorf("%w: min ack pending should be more than 0", ErrInvalidOption) } req.MinAckPending = min return nil } } // FetchPriorityGroup sets the priority group for a consumer. // It has to match one of the priority groups set on the consumer. func FetchPriorityGroup(group string) FetchOpt { return func(req *pullRequest) error { req.Group = group return nil } } // FetchMaxWait sets custom timeout for fetching predefined batch of messages. // // If not provided, a default of 30 seconds will be used. func FetchMaxWait(timeout time.Duration) FetchOpt { return func(req *pullRequest) error { if timeout <= 0 { return fmt.Errorf("%w: timeout value must be greater than 0", ErrInvalidOption) } req.Expires = timeout return nil } } // FetchHeartbeat sets custom heartbeat for individual fetch request. 
If a // client does not receive a heartbeat message from a stream for more than 2 // times the idle heartbeat setting, Fetch will return [ErrNoHeartbeat]. // // Heartbeat value has to be lower than FetchMaxWait / 2. // // If not provided, heartbeat will is set to 5s for requests with FetchMaxWait > 10s // and disabled otherwise. func FetchHeartbeat(hb time.Duration) FetchOpt { return func(req *pullRequest) error { if hb <= 0 { return fmt.Errorf("%w: timeout value must be greater than 0", ErrInvalidOption) } req.Heartbeat = hb return nil } } // WithDeletedDetails can be used to display the information about messages // deleted from a stream on a stream info request func WithDeletedDetails(deletedDetails bool) StreamInfoOpt { return func(req *streamInfoRequest) error { req.DeletedDetails = deletedDetails return nil } } // WithSubjectFilter can be used to display the information about messages // stored on given subjects. // NOTE: if the subject filter matches over 100k // subjects, this will result in multiple requests to the server to retrieve all // the information, and all of the returned subjects will be kept in memory. func WithSubjectFilter(subject string) StreamInfoOpt { return func(req *streamInfoRequest) error { req.SubjectFilter = subject return nil } } // WithStreamListSubject can be used to filter results of ListStreams and // StreamNames requests to only streams that have given subject in their // configuration. func WithStreamListSubject(subject string) StreamListOpt { return func(req *streamsRequest) error { req.Subject = subject return nil } } // WithMsgID sets the message ID used for deduplication. func WithMsgID(id string) PublishOpt { return func(opts *pubOpts) error { opts.id = id return nil } } // WithMsgTTL sets per msg TTL. // Requires [StreamConfig.AllowMsgTTL] to be enabled. 
func WithMsgTTL(dur time.Duration) PublishOpt { return func(opts *pubOpts) error { opts.ttl = dur return nil } } // WithExpectStream sets the expected stream the message should be published to. // If the message is published to a different stream server will reject the // message and publish will fail. func WithExpectStream(stream string) PublishOpt { return func(opts *pubOpts) error { opts.stream = stream return nil } } // WithExpectLastSequence sets the expected sequence number the last message // on a stream should have. If the last message has a different sequence number // server will reject the message and publish will fail. func WithExpectLastSequence(seq uint64) PublishOpt { return func(opts *pubOpts) error { opts.lastSeq = &seq return nil } } // WithExpectLastSequencePerSubject sets the expected sequence number the last // message on a subject the message is published to. If the last message on a // subject has a different sequence number server will reject the message and // publish will fail. func WithExpectLastSequencePerSubject(seq uint64) PublishOpt { return func(opts *pubOpts) error { opts.lastSubjectSeq = &seq return nil } } // WithExpectLastMsgID sets the expected message ID the last message on a stream // should have. If the last message has a different message ID server will // reject the message and publish will fail. func WithExpectLastMsgID(id string) PublishOpt { return func(opts *pubOpts) error { opts.lastMsgID = id return nil } } // WithRetryWait sets the retry wait time when ErrNoResponders is encountered. // Defaults to 250ms. func WithRetryWait(dur time.Duration) PublishOpt { return func(opts *pubOpts) error { if dur <= 0 { return fmt.Errorf("%w: retry wait should be more than 0", ErrInvalidOption) } opts.retryWait = dur return nil } } // WithRetryAttempts sets the retry number of attempts when ErrNoResponders is // encountered. 
Defaults to 2 func WithRetryAttempts(num int) PublishOpt { return func(opts *pubOpts) error { if num < 0 { return fmt.Errorf("%w: retry attempts cannot be negative", ErrInvalidOption) } opts.retryAttempts = num return nil } } // WithStallWait sets the max wait when the producer becomes stall producing // messages. If a publish call is blocked for this long, ErrTooManyStalledMsgs // is returned. func WithStallWait(ttl time.Duration) PublishOpt { return func(opts *pubOpts) error { if ttl <= 0 { return fmt.Errorf("%w: stall wait should be more than 0", ErrInvalidOption) } opts.stallWait = ttl return nil } } nats.go-1.41.0/jetstream/jetstream_test.go000066400000000000000000000321521477351342400205450ustar00rootroot00000000000000// Copyright 2022-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package jetstream import ( "errors" "fmt" "testing" "time" "github.com/nats-io/nats.go" ) func TestMessageMetadata(t *testing.T) { tests := []struct { name string givenReply string expectedMetadata MsgMetadata withError error }{ { name: "valid metadata", givenReply: "$JS.ACK.domain.hash-123.stream.cons.5.10.20.123456789.1.token", expectedMetadata: MsgMetadata{ Sequence: SequencePair{ Consumer: 20, Stream: 10, }, NumDelivered: 5, NumPending: 1, Timestamp: time.Unix(0, 123456789), Stream: "stream", Consumer: "cons", Domain: "domain", }, }, { name: "no reply subject", givenReply: "", withError: ErrMsgNoReply, }, { name: "not a JetStream message", givenReply: "ABC", withError: ErrNotJSMessage, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { msg := &jetStreamMsg{ msg: &nats.Msg{ Reply: test.givenReply, Sub: &nats.Subscription{}, }, } res, err := msg.Metadata() if test.withError != nil { if err == nil || !errors.Is(err, test.withError) { t.Fatalf("Expected error: %v; got: %v", test.withError, err) } return } if err != nil { t.Fatalf("Unexpected error: %v", err) } if *res != test.expectedMetadata { t.Fatalf("Invalid metadata; want: %v; got: %v", test.expectedMetadata, res) } }) } } func TestValidateSubject(t *testing.T) { tests := []struct { subject string withError bool }{ {"test.A", false}, {"test.*", false}, {"*", false}, {"*.*", false}, {"test.*.A", false}, {"test.>", false}, {">", false}, {">.", true}, {"test.>.A", true}, {"", true}, {"test A", true}, } for _, test := range tests { tName := fmt.Sprintf("subj=%s,err=%t", test.subject, test.withError) t.Run(tName, func(t *testing.T) { err := validateSubject(test.subject) if test.withError { if err == nil { t.Fatal("Expected error; got nil") } return } if err != nil { t.Fatalf("Unexpected error: %v", err) } }) } } func TestRetryWithBackoff(t *testing.T) { tests := []struct { name string givenOpts backoffOpts withError bool timeout time.Duration cancelAfter time.Duration successfulAttemptNum int 
expectedAttemptsCount int }{ { name: "infinite attempts, 5 tries before success", givenOpts: backoffOpts{ attempts: -1, initialInterval: 10 * time.Millisecond, maxInterval: 60 * time.Millisecond, }, withError: false, successfulAttemptNum: 5, // 0ms + 10ms + 20ms + 40ms + 60ms = 130ms timeout: 200 * time.Millisecond, expectedAttemptsCount: 5, }, { name: "infinite attempts, 5 tries before success, without initial execution", givenOpts: backoffOpts{ attempts: -1, initialInterval: 10 * time.Millisecond, disableInitialExecution: true, factor: 2, maxInterval: 60 * time.Millisecond, }, withError: false, successfulAttemptNum: 5, // 10ms + 20ms + 40ms + 60ms + 60 = 190ms timeout: 250 * time.Millisecond, expectedAttemptsCount: 5, }, { name: "5 attempts, unsuccessful", givenOpts: backoffOpts{ attempts: 5, initialInterval: 10 * time.Millisecond, factor: 2, maxInterval: 60 * time.Millisecond, }, withError: true, // 0ms + 10ms + 20ms + 40ms + 60ms = 130ms timeout: 200 * time.Millisecond, expectedAttemptsCount: 5, }, { name: "custom backoff values, should override other settings", givenOpts: backoffOpts{ initialInterval: 2 * time.Second, factor: 2, maxInterval: 100 * time.Millisecond, customBackoff: []time.Duration{10 * time.Millisecond, 20 * time.Millisecond, 30 * time.Millisecond, 40 * time.Millisecond, 50 * time.Millisecond}, }, withError: false, successfulAttemptNum: 4, // 10ms + 20ms + 30ms + 40ms = 100ms timeout: 150 * time.Millisecond, expectedAttemptsCount: 4, }, { name: "no custom backoff, with cancel", givenOpts: backoffOpts{ attempts: -1, initialInterval: 100 * time.Millisecond, factor: 1, }, withError: false, cancelAfter: 150 * time.Millisecond, timeout: 200 * time.Millisecond, expectedAttemptsCount: 2, }, { name: "custom backoff, with cancel", givenOpts: backoffOpts{ customBackoff: []time.Duration{100 * time.Millisecond, 100 * time.Millisecond, 100 * time.Millisecond}, }, cancelAfter: 150 * time.Millisecond, expectedAttemptsCount: 1, timeout: 200 * time.Millisecond, 
}, { name: "attempts num not provided", givenOpts: backoffOpts{ initialInterval: 100 * time.Millisecond, factor: 1, }, withError: true, timeout: 1 * time.Second, expectedAttemptsCount: 1, }, { name: "custom backoff, but attempts num provided", givenOpts: backoffOpts{ customBackoff: []time.Duration{100 * time.Millisecond, 100 * time.Millisecond, 100 * time.Millisecond}, attempts: 5, }, withError: true, timeout: 1 * time.Second, expectedAttemptsCount: 1, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { ok := make(chan struct{}) errs := make(chan error, 1) var cancelChan chan struct{} if test.cancelAfter != 0 { cancelChan = make(chan struct{}) test.givenOpts.cancel = cancelChan } var count int go func() { err := retryWithBackoff(func(attempt int) (bool, error) { count = attempt if test.successfulAttemptNum != 0 && attempt == test.successfulAttemptNum-1 { return false, nil } return true, fmt.Errorf("error %d", attempt) }, test.givenOpts) if err != nil { errs <- err return } close(ok) }() if test.cancelAfter > 0 { go func() { time.Sleep(test.cancelAfter) close(cancelChan) }() } select { case <-ok: if test.withError { t.Fatal("Expected error; got nil") } case err := <-errs: if !test.withError { t.Fatalf("Unexpected error: %v", err) } case <-time.After(test.timeout): t.Fatalf("Timeout after %v", test.timeout) } if count != test.expectedAttemptsCount-1 { t.Fatalf("Invalid count; want: %d; got: %d", test.expectedAttemptsCount, count) } }) } } func TestPullConsumer_checkPending(t *testing.T) { tests := []struct { name string givenSub *pullSubscription fetchInProgress bool shouldSend bool expectedPullRequest *pullRequest }{ { name: "msgs threshold not reached, bytes not set, no pull request", givenSub: &pullSubscription{ pending: pendingMsgs{ msgCount: 10, }, consumeOpts: &consumeOpts{ ThresholdMessages: 5, MaxMessages: 10, }, }, shouldSend: false, }, { name: "pending msgs below threshold, send pull request", givenSub: &pullSubscription{ pending: 
pendingMsgs{ msgCount: 4, byteCount: 400, // byte count should be ignored }, consumeOpts: &consumeOpts{ ThresholdMessages: 5, MaxMessages: 10, }, }, shouldSend: true, expectedPullRequest: &pullRequest{ Batch: 6, MaxBytes: 0, }, }, { name: "pending msgs below threshold but PR in progress", givenSub: &pullSubscription{ pending: pendingMsgs{ msgCount: 4, }, consumeOpts: &consumeOpts{ ThresholdMessages: 5, MaxMessages: 10, }, }, fetchInProgress: true, shouldSend: false, }, { name: "pending bytes below threshold, send pull request", givenSub: &pullSubscription{ pending: pendingMsgs{ byteCount: 400, msgCount: 1000000, // msgs count should be ignored }, consumeOpts: &consumeOpts{ MaxMessages: 1000000, ThresholdBytes: 500, MaxBytes: 1000, }, }, shouldSend: true, expectedPullRequest: &pullRequest{ Batch: 1000000, MaxBytes: 600, }, }, { name: "pending bytes above threshold, no pull request", givenSub: &pullSubscription{ pending: pendingMsgs{ byteCount: 600, }, consumeOpts: &consumeOpts{ ThresholdBytes: 500, MaxBytes: 1000, }, }, shouldSend: false, }, { name: "pending bytes below threshold, fetch in progress, no pull request", givenSub: &pullSubscription{ pending: pendingMsgs{ byteCount: 400, }, consumeOpts: &consumeOpts{ ThresholdBytes: 500, MaxBytes: 1000, }, }, fetchInProgress: true, shouldSend: false, }, { name: "StopAfter set, pending msgs below StopAfter, send pull request", givenSub: &pullSubscription{ pending: pendingMsgs{ msgCount: 4, }, consumeOpts: &consumeOpts{ ThresholdMessages: 5, MaxMessages: 10, StopAfter: 8, }, delivered: 2, }, shouldSend: true, expectedPullRequest: &pullRequest{ Batch: 2, // StopAfter (8) - delivered (2) - pending (4) MaxBytes: 0, }, }, { name: "StopAfter set, pending msgs equal to StopAfter, no pull request", givenSub: &pullSubscription{ pending: pendingMsgs{ msgCount: 6, }, consumeOpts: &consumeOpts{ ThresholdMessages: 5, MaxMessages: 10, StopAfter: 6, }, delivered: 0, }, shouldSend: false, }, } for _, test := range tests { 
t.Run(test.name, func(t *testing.T) { prChan := make(chan *pullRequest, 1) test.givenSub.fetchNext = prChan if test.fetchInProgress { test.givenSub.fetchInProgress.Store(1) } errs := make(chan error, 1) ok := make(chan struct{}, 1) go func() { if test.shouldSend { select { case pr := <-prChan: if *pr != *test.expectedPullRequest { errs <- fmt.Errorf("Invalid pull request; want: %#v; got: %#v", test.expectedPullRequest, pr) return } ok <- struct{}{} case <-time.After(1 * time.Second): errs <- errors.New("Timeout") return } } else { select { case <-prChan: errs <- errors.New("Unexpected pull request") case <-time.After(100 * time.Millisecond): ok <- struct{}{} return } } }() test.givenSub.checkPending() select { case <-ok: // ok case err := <-errs: t.Fatal(err) } }) } } func TestKV_keyValid(t *testing.T) { tests := []struct { key string ok bool }{ {key: "foo123", ok: true}, {key: "foo.bar", ok: true}, {key: "Foo.123=bar_baz-abc", ok: true}, {key: "foo.*.bar", ok: false}, {key: "foo.>", ok: false}, {key: ">", ok: false}, {key: "*", ok: false}, {key: "foo!", ok: false}, {key: "foo bar", ok: false}, {key: "", ok: false}, {key: " ", ok: false}, {key: ".", ok: false}, {key: ".foo", ok: false}, {key: "foo.", ok: false}, } for _, test := range tests { t.Run(test.key, func(t *testing.T) { res := keyValid(test.key) if res != test.ok { t.Fatalf("Invalid result; want: %v; got: %v", test.ok, res) } }) } } func TestKV_searchKeyValid(t *testing.T) { tests := []struct { key string ok bool }{ {key: "foo123", ok: true}, {key: "foo.bar", ok: true}, {key: "Foo.123=bar_baz-abc", ok: true}, {key: "foo.*.bar", ok: true}, {key: "foo.>", ok: true}, {key: ">", ok: true}, {key: "*", ok: true}, {key: "foo!", ok: false}, {key: "foo bar", ok: false}, {key: "", ok: false}, {key: " ", ok: false}, {key: ".", ok: false}, {key: ".foo", ok: false}, {key: "foo.", ok: false}, } for _, test := range tests { t.Run(test.key, func(t *testing.T) { res := searchKeyValid(test.key) if res != test.ok { 
t.Fatalf("Invalid result; want: %v; got: %v", test.ok, res) } }) } } func TestKV_bucketValid(t *testing.T) { tests := []struct { key string ok bool }{ {key: "foo123", ok: true}, {key: "Foo123-bar_baz", ok: true}, {key: "foo.bar", ok: false}, {key: "foo.*.bar", ok: false}, {key: "foo.>", ok: false}, {key: ">", ok: false}, {key: "*", ok: false}, {key: "foo!", ok: false}, {key: "foo bar", ok: false}, {key: "", ok: false}, {key: " ", ok: false}, {key: ".", ok: false}, {key: ".foo", ok: false}, {key: "foo.", ok: false}, } for _, test := range tests { t.Run(test.key, func(t *testing.T) { res := bucketValid(test.key) if res != test.ok { t.Fatalf("Invalid result; want: %v; got: %v", test.ok, res) } }) } } nats.go-1.41.0/jetstream/kv.go000066400000000000000000001233741477351342400161370ustar00rootroot00000000000000// Copyright 2023-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package jetstream import ( "context" "errors" "fmt" "reflect" "regexp" "strconv" "strings" "sync" "time" "github.com/nats-io/nats.go" "github.com/nats-io/nats.go/internal/parser" ) type ( // KeyValueManager is used to manage KeyValue stores. It provides methods to // create, delete, and retrieve KeyValue stores. KeyValueManager interface { // KeyValue will lookup and bind to an existing KeyValue store. // // If the KeyValue store with given name does not exist, // ErrBucketNotFound will be returned. 
KeyValue(ctx context.Context, bucket string) (KeyValue, error) // CreateKeyValue will create a KeyValue store with the given // configuration. // // If a KeyValue store with the same name already exists and the // configuration is different, ErrBucketExists will be returned. CreateKeyValue(ctx context.Context, cfg KeyValueConfig) (KeyValue, error) // UpdateKeyValue will update an existing KeyValue store with the given // configuration. // // If a KeyValue store with the given name does not exist, ErrBucketNotFound // will be returned. UpdateKeyValue(ctx context.Context, cfg KeyValueConfig) (KeyValue, error) // CreateOrUpdateKeyValue will create a KeyValue store if it does not // exist or update an existing KeyValue store with the given // configuration (if possible). CreateOrUpdateKeyValue(ctx context.Context, cfg KeyValueConfig) (KeyValue, error) // DeleteKeyValue will delete this KeyValue store. // // If the KeyValue store with given name does not exist, // ErrBucketNotFound will be returned. DeleteKeyValue(ctx context.Context, bucket string) error // KeyValueStoreNames is used to retrieve a list of key value store // names. It returns a KeyValueNamesLister exposing a channel to read // the names from. The lister will always close the channel when done // (either all names have been read or an error occurred) and therefore // can be used in range loops. KeyValueStoreNames(ctx context.Context) KeyValueNamesLister // KeyValueStores is used to retrieve a list of key value store // statuses. It returns a KeyValueLister exposing a channel to read the // statuses from. The lister will always close the channel when done // (either all statuses have been read or an error occurred) and // therefore can be used in range loops. KeyValueStores(ctx context.Context) KeyValueLister } // KeyValue contains methods to operate on a KeyValue store. 
// Using the KeyValue interface, it is possible to: // // - Get, Put, Create, Update, Delete and Purge a key // - Watch for updates to keys // - List all keys // - Retrieve historical values for a key // - Retrieve status and configuration of a key value bucket // - Purge all delete markers // - Close the KeyValue store KeyValue interface { // Get returns the latest value for the key. If the key does not exist, // ErrKeyNotFound will be returned. Get(ctx context.Context, key string) (KeyValueEntry, error) // GetRevision returns a specific revision value for the key. If the key // does not exist or the provided revision does not exists, // ErrKeyNotFound will be returned. GetRevision(ctx context.Context, key string, revision uint64) (KeyValueEntry, error) // Put will place the new value for the key into the store. If the key // does not exist, it will be created. If the key exists, the value will // be updated. // // A key has to consist of alphanumeric characters, dashes, underscores, // equal signs, and dots. Put(ctx context.Context, key string, value []byte) (uint64, error) // PutString will place the string for the key into the store. If the // key does not exist, it will be created. If the key exists, the value // will be updated. // // A key has to consist of alphanumeric characters, dashes, underscores, // equal signs, and dots. PutString(ctx context.Context, key string, value string) (uint64, error) // Create will add the key/value pair if it does not exist. If the key // already exists, ErrKeyExists will be returned. // // A key has to consist of alphanumeric characters, dashes, underscores, // equal signs, and dots. Create(ctx context.Context, key string, value []byte) (uint64, error) // Update will update the value if the latest revision matches. // If the provided revision is not the latest, Update will return an error. // Update also resets the TTL associated with the key (if any). 
Update(ctx context.Context, key string, value []byte, revision uint64) (uint64, error) // Delete will place a delete marker and leave all revisions. A history // of a deleted key can still be retrieved by using the History method // or a watch on the key. [Delete] is a non-destructive operation and // will not remove any previous revisions from the underlying stream. // // [LastRevision] option can be specified to only perform delete if the // latest revision the provided one. Delete(ctx context.Context, key string, opts ...KVDeleteOpt) error // Purge will place a delete marker and remove all previous revisions. // Only the latest revision will be preserved (with a delete marker). // Unlike [Delete], Purge is a destructive operation and will remove all // previous revisions from the underlying streams. // // [LastRevision] option can be specified to only perform purge if the // latest revision the provided one. Purge(ctx context.Context, key string, opts ...KVDeleteOpt) error // Watch for any updates to keys that match the keys argument which // could include wildcards. By default, the watcher will send the latest // value for each key and all future updates. Watch will send a nil // entry when it has received all initial values. There are a few ways // to configure the watcher: // // - IncludeHistory will have the key watcher send all historical values // for each key (up to KeyValueMaxHistory). // - IgnoreDeletes will have the key watcher not pass any keys with // delete markers. // - UpdatesOnly will have the key watcher only pass updates on values // (without latest values when started). // - MetaOnly will have the key watcher retrieve only the entry meta // data, not the entry value. // - ResumeFromRevision instructs the key watcher to resume from a // specific revision number. Watch(ctx context.Context, keys string, opts ...WatchOpt) (KeyWatcher, error) // WatchAll will watch for any updates to all keys. It can be configured // with the same options as Watch. 
WatchAll(ctx context.Context, opts ...WatchOpt) (KeyWatcher, error) // WatchFiltered will watch for any updates to keys that match the keys // argument. It can be configured with the same options as Watch. WatchFiltered(ctx context.Context, keys []string, opts ...WatchOpt) (KeyWatcher, error) // Keys will return all keys. // Deprecated: Use ListKeys instead to avoid memory issues. Keys(ctx context.Context, opts ...WatchOpt) ([]string, error) // ListKeys will return KeyLister, allowing to retrieve all keys from // the key value store in a streaming fashion (on a channel). ListKeys(ctx context.Context, opts ...WatchOpt) (KeyLister, error) // ListKeysFiltered ListKeysWithFilters returns a KeyLister for filtered keys in the bucket. ListKeysFiltered(ctx context.Context, filters ...string) (KeyLister, error) // History will return all historical values for the key (up to // KeyValueMaxHistory). History(ctx context.Context, key string, opts ...WatchOpt) ([]KeyValueEntry, error) // Bucket returns the KV store name. Bucket() string // PurgeDeletes will remove all current delete markers. It can be // configured using DeleteMarkersOlderThan option to only remove delete // markers older than a certain duration. // // [PurgeDeletes] is a destructive operation and will remove all entries // with delete markers from the underlying stream. PurgeDeletes(ctx context.Context, opts ...KVPurgeOpt) error // Status retrieves the status and configuration of a bucket. Status(ctx context.Context) (KeyValueStatus, error) } // KeyValueConfig is the configuration for a KeyValue store. KeyValueConfig struct { // Bucket is the name of the KeyValue store. Bucket name has to be // unique and can only contain alphanumeric characters, dashes, and // underscores. Bucket string `json:"bucket"` // Description is an optional description for the KeyValue store. Description string `json:"description,omitempty"` // MaxValueSize is the maximum size of a value in bytes. 
If not // specified, the default is -1 (unlimited). MaxValueSize int32 `json:"max_value_size,omitempty"` // History is the number of historical values to keep per key. If not // specified, the default is 1. Max is 64. History uint8 `json:"history,omitempty"` // TTL is the expiry time for keys. By default, keys do not expire. TTL time.Duration `json:"ttl,omitempty"` // MaxBytes is the maximum size in bytes of the KeyValue store. If not // specified, the default is -1 (unlimited). MaxBytes int64 `json:"max_bytes,omitempty"` // Storage is the type of storage to use for the KeyValue store. If not // specified, the default is FileStorage. Storage StorageType `json:"storage,omitempty"` // Replicas is the number of replicas to keep for the KeyValue store in // clustered jetstream. Defaults to 1, maximum is 5. Replicas int `json:"num_replicas,omitempty"` // Placement is used to declare where the stream should be placed via // tags and/or an explicit cluster name. Placement *Placement `json:"placement,omitempty"` // RePublish allows immediate republishing a message to the configured // subject after it's stored. RePublish *RePublish `json:"republish,omitempty"` // Mirror defines the consiguration for mirroring another KeyValue // store. Mirror *StreamSource `json:"mirror,omitempty"` // Sources defines the configuration for sources of a KeyValue store. Sources []*StreamSource `json:"sources,omitempty"` // Compression sets the underlying stream compression. // NOTE: Compression is supported for nats-server 2.10.0+ Compression bool `json:"compression,omitempty"` } // KeyLister is used to retrieve a list of key value store keys. It returns // a channel to read the keys from. The lister will always close the channel // when done (either all keys have been read or an error occurred) and // therefore can be used in range loops. Stop can be used to stop the lister // when not all keys have been read. 
KeyLister interface { Keys() <-chan string Stop() error } // KeyValueLister is used to retrieve a list of key value stores. It returns // a channel to read the KV store statuses from. The lister will always // close the channel when done (either all stores have been retrieved or an // error occurred) and therefore can be used in range loops. Stop can be // used to stop the lister when not all KeyValue stores have been read. KeyValueLister interface { Status() <-chan KeyValueStatus Error() error } // KeyValueNamesLister is used to retrieve a list of key value store names. // It returns a channel to read the KV bucket names from. The lister will // always close the channel when done (either all stores have been retrieved // or an error occurred) and therefore can be used in range loops. Stop can // be used to stop the lister when not all bucket names have been read. KeyValueNamesLister interface { Name() <-chan string Error() error } // KeyValueStatus is run-time status about a Key-Value bucket. KeyValueStatus interface { // Bucket returns the name of the KeyValue store. Bucket() string // Values is how many messages are in the bucket, including historical values. Values() uint64 // History returns the configured history kept per key. History() int64 // TTL returns the duration for which keys are kept in the bucket. TTL() time.Duration // BackingStore indicates what technology is used for storage of the bucket. // Currently only JetStream is supported. BackingStore() string // Bytes returns the size of the bucket in bytes. Bytes() uint64 // IsCompressed indicates if the data is compressed on disk. IsCompressed() bool } // KeyWatcher is what is returned when doing a watch. It can be used to // retrieve updates to keys. If not using UpdatesOnly option, it will also // send the latest value for each key. After all initial values have been // sent, a nil entry will be sent. Stop can be used to stop the watcher and // close the underlying channel. 
Watcher will not close the channel until // Stop is called or connection is closed. KeyWatcher interface { Updates() <-chan KeyValueEntry Stop() error } // KeyValueEntry is a retrieved entry for Get, List or Watch. KeyValueEntry interface { // Bucket is the bucket the data was loaded from. Bucket() string // Key is the name of the key that was retrieved. Key() string // Value is the retrieved value. Value() []byte // Revision is a unique sequence for this value. Revision() uint64 // Created is the time the data was put in the bucket. Created() time.Time // Delta is distance from the latest value (how far the current sequence // is from the latest). Delta() uint64 // Operation returns Put or Delete or Purge, depending on the manner in // which the current revision was created. Operation() KeyValueOp } ) type ( WatchOpt interface { configureWatcher(opts *watchOpts) error } watchOpts struct { // Do not send delete markers to the update channel. ignoreDeletes bool // Include all history per subject, not just last one. includeHistory bool // Include only updates for keys. updatesOnly bool // retrieve only the meta data of the entry metaOnly bool // resumeFromRevision is the revision to resume from. resumeFromRevision uint64 } // KVDeleteOpt is used to configure delete and purge operations. KVDeleteOpt interface { configureDelete(opts *deleteOpts) error } deleteOpts struct { // Remove all previous revisions. purge bool // Delete only if the latest revision matches. revision uint64 } // KVPurgeOpt is used to configure PurgeDeletes. 
KVPurgeOpt interface { configurePurge(opts *purgeOpts) error } purgeOpts struct { dmthr time.Duration // Delete markers threshold } ) // kvs is the implementation of KeyValue type kvs struct { name string streamName string pre string putPre string pushJS nats.JetStreamContext js *jetStream stream Stream // If true, it means that APIPrefix/Domain was set in the context // and we need to add something to some of our high level protocols // (such as Put, etc..) useJSPfx bool // To know if we can use the stream direct get API useDirect bool } // KeyValueOp represents the type of KV operation (Put, Delete, Purge). It is a // part of KeyValueEntry. type KeyValueOp uint8 // Available KeyValueOp values. const ( // KeyValuePut is a set on a revision which creates or updates a value for a // key. KeyValuePut KeyValueOp = iota // KeyValueDelete is a set on a revision which adds a delete marker for a // key. KeyValueDelete // KeyValuePurge is a set on a revision which removes all previous revisions // for a key. KeyValuePurge ) func (op KeyValueOp) String() string { switch op { case KeyValuePut: return "KeyValuePutOp" case KeyValueDelete: return "KeyValueDeleteOp" case KeyValuePurge: return "KeyValuePurgeOp" default: return "Unknown Operation" } } const ( kvBucketNamePre = "KV_" kvBucketNameTmpl = "KV_%s" kvSubjectsTmpl = "$KV.%s.>" kvSubjectsPreTmpl = "$KV.%s." kvSubjectsPreDomainTmpl = "%s.$KV.%s." kvNoPending = "0" ) const ( KeyValueMaxHistory = 64 AllKeys = ">" kvLatestRevision = 0 kvop = "KV-Operation" kvdel = "DEL" kvpurge = "PURGE" ) // Regex for valid keys and buckets. 
var ( validBucketRe = regexp.MustCompile(`^[a-zA-Z0-9_-]+$`) validKeyRe = regexp.MustCompile(`^[-/_=\.a-zA-Z0-9]+$`) validSearchKeyRe = regexp.MustCompile(`^[-/_=\.a-zA-Z0-9*]*[>]?$`) ) func (js *jetStream) KeyValue(ctx context.Context, bucket string) (KeyValue, error) { if !bucketValid(bucket) { return nil, ErrInvalidBucketName } streamName := fmt.Sprintf(kvBucketNameTmpl, bucket) stream, err := js.Stream(ctx, streamName) if err != nil { if errors.Is(err, ErrStreamNotFound) { err = ErrBucketNotFound } return nil, err } // Do some quick sanity checks that this is a correctly formed stream for KV. // Max msgs per subject should be > 0. if stream.CachedInfo().Config.MaxMsgsPerSubject < 1 { return nil, ErrBadBucket } pushJS, err := js.legacyJetStream() if err != nil { return nil, err } return mapStreamToKVS(js, pushJS, stream), nil } func (js *jetStream) CreateKeyValue(ctx context.Context, cfg KeyValueConfig) (KeyValue, error) { scfg, err := js.prepareKeyValueConfig(ctx, cfg) if err != nil { return nil, err } stream, err := js.CreateStream(ctx, scfg) if err != nil { if errors.Is(err, ErrStreamNameAlreadyInUse) { // errors are joined so that backwards compatibility is retained // and previous checks for ErrStreamNameAlreadyInUse will still work. err = errors.Join(fmt.Errorf("%w: %s", ErrBucketExists, cfg.Bucket), err) // If we have a failure to add, it could be because we have // a config change if the KV was created against before a bug fix // that changed the value of discard policy. // We will check if the stream exists and if the only difference // is the discard policy, we will update the stream. // The same logic applies for KVs created pre 2.9.x and // the AllowDirect setting. 
if stream, _ = js.Stream(ctx, scfg.Name); stream != nil { cfg := stream.CachedInfo().Config cfg.Discard = scfg.Discard cfg.AllowDirect = scfg.AllowDirect if reflect.DeepEqual(cfg, scfg) { stream, err = js.UpdateStream(ctx, scfg) } } } if err != nil { return nil, err } } pushJS, err := js.legacyJetStream() if err != nil { return nil, err } return mapStreamToKVS(js, pushJS, stream), nil } func (js *jetStream) UpdateKeyValue(ctx context.Context, cfg KeyValueConfig) (KeyValue, error) { scfg, err := js.prepareKeyValueConfig(ctx, cfg) if err != nil { return nil, err } stream, err := js.UpdateStream(ctx, scfg) if err != nil { if errors.Is(err, ErrStreamNotFound) { err = fmt.Errorf("%w: %s", ErrBucketNotFound, cfg.Bucket) } return nil, err } pushJS, err := js.legacyJetStream() if err != nil { return nil, err } return mapStreamToKVS(js, pushJS, stream), nil } func (js *jetStream) CreateOrUpdateKeyValue(ctx context.Context, cfg KeyValueConfig) (KeyValue, error) { scfg, err := js.prepareKeyValueConfig(ctx, cfg) if err != nil { return nil, err } stream, err := js.CreateOrUpdateStream(ctx, scfg) if err != nil { return nil, err } pushJS, err := js.legacyJetStream() if err != nil { return nil, err } return mapStreamToKVS(js, pushJS, stream), nil } func (js *jetStream) prepareKeyValueConfig(ctx context.Context, cfg KeyValueConfig) (StreamConfig, error) { if !bucketValid(cfg.Bucket) { return StreamConfig{}, ErrInvalidBucketName } if _, err := js.AccountInfo(ctx); err != nil { return StreamConfig{}, err } // Default to 1 for history. Max is 64 for now. history := int64(1) if cfg.History > 0 { if cfg.History > KeyValueMaxHistory { return StreamConfig{}, ErrHistoryTooLarge } history = int64(cfg.History) } replicas := cfg.Replicas if replicas == 0 { replicas = 1 } // We will set explicitly some values so that we can do comparison // if we get an "already in use" error and need to check if it is same. 
maxBytes := cfg.MaxBytes if maxBytes == 0 { maxBytes = -1 } maxMsgSize := cfg.MaxValueSize if maxMsgSize == 0 { maxMsgSize = -1 } // When stream's MaxAge is not set, server uses 2 minutes as the default // for the duplicate window. If MaxAge is set, and lower than 2 minutes, // then the duplicate window will be set to that. If MaxAge is greater, // we will cap the duplicate window to 2 minutes (to be consistent with // previous behavior). duplicateWindow := 2 * time.Minute if cfg.TTL > 0 && cfg.TTL < duplicateWindow { duplicateWindow = cfg.TTL } var compression StoreCompression if cfg.Compression { compression = S2Compression } scfg := StreamConfig{ Name: fmt.Sprintf(kvBucketNameTmpl, cfg.Bucket), Description: cfg.Description, MaxMsgsPerSubject: history, MaxBytes: maxBytes, MaxAge: cfg.TTL, MaxMsgSize: maxMsgSize, Storage: cfg.Storage, Replicas: replicas, Placement: cfg.Placement, AllowRollup: true, DenyDelete: true, Duplicates: duplicateWindow, MaxMsgs: -1, MaxConsumers: -1, AllowDirect: true, RePublish: cfg.RePublish, Compression: compression, Discard: DiscardNew, } if cfg.Mirror != nil { // Copy in case we need to make changes so we do not change caller's version. m := cfg.Mirror.copy() if !strings.HasPrefix(m.Name, kvBucketNamePre) { m.Name = fmt.Sprintf(kvBucketNameTmpl, m.Name) } scfg.Mirror = m scfg.MirrorDirect = true } else if len(cfg.Sources) > 0 { // For now we do not allow direct subjects for sources. If that is desired a user could use stream API directly. 
for _, ss := range cfg.Sources { var sourceBucketName string if strings.HasPrefix(ss.Name, kvBucketNamePre) { sourceBucketName = ss.Name[len(kvBucketNamePre):] } else { sourceBucketName = ss.Name ss.Name = fmt.Sprintf(kvBucketNameTmpl, ss.Name) } if ss.External == nil || sourceBucketName != cfg.Bucket { ss.SubjectTransforms = []SubjectTransformConfig{{Source: fmt.Sprintf(kvSubjectsTmpl, sourceBucketName), Destination: fmt.Sprintf(kvSubjectsTmpl, cfg.Bucket)}} } scfg.Sources = append(scfg.Sources, ss) } scfg.Subjects = []string{fmt.Sprintf(kvSubjectsTmpl, cfg.Bucket)} } else { scfg.Subjects = []string{fmt.Sprintf(kvSubjectsTmpl, cfg.Bucket)} } return scfg, nil } // DeleteKeyValue will delete this KeyValue store (JetStream stream). func (js *jetStream) DeleteKeyValue(ctx context.Context, bucket string) error { if !bucketValid(bucket) { return ErrInvalidBucketName } stream := fmt.Sprintf(kvBucketNameTmpl, bucket) if err := js.DeleteStream(ctx, stream); err != nil { if errors.Is(err, ErrStreamNotFound) { err = errors.Join(fmt.Errorf("%w: %s", ErrBucketNotFound, bucket), err) } return err } return nil } // KeyValueStoreNames is used to retrieve a list of key value store names func (js *jetStream) KeyValueStoreNames(ctx context.Context) KeyValueNamesLister { res := &kvLister{ kvNames: make(chan string), } l := &streamLister{js: js} streamsReq := streamsRequest{ Subject: fmt.Sprintf(kvSubjectsTmpl, "*"), } go func() { defer close(res.kvNames) for { page, err := l.streamNames(ctx, streamsReq) if err != nil && !errors.Is(err, ErrEndOfData) { res.err = err return } for _, name := range page { if !strings.HasPrefix(name, kvBucketNamePre) { continue } res.kvNames <- strings.TrimPrefix(name, kvBucketNamePre) } if errors.Is(err, ErrEndOfData) { return } } }() return res } // KeyValueStores is used to retrieve a list of key value store statuses func (js *jetStream) KeyValueStores(ctx context.Context) KeyValueLister { res := &kvLister{ kvs: make(chan KeyValueStatus), } l := 
&streamLister{js: js} streamsReq := streamsRequest{ Subject: fmt.Sprintf(kvSubjectsTmpl, "*"), } go func() { defer close(res.kvs) for { page, err := l.streamInfos(ctx, streamsReq) if err != nil && !errors.Is(err, ErrEndOfData) { res.err = err return } for _, info := range page { if !strings.HasPrefix(info.Config.Name, kvBucketNamePre) { continue } res.kvs <- &KeyValueBucketStatus{nfo: info, bucket: strings.TrimPrefix(info.Config.Name, kvBucketNamePre)} } if errors.Is(err, ErrEndOfData) { return } } }() return res } // KeyValueBucketStatus represents status of a Bucket, implements KeyValueStatus type KeyValueBucketStatus struct { nfo *StreamInfo bucket string } // Bucket the name of the bucket func (s *KeyValueBucketStatus) Bucket() string { return s.bucket } // Values is how many messages are in the bucket, including historical values func (s *KeyValueBucketStatus) Values() uint64 { return s.nfo.State.Msgs } // History returns the configured history kept per key func (s *KeyValueBucketStatus) History() int64 { return s.nfo.Config.MaxMsgsPerSubject } // TTL is how long the bucket keeps values for func (s *KeyValueBucketStatus) TTL() time.Duration { return s.nfo.Config.MaxAge } // BackingStore indicates what technology is used for storage of the bucket func (s *KeyValueBucketStatus) BackingStore() string { return "JetStream" } // StreamInfo is the stream info retrieved to create the status func (s *KeyValueBucketStatus) StreamInfo() *StreamInfo { return s.nfo } // Bytes is the size of the stream func (s *KeyValueBucketStatus) Bytes() uint64 { return s.nfo.State.Bytes } // IsCompressed indicates if the data is compressed on disk func (s *KeyValueBucketStatus) IsCompressed() bool { return s.nfo.Config.Compression != NoCompression } type kvLister struct { kvs chan KeyValueStatus kvNames chan string err error } func (kl *kvLister) Status() <-chan KeyValueStatus { return kl.kvs } func (kl *kvLister) Name() <-chan string { return kl.kvNames } func (kl *kvLister) Error() 
error { return kl.err } func (js *jetStream) legacyJetStream() (nats.JetStreamContext, error) { opts := make([]nats.JSOpt, 0) if js.opts.apiPrefix != "" { opts = append(opts, nats.APIPrefix(js.opts.apiPrefix)) } if js.opts.clientTrace != nil { opts = append(opts, nats.ClientTrace{ RequestSent: js.opts.clientTrace.RequestSent, ResponseReceived: js.opts.clientTrace.ResponseReceived, }) } return js.conn.JetStream(opts...) } func bucketValid(bucket string) bool { if len(bucket) == 0 { return false } return validBucketRe.MatchString(bucket) } func keyValid(key string) bool { if len(key) == 0 || key[0] == '.' || key[len(key)-1] == '.' { return false } return validKeyRe.MatchString(key) } func searchKeyValid(key string) bool { if len(key) == 0 || key[0] == '.' || key[len(key)-1] == '.' { return false } return validSearchKeyRe.MatchString(key) } func (kv *kvs) get(ctx context.Context, key string, revision uint64) (KeyValueEntry, error) { if !keyValid(key) { return nil, ErrInvalidKey } var b strings.Builder b.WriteString(kv.pre) b.WriteString(key) var m *RawStreamMsg var err error if revision == kvLatestRevision { m, err = kv.stream.GetLastMsgForSubject(ctx, b.String()) } else { m, err = kv.stream.GetMsg(ctx, revision) // If a sequence was provided, just make sure that the retrieved // message subject matches the request. if err == nil && m.Subject != b.String() { return nil, ErrKeyNotFound } } if err != nil { if errors.Is(err, ErrMsgNotFound) { err = ErrKeyNotFound } return nil, err } entry := &kve{ bucket: kv.name, key: key, value: m.Data, revision: m.Sequence, created: m.Time, } // Double check here that this is not a DEL Operation marker. 
if len(m.Header) > 0 { switch m.Header.Get(kvop) { case kvdel: entry.op = KeyValueDelete return entry, ErrKeyDeleted case kvpurge: entry.op = KeyValuePurge return entry, ErrKeyDeleted } } return entry, nil } // kve is the implementation of KeyValueEntry type kve struct { bucket string key string value []byte revision uint64 delta uint64 created time.Time op KeyValueOp } func (e *kve) Bucket() string { return e.bucket } func (e *kve) Key() string { return e.key } func (e *kve) Value() []byte { return e.value } func (e *kve) Revision() uint64 { return e.revision } func (e *kve) Created() time.Time { return e.created } func (e *kve) Delta() uint64 { return e.delta } func (e *kve) Operation() KeyValueOp { return e.op } // Get returns the latest value for the key. func (kv *kvs) Get(ctx context.Context, key string) (KeyValueEntry, error) { e, err := kv.get(ctx, key, kvLatestRevision) if err != nil { if errors.Is(err, ErrKeyDeleted) { return nil, ErrKeyNotFound } return nil, err } return e, nil } // GetRevision returns a specific revision value for the key. func (kv *kvs) GetRevision(ctx context.Context, key string, revision uint64) (KeyValueEntry, error) { e, err := kv.get(ctx, key, revision) if err != nil { if errors.Is(err, ErrKeyDeleted) { return nil, ErrKeyNotFound } return nil, err } return e, nil } // Put will place the new value for the key into the store. func (kv *kvs) Put(ctx context.Context, key string, value []byte) (uint64, error) { if !keyValid(key) { return 0, ErrInvalidKey } var b strings.Builder if kv.useJSPfx { b.WriteString(kv.js.opts.apiPrefix) } if kv.putPre != "" { b.WriteString(kv.putPre) } else { b.WriteString(kv.pre) } b.WriteString(key) pa, err := kv.js.Publish(ctx, b.String(), value) if err != nil { return 0, err } return pa.Sequence, err } // PutString will place the string for the key into the store. 
func (kv *kvs) PutString(ctx context.Context, key string, value string) (uint64, error) {
	return kv.Put(ctx, key, []byte(value))
}

// Create will add the key/value pair iff it does not exist.
func (kv *kvs) Create(ctx context.Context, key string, value []byte) (revision uint64, err error) {
	// Optimistic path: an Update with expected revision 0 only succeeds if
	// the subject has no prior value on the stream.
	v, err := kv.Update(ctx, key, value, 0)
	if err == nil {
		return v, nil
	}
	// If the key exists but only as a delete/purge tombstone, creating is
	// still allowed: update over the tombstone's revision.
	// NOTE(review): the err declared in this if-initializer shadows the outer
	// err only inside this guard; the ErrKeyExists check below intentionally
	// inspects the Update error from above.
	if e, err := kv.get(ctx, key, kvLatestRevision); errors.Is(err, ErrKeyDeleted) {
		return kv.Update(ctx, key, value, e.Revision())
	}
	// Check if the expected last subject sequence is not zero which implies
	// the key already exists.
	if errors.Is(err, ErrKeyExists) {
		jserr := ErrKeyExists.(*jsError)
		return 0, fmt.Errorf("%w: %s", err, jserr.message)
	}
	return 0, err
}

// Update will update the value if the latest revision matches.
func (kv *kvs) Update(ctx context.Context, key string, value []byte, revision uint64) (uint64, error) {
	if !keyValid(key) {
		return 0, ErrInvalidKey
	}
	// Build the full publish subject: optional JS API prefix + bucket prefix + key.
	var b strings.Builder
	if kv.useJSPfx {
		b.WriteString(kv.js.opts.apiPrefix)
	}
	b.WriteString(kv.pre)
	b.WriteString(key)

	// The expected-last-subject-sequence header makes the server reject the
	// publish unless `revision` is the key's current revision (CAS semantics).
	m := nats.Msg{Subject: b.String(), Header: nats.Header{}, Data: value}
	m.Header.Set(ExpectedLastSubjSeqHeader, strconv.FormatUint(revision, 10))

	pa, err := kv.js.PublishMsg(ctx, &m)
	if err != nil {
		return 0, err
	}
	return pa.Sequence, err
}

// Delete will place a delete marker and leave all revisions.
func (kv *kvs) Delete(ctx context.Context, key string, opts ...KVDeleteOpt) error {
	if !keyValid(key) {
		return ErrInvalidKey
	}
	var b strings.Builder
	if kv.useJSPfx {
		b.WriteString(kv.js.opts.apiPrefix)
	}
	// putPre is set when the bucket is a mirror; writes go to the origin.
	if kv.putPre != "" {
		b.WriteString(kv.putPre)
	} else {
		b.WriteString(kv.pre)
	}
	b.WriteString(key)

	// DEL op marker. For watch functionality.
	m := nats.NewMsg(b.String())

	var o deleteOpts
	for _, opt := range opts {
		if opt != nil {
			if err := opt.configureDelete(&o); err != nil {
				return err
			}
		}
	}

	if o.purge {
		// Purge: rollup header removes all prior revisions of the subject.
		m.Header.Set(kvop, kvpurge)
		m.Header.Set(MsgRollup, MsgRollupSubject)
	} else {
		m.Header.Set(kvop, kvdel)
	}

	// Optional CAS on delete (LastRevision option).
	if o.revision != 0 {
		m.Header.Set(ExpectedLastSubjSeqHeader, strconv.FormatUint(o.revision, 10))
	}

	_, err := kv.js.PublishMsg(ctx, m)
	return err
}

// Purge will place a delete marker and remove all previous revisions.
func (kv *kvs) Purge(ctx context.Context, key string, opts ...KVDeleteOpt) error {
	return kv.Delete(ctx, key, append(opts, purge())...)
}

// purge removes all previous revisions.
func purge() KVDeleteOpt {
	return deleteOptFn(func(opts *deleteOpts) error {
		opts.purge = true
		return nil
	})
}

// Implementation for Watch
type watcher struct {
	mu          sync.Mutex
	updates     chan KeyValueEntry // delivered entries; a nil entry marks end of initial data
	sub         *nats.Subscription
	initDone    bool   // true once the end-of-initial-data marker has been sent
	initPending uint64 // number of messages pending when the watch started
	received    uint64 // messages received so far during the initial phase
}

// Updates returns the interior channel.
func (w *watcher) Updates() <-chan KeyValueEntry {
	if w == nil {
		return nil
	}
	return w.updates
}

// Stop will unsubscribe from the watcher.
func (w *watcher) Stop() error {
	if w == nil {
		return nil
	}
	return w.sub.Unsubscribe()
}

// WatchFiltered sets up a watch on the given key patterns using an ordered
// push consumer; a nil entry is delivered once all initial values are in.
func (kv *kvs) WatchFiltered(ctx context.Context, keys []string, opts ...WatchOpt) (KeyWatcher, error) {
	for _, key := range keys {
		if !searchKeyValid(key) {
			return nil, fmt.Errorf("%w: %s", ErrInvalidKey, "key cannot be empty and must be a valid NATS subject")
		}
	}
	var o watchOpts
	for _, opt := range opts {
		if opt != nil {
			if err := opt.configureWatcher(&o); err != nil {
				return nil, err
			}
		}
	}

	// Could be a pattern so don't check for validity as we normally do.
	// NOTE(review): the caller's keys slice is rewritten in place with the
	// fully-prefixed subjects.
	for i, key := range keys {
		var b strings.Builder
		b.WriteString(kv.pre)
		b.WriteString(key)
		keys[i] = b.String()
	}

	// if no keys are provided, watch all keys
	if len(keys) == 0 {
		var b strings.Builder
		b.WriteString(kv.pre)
		b.WriteString(AllKeys)
		keys = []string{b.String()}
	}

	// We will block below on placing items on the chan. That is by design.
	w := &watcher{updates: make(chan KeyValueEntry, 256)}

	// update is the message handler: converts each stream message into a
	// KeyValueEntry and tracks the initial-values marker.
	update := func(m *nats.Msg) {
		tokens, err := parser.GetMetadataFields(m.Reply)
		if err != nil {
			return
		}
		if len(m.Subject) <= len(kv.pre) {
			return
		}
		subj := m.Subject[len(kv.pre):]
		var op KeyValueOp
		if len(m.Header) > 0 {
			switch m.Header.Get(kvop) {
			case kvdel:
				op = KeyValueDelete
			case kvpurge:
				op = KeyValuePurge
			}
		}
		// delta is the server's count of messages still pending after this one.
		delta := parser.ParseNum(tokens[parser.AckNumPendingTokenPos])
		w.mu.Lock()
		defer w.mu.Unlock()
		if !o.ignoreDeletes || (op != KeyValueDelete && op != KeyValuePurge) {
			entry := &kve{
				bucket:   kv.name,
				key:      subj,
				value:    m.Data,
				revision: parser.ParseNum(tokens[parser.AckStreamSeqTokenPos]),
				created:  time.Unix(0, int64(parser.ParseNum(tokens[parser.AckTimestampSeqTokenPos]))),
				delta:    delta,
				op:       op,
			}
			w.updates <- entry
		}
		// Check if done and initial values.
		if !w.initDone {
			w.received++
			// We set this on the first trip through..
			if w.initPending == 0 {
				w.initPending = delta
			}
			if w.received > w.initPending || delta == 0 {
				w.initDone = true
				w.updates <- nil
			}
		}
	}

	// Used ordered consumer to deliver results.
	subOpts := []nats.SubOpt{nats.BindStream(kv.streamName), nats.OrderedConsumer()}
	if !o.includeHistory {
		subOpts = append(subOpts, nats.DeliverLastPerSubject())
	}
	if o.updatesOnly {
		subOpts = append(subOpts, nats.DeliverNew())
	}
	if o.metaOnly {
		subOpts = append(subOpts, nats.HeadersOnly())
	}
	if o.resumeFromRevision > 0 {
		subOpts = append(subOpts, nats.StartSequence(o.resumeFromRevision))
	}
	subOpts = append(subOpts, nats.Context(ctx))
	// Create the sub and rest of initialization under the lock.
	// We want to prevent the race between this code and the
	// update() callback.
	w.mu.Lock()
	defer w.mu.Unlock()
	var sub *nats.Subscription
	var err error
	if len(keys) == 1 {
		sub, err = kv.pushJS.Subscribe(keys[0], update, subOpts...)
	} else {
		subOpts = append(subOpts, nats.ConsumerFilterSubjects(keys...))
		sub, err = kv.pushJS.Subscribe("", update, subOpts...)
	}
	if err != nil {
		return nil, err
	}
	sub.SetClosedHandler(func(_ string) {
		close(w.updates)
	})
	// If there were no pending messages at the time of the creation
	// of the consumer, send the marker.
	// Skip if UpdatesOnly() is set, since there will never be updates initially.
	if !o.updatesOnly {
		initialPending, err := sub.InitialConsumerPending()
		if err == nil && initialPending == 0 {
			w.initDone = true
			w.updates <- nil
		}
	} else {
		// if UpdatesOnly was used, mark initialization as complete
		w.initDone = true
	}
	w.sub = sub
	return w, nil
}

// Watch for any updates to keys that match the keys argument which could include wildcards.
// Watch will send a nil entry when it has received all initial values.
func (kv *kvs) Watch(ctx context.Context, keys string, opts ...WatchOpt) (KeyWatcher, error) {
	return kv.WatchFiltered(ctx, []string{keys}, opts...)
}

// WatchAll will invoke the callback for all updates.
func (kv *kvs) WatchAll(ctx context.Context, opts ...WatchOpt) (KeyWatcher, error) {
	return kv.Watch(ctx, AllKeys, opts...)
}

// Keys will return all keys.
func (kv *kvs) Keys(ctx context.Context, opts ...WatchOpt) ([]string, error) {
	opts = append(opts, IgnoreDeletes(), MetaOnly())
	watcher, err := kv.WatchAll(ctx, opts...)
	if err != nil {
		return nil, err
	}
	defer watcher.Stop()

	var keys []string
	// A nil entry is the end-of-initial-data marker; stop collecting there.
	for entry := range watcher.Updates() {
		if entry == nil {
			break
		}
		keys = append(keys, entry.Key())
	}
	if len(keys) == 0 {
		return nil, ErrNoKeysFound
	}
	return keys, nil
}

// keyLister streams key names from an underlying watcher.
type keyLister struct {
	watcher KeyWatcher
	keys    chan string
}

// ListKeys will return a KeyLister delivering all keys on a channel.
func (kv *kvs) ListKeys(ctx context.Context, opts ...WatchOpt) (KeyLister, error) { opts = append(opts, IgnoreDeletes(), MetaOnly()) watcher, err := kv.WatchAll(ctx, opts...) if err != nil { return nil, err } kl := &keyLister{watcher: watcher, keys: make(chan string, 256)} go func() { defer close(kl.keys) defer watcher.Stop() for { select { case entry := <-watcher.Updates(): if entry == nil { return } kl.keys <- entry.Key() case <-ctx.Done(): return } } }() return kl, nil } // ListKeysWithFilters returns a channel of keys matching the provided filters using WatchFiltered. func (kv *kvs) ListKeysFiltered(ctx context.Context, filters ...string) (KeyLister, error) { watcher, err := kv.WatchFiltered(ctx, filters, IgnoreDeletes(), MetaOnly()) if err != nil { return nil, err } // Reuse the existing keyLister implementation kl := &keyLister{watcher: watcher, keys: make(chan string, 256)} go func() { defer close(kl.keys) defer watcher.Stop() for { select { case entry := <-watcher.Updates(): if entry == nil { // Indicates all initial values are received return } kl.keys <- entry.Key() case <-ctx.Done(): return } } }() return kl, nil } func (kl *keyLister) Keys() <-chan string { return kl.keys } func (kl *keyLister) Stop() error { return kl.watcher.Stop() } // History will return all historical values for the key. func (kv *kvs) History(ctx context.Context, key string, opts ...WatchOpt) ([]KeyValueEntry, error) { opts = append(opts, IncludeHistory()) watcher, err := kv.Watch(ctx, key, opts...) if err != nil { return nil, err } defer watcher.Stop() var entries []KeyValueEntry for entry := range watcher.Updates() { if entry == nil { break } entries = append(entries, entry) } if len(entries) == 0 { return nil, ErrKeyNotFound } return entries, nil } // Bucket returns the current bucket name. func (kv *kvs) Bucket() string { return kv.name } const kvDefaultPurgeDeletesMarkerThreshold = 30 * time.Minute // PurgeDeletes will remove all current delete markers. 
func (kv *kvs) PurgeDeletes(ctx context.Context, opts ...KVPurgeOpt) error {
	var o purgeOpts
	for _, opt := range opts {
		if opt != nil {
			if err := opt.configurePurge(&o); err != nil {
				return err
			}
		}
	}
	watcher, err := kv.WatchAll(ctx)
	if err != nil {
		return err
	}
	defer watcher.Stop()

	var limit time.Time
	olderThan := o.dmthr
	// Negative value is used to instruct to always remove markers, regardless
	// of age. If set to 0 (or not set), use our default value.
	if olderThan == 0 {
		olderThan = kvDefaultPurgeDeletesMarkerThreshold
	}
	if olderThan > 0 {
		limit = time.Now().Add(-olderThan)
	}

	// First pass: collect all delete/purge markers from the watch.
	var deleteMarkers []KeyValueEntry
	for entry := range watcher.Updates() {
		if entry == nil {
			break
		}
		if op := entry.Operation(); op == KeyValueDelete || op == KeyValuePurge {
			deleteMarkers = append(deleteMarkers, entry)
		}
	}
	// Stop watcher here so as we purge we do not have the system continually updating numPending.
	watcher.Stop()

	var b strings.Builder
	// Do actual purges here.
	for _, entry := range deleteMarkers {
		b.WriteString(kv.pre)
		b.WriteString(entry.Key())
		purgeOpts := []StreamPurgeOpt{WithPurgeSubject(b.String())}
		// Markers newer than the threshold are kept (keep 1 = the marker itself).
		if olderThan > 0 && entry.Created().After(limit) {
			purgeOpts = append(purgeOpts, WithPurgeKeep(1))
		}
		if err := kv.stream.Purge(ctx, purgeOpts...); err != nil {
			return err
		}
		b.Reset()
	}
	return nil
}

// Status retrieves the status and configuration of a bucket
func (kv *kvs) Status(ctx context.Context) (KeyValueStatus, error) {
	nfo, err := kv.stream.Info(ctx)
	if err != nil {
		return nil, err
	}
	return &KeyValueBucketStatus{nfo: nfo, bucket: kv.name}, nil
}

// mapStreamToKVS builds a kvs handle from an existing stream, deriving the
// bucket name and subject prefixes from the stream configuration.
func mapStreamToKVS(js *jetStream, pushJS nats.JetStreamContext, stream Stream) *kvs {
	info := stream.CachedInfo()
	bucket := strings.TrimPrefix(info.Config.Name, kvBucketNamePre)
	kv := &kvs{
		name:       bucket,
		streamName: info.Config.Name,
		pre:        fmt.Sprintf(kvSubjectsPreTmpl, bucket),
		js:         js,
		pushJS:     pushJS,
		stream:     stream,
		// Determine if we need to use the JS prefix in front of Put and Delete operations
		useJSPfx:  js.opts.apiPrefix != DefaultAPIPrefix,
		useDirect: info.Config.AllowDirect,
	}

	// If we are mirroring, we will have mirror direct on, so just use the mirror name
	// and override use
	if m := info.Config.Mirror; m != nil {
		bucket := strings.TrimPrefix(m.Name, kvBucketNamePre)
		if m.External != nil && m.External.APIPrefix != "" {
			// External mirror: writes go through the external API prefix.
			kv.useJSPfx = false
			kv.pre = fmt.Sprintf(kvSubjectsPreTmpl, bucket)
			kv.putPre = fmt.Sprintf(kvSubjectsPreDomainTmpl, m.External.APIPrefix, bucket)
		} else {
			kv.putPre = fmt.Sprintf(kvSubjectsPreTmpl, bucket)
		}
	}

	return kv
}
nats.go-1.41.0/jetstream/kv_options.go000066400000000000000000000061551477351342400177060ustar00rootroot00000000000000// Copyright 2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package jetstream

import (
	"fmt"
	"time"
)

// watchOptFn adapts a plain function to the WatchOpt interface.
type watchOptFn func(opts *watchOpts) error

func (opt watchOptFn) configureWatcher(opts *watchOpts) error {
	return opt(opts)
}

// IncludeHistory instructs the key watcher to include historical values as
// well (up to KeyValueMaxHistory).
func IncludeHistory() WatchOpt {
	return watchOptFn(func(opts *watchOpts) error {
		// Mutually exclusive with UpdatesOnly.
		if opts.updatesOnly {
			return fmt.Errorf("%w: include history can not be used with updates only", ErrInvalidOption)
		}
		opts.includeHistory = true
		return nil
	})
}

// UpdatesOnly instructs the key watcher to only include updates on values
// (without latest values when started).
func UpdatesOnly() WatchOpt { return watchOptFn(func(opts *watchOpts) error { if opts.includeHistory { return fmt.Errorf("%w: updates only can not be used with include history", ErrInvalidOption) } opts.updatesOnly = true return nil }) } // IgnoreDeletes will have the key watcher not pass any deleted keys. func IgnoreDeletes() WatchOpt { return watchOptFn(func(opts *watchOpts) error { opts.ignoreDeletes = true return nil }) } // MetaOnly instructs the key watcher to retrieve only the entry meta data, not // the entry value. func MetaOnly() WatchOpt { return watchOptFn(func(opts *watchOpts) error { opts.metaOnly = true return nil }) } // ResumeFromRevision instructs the key watcher to resume from a specific // revision number. func ResumeFromRevision(revision uint64) WatchOpt { return watchOptFn(func(opts *watchOpts) error { opts.resumeFromRevision = revision return nil }) } // DeleteMarkersOlderThan indicates that delete or purge markers older than that // will be deleted as part of [KeyValue.PurgeDeletes] operation, otherwise, only the data // will be removed but markers that are recent will be kept. // Note that if no option is specified, the default is 30 minutes. You can set // this option to a negative value to instruct to always remove the markers, // regardless of their age. type DeleteMarkersOlderThan time.Duration func (ttl DeleteMarkersOlderThan) configurePurge(opts *purgeOpts) error { opts.dmthr = time.Duration(ttl) return nil } type deleteOptFn func(opts *deleteOpts) error func (opt deleteOptFn) configureDelete(opts *deleteOpts) error { return opt(opts) } // LastRevision deletes if the latest revision matches the provided one. If the // provided revision is not the latest, the delete will return an error. 
func LastRevision(revision uint64) KVDeleteOpt {
	return deleteOptFn(func(opts *deleteOpts) error {
		opts.revision = revision
		return nil
	})
}
nats.go-1.41.0/jetstream/message.go000066400000000000000000000346701477351342400171410ustar00rootroot00000000000000// Copyright 2022-2025 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package jetstream

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/nats-io/nats.go"
	"github.com/nats-io/nats.go/internal/parser"
)

type (
	// Msg contains methods to operate on a JetStream message. Metadata, Data,
	// Headers, Subject and Reply can be used to retrieve the specific parts of
	// the underlying message. Ack, DoubleAck, Nak, NakWithDelay, InProgress and
	// Term are various flavors of ack requests.
	Msg interface {
		// Metadata returns [MsgMetadata] for a JetStream message.
		Metadata() (*MsgMetadata, error)

		// Data returns the message body.
		Data() []byte

		// Headers returns a map of headers for a message.
		Headers() nats.Header

		// Subject returns a subject on which a message was published/received.
		Subject() string

		// Reply returns a reply subject for a message.
		Reply() string

		// Ack acknowledges a message. This tells the server that the message was
		// successfully processed and it can move on to the next message.
		Ack() error

		// DoubleAck acknowledges a message and waits for ack reply from the server.
		// While it impacts performance, it is useful for scenarios where
		// message loss is not acceptable.
		DoubleAck(context.Context) error

		// Nak negatively acknowledges a message. This tells the server to
		// redeliver the message.
		//
		// Nak does not adhere to AckWait or Backoff configured on the consumer
		// and triggers instant redelivery. For a delayed redelivery, use
		// NakWithDelay.
		Nak() error

		// NakWithDelay negatively acknowledges a message. This tells the server
		// to redeliver the message after the given delay.
		NakWithDelay(delay time.Duration) error

		// InProgress tells the server that this message is being worked on. It
		// resets the redelivery timer on the server.
		InProgress() error

		// Term tells the server to not redeliver this message, regardless of
		// the value of MaxDeliver.
		Term() error

		// TermWithReason tells the server to not redeliver this message, regardless of
		// the value of MaxDeliver. The provided reason will be included in JetStream
		// advisory event sent by the server.
		//
		// Note: This will only work with JetStream servers >= 2.10.4.
		// For older servers, TermWithReason will be ignored by the server and the message
		// will not be terminated.
		TermWithReason(reason string) error
	}

	// MsgMetadata is the JetStream metadata associated with received messages.
	MsgMetadata struct {
		// Sequence is the sequence information for the message.
		Sequence SequencePair

		// NumDelivered is the number of times this message was delivered to the
		// consumer.
		NumDelivered uint64

		// NumPending is the number of messages that match the consumer's
		// filter, but have not been delivered yet.
		NumPending uint64

		// Timestamp is the time the message was originally stored on a stream.
		Timestamp time.Time

		// Stream is the stream name this message is stored on.
		Stream string

		// Consumer is the consumer name this message was delivered to.
		Consumer string

		// Domain is the domain this message was received on.
		Domain string
	}

	// SequencePair includes the consumer and stream sequence numbers for a
	// message.
	SequencePair struct {
		// Consumer is the consumer sequence number for message deliveries. This
		// is the total number of messages the consumer has seen (including
		// redeliveries).
		Consumer uint64 `json:"consumer_seq"`

		// Stream is the stream sequence number for a message.
		Stream uint64 `json:"stream_seq"`
	}

	// jetStreamMsg is the concrete Msg implementation; the embedded mutex
	// guards the ackd flag.
	jetStreamMsg struct {
		msg  *nats.Msg
		ackd bool
		js   *jetStream
		sync.Mutex
	}

	// ackOpts carries optional payload fields for ack replies.
	ackOpts struct {
		nakDelay   time.Duration
		termReason string
	}

	// ackType is the ack protocol verb sent in the reply body.
	ackType []byte
)

// Status codes returned by the server in message headers; see checkMsg for
// how each code is mapped to an error.
const (
	controlMsg       = "100"
	badRequest       = "400"
	noMessages       = "404"
	reqTimeout       = "408"
	maxBytesExceeded = "409"
	noResponders     = "503"
	pinIdMismatch    = "423"
)

// Headers used when publishing messages.
const (
	// MsgIdHeader is used to specify a user-defined message ID. It can be used
	// e.g. for deduplication in conjunction with the Duplicates duration on
	// ConsumerConfig or to provide optimistic concurrency safety together with
	// [ExpectedLastMsgIDHeader].
	//
	// This can be set when publishing messages using [WithMsgID] option.
	MsgIDHeader = "Nats-Msg-Id"

	// ExpectedStreamHeader contains stream name and is used to assure that the
	// published message is received by expected stream. Server will reject the
	// message if it is not the case.
	//
	// This can be set when publishing messages using [WithExpectStream] option.
	ExpectedStreamHeader = "Nats-Expected-Stream"

	// ExpectedLastSeqHeader contains the expected last sequence number of the
	// stream and can be used to apply optimistic concurrency control at stream
	// level. Server will reject the message if it is not the case.
	//
	// This can be set when publishing messages using [WithExpectLastSequence]
	// option.
	ExpectedLastSeqHeader = "Nats-Expected-Last-Sequence"

	// ExpectedLastSubjSeqHeader contains the expected last sequence number on
	// the subject and can be used to apply optimistic concurrency control at
	// subject level. Server will reject the message if it is not the case.
	//
	// This can be set when publishing messages using
	// [WithExpectLastSequencePerSubject] option.
	ExpectedLastSubjSeqHeader = "Nats-Expected-Last-Subject-Sequence"

	// ExpectedLastMsgIDHeader contains the expected last message ID on the
	// subject and can be used to apply optimistic concurrency control at
	// stream level. Server will reject the message if it is not the case.
	//
	// This can be set when publishing messages using [WithExpectLastMsgID]
	// option.
	ExpectedLastMsgIDHeader = "Nats-Expected-Last-Msg-Id"

	// MsgTTLHeader is used to specify the TTL for a specific message. This will
	// override the default TTL for the stream.
	MsgTTLHeader = "Nats-TTL"

	// MsgRollup is used to apply a purge of all prior messages in the stream
	// ("all") or at the subject ("sub") before this message.
	MsgRollup = "Nats-Rollup"
)

// Headers for republished messages and direct gets. Those headers are set by
// the server and should not be set by the client.
const (
	// StreamHeader contains the stream name the message was republished from or
	// the stream name the message was retrieved from using direct get.
	StreamHeader = "Nats-Stream"

	// SequenceHeader contains the original sequence number of the message.
	SequenceHeader = "Nats-Sequence"

	// TimeStampHeader contains the original timestamp of the message.
	// NOTE(review): the identifier misspells "Header" as "Heaer"; it is an
	// exported name, so renaming it would break API compatibility.
	TimeStampHeaer = "Nats-Time-Stamp"

	// SubjectHeader contains the original subject the message was published to.
	SubjectHeader = "Nats-Subject"

	// LastSequenceHeader contains the last sequence of the message having the
	// same subject, otherwise zero if this is the first message for the
	// subject.
	LastSequenceHeader = "Nats-Last-Sequence"
)

// Rollups, can be subject only or all messages.
const (
	// MsgRollupSubject is used to purge all messages before this message on the
	// message subject.
	MsgRollupSubject = "sub"

	// MsgRollupAll is used to purge all messages before this message on the
	// stream.
	MsgRollupAll = "all"
)

// Ack protocol verbs sent as the reply payload.
var (
	ackAck      ackType = []byte("+ACK")
	ackNak      ackType = []byte("-NAK")
	ackProgress ackType = []byte("+WPI")
	ackTerm     ackType = []byte("+TERM")
)

// Metadata returns [MsgMetadata] for a JetStream message.
func (m *jetStreamMsg) Metadata() (*MsgMetadata, error) {
	if err := m.checkReply(); err != nil {
		return nil, err
	}

	// The metadata is encoded as tokens in the reply subject.
	tokens, err := parser.GetMetadataFields(m.msg.Reply)
	if err != nil {
		return nil, fmt.Errorf("%w: %s", ErrNotJSMessage, err)
	}

	meta := &MsgMetadata{
		Domain:       tokens[parser.AckDomainTokenPos],
		NumDelivered: parser.ParseNum(tokens[parser.AckNumDeliveredTokenPos]),
		NumPending:   parser.ParseNum(tokens[parser.AckNumPendingTokenPos]),
		Timestamp:    time.Unix(0, int64(parser.ParseNum(tokens[parser.AckTimestampSeqTokenPos]))),
		Stream:       tokens[parser.AckStreamTokenPos],
		Consumer:     tokens[parser.AckConsumerTokenPos],
	}
	meta.Sequence.Stream = parser.ParseNum(tokens[parser.AckStreamSeqTokenPos])
	meta.Sequence.Consumer = parser.ParseNum(tokens[parser.AckConsumerSeqTokenPos])
	return meta, nil
}

// Data returns the message body.
func (m *jetStreamMsg) Data() []byte {
	return m.msg.Data
}

// Headers returns a map of headers for a message.
func (m *jetStreamMsg) Headers() nats.Header {
	return m.msg.Header
}

// Subject returns a subject on which a message is published.
func (m *jetStreamMsg) Subject() string {
	return m.msg.Subject
}

// Reply returns a reply subject for a JetStream message.
func (m *jetStreamMsg) Reply() string {
	return m.msg.Reply
}

// Ack acknowledges a message. This tells the server that the message was
// successfully processed and it can move on to the next message.
func (m *jetStreamMsg) Ack() error {
	return m.ackReply(context.Background(), ackAck, false, ackOpts{})
}

// DoubleAck acknowledges a message and waits for ack reply from the server.
// While it impacts performance, it is useful for scenarios where
// message loss is not acceptable.
func (m *jetStreamMsg) DoubleAck(ctx context.Context) error {
	// Synchronous variant of Ack: sent as a request so the server's reply is awaited.
	return m.ackReply(ctx, ackAck, true, ackOpts{})
}

// Nak negatively acknowledges a message. This tells the server to
// redeliver the message.
func (m *jetStreamMsg) Nak() error {
	var noOpts ackOpts
	return m.ackReply(context.Background(), ackNak, false, noOpts)
}

// NakWithDelay negatively acknowledges a message. This tells the server
// to redeliver the message after the given delay.
func (m *jetStreamMsg) NakWithDelay(delay time.Duration) error {
	opts := ackOpts{nakDelay: delay}
	return m.ackReply(context.Background(), ackNak, false, opts)
}

// InProgress tells the server that this message is being worked on. It
// resets the redelivery timer on the server.
func (m *jetStreamMsg) InProgress() error {
	var noOpts ackOpts
	return m.ackReply(context.Background(), ackProgress, false, noOpts)
}

// Term tells the server to not redeliver this message, regardless of
// the value of MaxDeliver.
func (m *jetStreamMsg) Term() error {
	var noOpts ackOpts
	return m.ackReply(context.Background(), ackTerm, false, noOpts)
}

// TermWithReason tells the server to not redeliver this message, regardless of
// the value of MaxDeliver. The provided reason will be included in JetStream
// advisory event sent by the server.
//
// Note: This will only work with JetStream servers >= 2.10.4.
// For older servers, TermWithReason will be ignored by the server and the message
// will not be terminated.
func (m *jetStreamMsg) TermWithReason(reason string) error {
	return m.ackReply(context.Background(), ackTerm, false, ackOpts{termReason: reason})
}

// ackReply sends the given ack type on the message's reply subject.
// When sync is true, it performs a request/reply and waits for the server's
// confirmation (bounded by ctx); otherwise it publishes fire-and-forget.
// opts carry the optional NAK delay or termination reason.
func (m *jetStreamMsg) ackReply(ctx context.Context, ackType ackType, sync bool, opts ackOpts) error {
	err := m.checkReply()
	if err != nil {
		return err
	}

	// Reject double-acks early. NOTE(review): the lock is released between
	// this check and the final set of m.ackd below — kept as-is.
	m.Lock()
	if m.ackd {
		m.Unlock()
		return ErrMsgAlreadyAckd
	}
	m.Unlock()

	if sync {
		// Ensure the context has a deadline/cancelation for the request below.
		var cancel context.CancelFunc
		ctx, cancel = m.js.wrapContextWithoutDeadline(ctx)
		if cancel != nil {
			defer cancel()
		}
	}

	// Build the ack payload: bare ack type, or ack type plus a JSON delay
	// (for NAK) or a free-form termination reason (for TERM).
	var body []byte
	if opts.nakDelay > 0 {
		body = []byte(fmt.Sprintf("%s {\"delay\": %d}", ackType, opts.nakDelay.Nanoseconds()))
	} else if opts.termReason != "" {
		body = []byte(fmt.Sprintf("%s %s", ackType, opts.termReason))
	} else {
		body = ackType
	}

	if sync {
		_, err = m.js.conn.RequestWithContext(ctx, m.msg.Reply, body)
	} else {
		err = m.js.conn.Publish(m.msg.Reply, body)
	}
	if err != nil {
		return err
	}

	// Mark that the message has been acked unless it is ackProgress
	// which can be sent many times.
	if !bytes.Equal(ackType, ackProgress) {
		m.Lock()
		m.ackd = true
		m.Unlock()
	}
	return nil
}

// checkReply validates that the message is bound to a subscription and
// carries a reply subject — both required for any ack operation.
func (m *jetStreamMsg) checkReply() error {
	if m == nil || m.msg.Sub == nil {
		return ErrMsgNotBound
	}
	if m.msg.Reply == "" {
		return ErrMsgNoReply
	}
	return nil
}

// Returns if the given message is a user message or not, and if
// checkSts() is true, returns appropriate error based on the
// content of the status (404, etc..)
func checkMsg(msg *nats.Msg) (bool, error) {
	// If payload or no header, consider this a user message
	if len(msg.Data) > 0 || len(msg.Header) == 0 {
		return true, nil
	}

	// Look for status header
	val := msg.Header.Get("Status")
	descr := msg.Header.Get("Description")

	// If not present, then this is considered a user message
	if val == "" {
		return true, nil
	}

	// Map the status code (and, for 409, the description text) to the
	// corresponding sentinel error.
	switch val {
	case badRequest:
		return false, ErrBadRequest
	case noResponders:
		return false, nats.ErrNoResponders
	case noMessages:
		// 404 indicates that there are no messages.
		return false, ErrNoMessages
	case reqTimeout:
		return false, nats.ErrTimeout
	case controlMsg:
		return false, nil
	case pinIdMismatch:
		return false, ErrPinIDMismatch
	case maxBytesExceeded:
		// This status is shared by several conditions; disambiguate using
		// the (case-insensitive) description text.
		if strings.Contains(strings.ToLower(descr), "message size exceeds maxbytes") {
			return false, ErrMaxBytesExceeded
		}
		if strings.Contains(strings.ToLower(descr), "batch completed") {
			return false, ErrBatchCompleted
		}
		if strings.Contains(strings.ToLower(descr), "consumer deleted") {
			return false, ErrConsumerDeleted
		}
		if strings.Contains(strings.ToLower(descr), "leadership change") {
			return false, ErrConsumerLeadershipChanged
		}
	}
	// Unrecognized status: surface the server-provided description.
	return false, fmt.Errorf("nats: %s", msg.Header.Get("Description"))
}

// parsePending extracts the pending message and byte counts from the
// Nats-Pending-Messages / Nats-Pending-Bytes headers of a pull response.
// Missing headers default to zero; malformed values are an error.
func parsePending(msg *nats.Msg) (int, int, error) {
	msgsLeftStr := msg.Header.Get("Nats-Pending-Messages")
	var msgsLeft int
	var err error
	if msgsLeftStr != "" {
		msgsLeft, err = strconv.Atoi(msgsLeftStr)
		if err != nil {
			return 0, 0, errors.New("nats: invalid format of Nats-Pending-Messages")
		}
	}
	bytesLeftStr := msg.Header.Get("Nats-Pending-Bytes")
	var bytesLeft int
	if bytesLeftStr != "" {
		bytesLeft, err = strconv.Atoi(bytesLeftStr)
		if err != nil {
			return 0, 0, errors.New("nats: invalid format of Nats-Pending-Bytes")
		}
	}
	return msgsLeft, bytesLeft, nil
}

// toJSMsg converts core [nats.Msg] to [jetStreamMsg], exposing JetStream-specific operations
func (js *jetStream) toJSMsg(msg *nats.Msg) *jetStreamMsg {
	return &jetStreamMsg{
		msg: msg,
		js:  js,
	}
}
nats.go-1.41.0/jetstream/object.go000066400000000000000000001346001477351342400167570ustar00rootroot00000000000000// Copyright 2023-2025 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package jetstream

import (
	"bytes"
	"context"
	"crypto/sha256"
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"hash"
	"io"
	"net"
	"os"
	"strings"
	"sync"
	"time"

	"github.com/nats-io/nats.go"
	"github.com/nats-io/nats.go/internal/parser"
	"github.com/nats-io/nuid"
)

type (
	// ObjectStoreManager is used to manage object stores. It provides methods
	// CRUD operations on object stores.
	ObjectStoreManager interface {
		// ObjectStore will look up and bind to an existing object store
		// instance.
		//
		// If the object store with given name does not exist, ErrBucketNotFound
		// will be returned.
		ObjectStore(ctx context.Context, bucket string) (ObjectStore, error)

		// CreateObjectStore will create a new object store with the given
		// configuration.
		//
		// If the object store with given name already exists, ErrBucketExists
		// will be returned.
		CreateObjectStore(ctx context.Context, cfg ObjectStoreConfig) (ObjectStore, error)

		// UpdateObjectStore will update an existing object store with the given
		// configuration.
		//
		// If the object store with given name does not exist, ErrBucketNotFound
		// will be returned.
		UpdateObjectStore(ctx context.Context, cfg ObjectStoreConfig) (ObjectStore, error)

		// CreateOrUpdateObjectStore will create a new object store with the given
		// configuration if it does not exist, or update an existing object store
		// with the given configuration.
		CreateOrUpdateObjectStore(ctx context.Context, cfg ObjectStoreConfig) (ObjectStore, error)

		// DeleteObjectStore will delete the provided object store.
		//
		// If the object store with given name does not exist, ErrBucketNotFound
		// will be returned.
		DeleteObjectStore(ctx context.Context, bucket string) error

		// ObjectStoreNames is used to retrieve a list of bucket names.
		// It returns an ObjectStoreNamesLister exposing a channel to receive
		// the names of the object stores.
		//
		// The lister will always close the channel when done (either all names
		// have been read or an error occurred) and therefore can be used in a
		// for-range loop.
		ObjectStoreNames(ctx context.Context) ObjectStoreNamesLister

		// ObjectStores is used to retrieve a list of bucket statuses.
		// It returns an ObjectStoresLister exposing a channel to receive
		// the statuses of the object stores.
		//
		// The lister will always close the channel when done (either all statuses
		// have been read or an error occurred) and therefore can be used in a
		// for-range loop.
		ObjectStores(ctx context.Context) ObjectStoresLister
	}

	// ObjectStore contains methods to operate on an object store.
	// Using the ObjectStore interface, it is possible to:
	//
	// - Perform CRUD operations on objects (Get, Put, Delete).
	//   Get and put expose convenience methods to work with
	//   byte slices, strings and files, in addition to streaming [io.Reader]
	// - Get information about an object without retrieving it.
	// - Update the metadata of an object.
	// - Add links to other objects or object stores.
	// - Watch for updates to a store
	// - List information about objects in a store
	// - Retrieve status and configuration of an object store.
	ObjectStore interface {
		// Put will place the contents from the reader into a new object. If the
		// object already exists, it will be overwritten. The object name is
		// required and is taken from the ObjectMeta.Name field.
		//
		// The reader will be read until EOF. ObjectInfo will be returned, containing
		// the object's metadata, digest and instance information.
		Put(ctx context.Context, obj ObjectMeta, reader io.Reader) (*ObjectInfo, error)

		// PutBytes is convenience function to put a byte slice into this object
		// store under the given name.
		//
		// ObjectInfo will be returned, containing the object's metadata, digest
		// and instance information.
		PutBytes(ctx context.Context, name string, data []byte) (*ObjectInfo, error)

		// PutString is convenience function to put a string into this object
		// store under the given name.
		//
		// ObjectInfo will be returned, containing the object's metadata, digest
		// and instance information.
		PutString(ctx context.Context, name string, data string) (*ObjectInfo, error)

		// PutFile is convenience function to put a file contents into this
		// object store. The name of the object will be the path of the file.
		//
		// ObjectInfo will be returned, containing the object's metadata, digest
		// and instance information.
		PutFile(ctx context.Context, file string) (*ObjectInfo, error)

		// Get will pull the named object from the object store. If the object
		// does not exist, ErrObjectNotFound will be returned.
		//
		// The returned ObjectResult will contain the object's metadata and a
		// reader to read the object's contents. The reader will be closed when
		// all data has been read or an error occurs.
		//
		// A GetObjectShowDeleted option can be supplied to return an object
		// even if it was marked as deleted.
		Get(ctx context.Context, name string, opts ...GetObjectOpt) (ObjectResult, error)

		// GetBytes is a convenience function to pull an object from this object
		// store and return it as a byte slice.
		//
		// If the object does not exist, ErrObjectNotFound will be returned.
		//
		// A GetObjectShowDeleted option can be supplied to return an object
		// even if it was marked as deleted.
		GetBytes(ctx context.Context, name string, opts ...GetObjectOpt) ([]byte, error)

		// GetString is a convenience function to pull an object from this
		// object store and return it as a string.
		//
		// If the object does not exist, ErrObjectNotFound will be returned.
		//
		// A GetObjectShowDeleted option can be supplied to return an object
		// even if it was marked as deleted.
		GetString(ctx context.Context, name string, opts ...GetObjectOpt) (string, error)

		// GetFile is a convenience function to pull an object from this object
		// store and place it in a file. If the file already exists, it will be
		// overwritten, otherwise it will be created.
		//
		// If the object does not exist, ErrObjectNotFound will be returned.
		// A GetObjectShowDeleted option can be supplied to return an object
		// even if it was marked as deleted.
		GetFile(ctx context.Context, name, file string, opts ...GetObjectOpt) error

		// GetInfo will retrieve the current information for the object, containing
		// the object's metadata and instance information.
		//
		// If the object does not exist, ErrObjectNotFound will be returned.
		//
		// A GetObjectInfoShowDeleted option can be supplied to return an object
		// even if it was marked as deleted.
		GetInfo(ctx context.Context, name string, opts ...GetObjectInfoOpt) (*ObjectInfo, error)

		// UpdateMeta will update the metadata for the object.
		//
		// If the object does not exist, ErrUpdateMetaDeleted will be returned.
		// If the new name is different from the old name, and an object with the
		// new name already exists, ErrObjectAlreadyExists will be returned.
		UpdateMeta(ctx context.Context, name string, meta ObjectMeta) error

		// Delete will delete the named object from the object store. If the object
		// does not exist, ErrObjectNotFound will be returned. If the object is
		// already deleted, no error will be returned.
		//
		// All chunks for the object will be purged, and the object will be marked
		// as deleted.
		Delete(ctx context.Context, name string) error

		// AddLink will add a link to another object. A link is a reference to
		// another object. The provided name is the name of the link object.
		// The provided ObjectInfo is the info of the object being linked to.
		//
		// If an object with given name already exists, ErrObjectAlreadyExists
		// will be returned.
		// If object being linked to is deleted, ErrNoLinkToDeleted will be
		// returned.
		// If the provided object is a link, ErrNoLinkToLink will be returned.
		// If the provided object is nil or the name is empty, ErrObjectRequired
		// will be returned.
		AddLink(ctx context.Context, name string, obj *ObjectInfo) (*ObjectInfo, error)

		// AddBucketLink will add a link to another object store. A link is a
		// reference to another object store. The provided name is the name of
		// the link object.
		// The provided ObjectStore is the object store being linked to.
		//
		// If an object with given name already exists, ErrObjectAlreadyExists
		// will be returned.
		// If the provided object store is nil ErrBucketRequired will be returned.
		AddBucketLink(ctx context.Context, name string, bucket ObjectStore) (*ObjectInfo, error)

		// Seal will seal the object store, no further modifications will be allowed.
		Seal(ctx context.Context) error

		// Watch for any updates to objects in the store. By default, the watcher will send the latest
		// info for each object and all future updates. Watch will send a nil
		// entry when it has received all initial values. There are a few ways
		// to configure the watcher:
		//
		// - IncludeHistory will have the watcher send all historical information
		// for each object.
		// - IgnoreDeletes will have the watcher not pass any objects with
		// delete markers.
		// - UpdatesOnly will have the watcher only pass updates on objects
		// (without latest info when started).
		Watch(ctx context.Context, opts ...WatchOpt) (ObjectWatcher, error)

		// List will list information about objects in the store.
		//
		// If the object store is empty, ErrNoObjectsFound will be returned.
		List(ctx context.Context, opts ...ListObjectsOpt) ([]*ObjectInfo, error)

		// Status retrieves the status and configuration of the bucket.
		Status(ctx context.Context) (ObjectStoreStatus, error)
	}

	// ObjectWatcher is what is returned when doing a watch. It can be used to
	// retrieve updates to objects in a bucket. If not using UpdatesOnly option,
	// it will also send the latest value for each key. After all initial values
	// have been sent, a nil entry will be sent. Stop can be used to stop the
	// watcher and close the underlying channel. Watcher will not close the
	// channel until Stop is called or connection is closed.
	ObjectWatcher interface {
		Updates() <-chan *ObjectInfo
		Stop() error
	}

	// ObjectStoreConfig is the configuration for the object store.
	ObjectStoreConfig struct {
		// Bucket is the name of the object store. Bucket name has to be
		// unique and can only contain alphanumeric characters, dashes, and
		// underscores.
		Bucket string `json:"bucket"`

		// Description is an optional description for the object store.
		Description string `json:"description,omitempty"`

		// TTL is the maximum age of objects in the store. If an object is not
		// updated within this time, it will be removed from the store.
		// By default, objects do not expire.
		TTL time.Duration `json:"max_age,omitempty"`

		// MaxBytes is the maximum size of the object store. If not specified,
		// the default is -1 (unlimited).
		MaxBytes int64 `json:"max_bytes,omitempty"`

		// Storage is the type of storage to use for the object store. If not
		// specified, the default is FileStorage.
		Storage StorageType `json:"storage,omitempty"`

		// Replicas is the number of replicas to keep for the object store in
		// clustered jetstream. Defaults to 1, maximum is 5.
		Replicas int `json:"num_replicas,omitempty"`

		// Placement is used to declare where the object store should be placed via
		// tags and/or an explicit cluster name.
		Placement *Placement `json:"placement,omitempty"`

		// Compression enables the underlying stream compression.
		// NOTE: Compression is supported for nats-server 2.10.0+
		Compression bool `json:"compression,omitempty"`

		// Bucket-specific metadata
		// NOTE: Metadata requires nats-server v2.10.0+
		Metadata map[string]string `json:"metadata,omitempty"`
	}

	// ObjectStoresLister is used to retrieve a list of object stores. It returns
	// a channel to read the bucket store statuses from. The lister will always
	// close the channel when done (either all stores have been retrieved or an
	// error occurred) and therefore can be used in range loops. Stop can be
	// used to stop the lister when not all object stores have been read.
	ObjectStoresLister interface {
		Status() <-chan ObjectStoreStatus
		Error() error
	}

	// ObjectStoreNamesLister is used to retrieve a list of object store names.
	// It returns a channel to read the bucket names from. The lister will
	// always close the channel when done (either all stores have been retrieved
	// or an error occurred) and therefore can be used in range loops. Stop can
	// be used to stop the lister when not all bucket names have been read.
	ObjectStoreNamesLister interface {
		Name() <-chan string
		Error() error
	}

	// ObjectStoreStatus is run-time status about a bucket.
	ObjectStoreStatus interface {
		// Bucket returns the name of the object store.
		Bucket() string

		// Description is the description supplied when creating the bucket.
		Description() string

		// TTL indicates how long objects are kept in the bucket.
		TTL() time.Duration

		// Storage indicates the underlying JetStream storage technology used to
		// store data.
		Storage() StorageType

		// Replicas indicates how many storage replicas are kept for the data in
		// the bucket.
		Replicas() int

		// Sealed indicates the stream is sealed and cannot be modified in any
		// way.
		Sealed() bool

		// Size is the combined size of all data in the bucket including
		// metadata, in bytes.
		Size() uint64

		// BackingStore indicates what technology is used for storage of the
		// bucket. Currently only JetStream is supported.
		BackingStore() string

		// Metadata is the user supplied metadata for the bucket.
		Metadata() map[string]string

		// IsCompressed indicates if the data is compressed on disk.
		IsCompressed() bool
	}

	// ObjectMetaOptions is used to set additional options when creating an object.
	ObjectMetaOptions struct {
		// Link contains information about a link to another object or object store.
		// It should not be set manually, but rather by using the AddLink or
		// AddBucketLink methods.
		Link *ObjectLink `json:"link,omitempty"`

		// ChunkSize is the maximum size of each chunk in bytes. If not specified,
		// the default is 128k.
		ChunkSize uint32 `json:"max_chunk_size,omitempty"`
	}

	// ObjectMeta is high level information about an object.
	ObjectMeta struct {
		// Name is the name of the object. The name is required when adding an
		// object and has to be unique within the object store.
		Name string `json:"name"`

		// Description is an optional description for the object.
		Description string `json:"description,omitempty"`

		// Headers is an optional set of user-defined headers for the object.
		Headers nats.Header `json:"headers,omitempty"`

		// Metadata is the user supplied metadata for the object.
		Metadata map[string]string `json:"metadata,omitempty"`

		// Additional options for the object.
		Opts *ObjectMetaOptions `json:"options,omitempty"`
	}

	// ObjectInfo contains ObjectMeta and additional information about an
	// object.
	ObjectInfo struct {
		// ObjectMeta contains high level information about the object.
		ObjectMeta

		// Bucket is the name of the object store.
		Bucket string `json:"bucket"`

		// NUID is the unique identifier for the object set when putting the
		// object into the store.
		NUID string `json:"nuid"`

		// Size is the size of the object in bytes. It only includes the size of
		// the object itself, not the metadata.
		Size uint64 `json:"size"`

		// ModTime is the last modification time of the object.
		ModTime time.Time `json:"mtime"`

		// Chunks is the number of chunks the object is split into. Maximum size
		// of each chunk can be specified in ObjectMetaOptions.
		Chunks uint32 `json:"chunks"`

		// Digest is the SHA-256 digest of the object. It is used to verify the
		// integrity of the object.
		Digest string `json:"digest,omitempty"`

		// Deleted indicates if the object is marked as deleted.
		Deleted bool `json:"deleted,omitempty"`
	}

	// ObjectLink is used to embed links to other buckets and objects.
	ObjectLink struct {
		// Bucket is the name of the object store the link is pointing to.
		Bucket string `json:"bucket"`

		// Name can be used to link to a single object.
		// If empty means this is a link to the whole store, like a directory.
		Name string `json:"name,omitempty"`
	}

	// ObjectResult will return the object info and a reader to read the object's
	// contents. The reader will be closed when all data has been read or an
	// error occurs.
	ObjectResult interface {
		io.ReadCloser
		Info() (*ObjectInfo, error)
		Error() error
	}

	// GetObjectOpt is used to set additional options when getting an object.
	GetObjectOpt func(opts *getObjectOpts) error

	// GetObjectInfoOpt is used to set additional options when getting object info.
	GetObjectInfoOpt func(opts *getObjectInfoOpts) error

	// ListObjectsOpt is used to set additional options when listing objects.
	ListObjectsOpt func(opts *listObjectOpts) error

	getObjectOpts struct {
		// Include deleted object in the result.
		showDeleted bool
	}

	getObjectInfoOpts struct {
		// Include deleted object in the result.
		showDeleted bool
	}

	listObjectOpts struct {
		// Include deleted objects in the result channel.
		showDeleted bool
	}

	// obs is the concrete ObjectStore implementation, binding a bucket name
	// to its backing stream and the JetStream contexts used to access it.
	obs struct {
		name       string
		streamName string
		stream     Stream

		// pushJS is the legacy context used for the push-based chunk
		// subscription in Get; js is the modern API context.
		pushJS nats.JetStreamContext
		js     *jetStream
	}

	// ObjectResult impl.
	objResult struct {
		sync.Mutex
		info   *ObjectInfo
		r      io.ReadCloser
		err    error
		ctx    context.Context
		digest hash.Hash
	}
)

const (
	objNameTmpl         = "OBJ_%s"     // OBJ_<bucket> // stream name
	objAllChunksPreTmpl = "$O.%s.C.>"  // $O.<bucket>.C.> // chunk stream subject
	objAllMetaPreTmpl   = "$O.%s.M.>"  // $O.<bucket>.M.> // meta stream subject
	objChunksPreTmpl    = "$O.%s.C.%s" // $O.<bucket>.C.<object-nuid> // chunk message subject
	objMetaPreTmpl      = "$O.%s.M.%s" // $O.<bucket>.M.<name-encoded> // meta message subject
	objNoPending        = "0"
	objDefaultChunkSize = uint32(128 * 1024) // 128k
	objDigestType       = "SHA-256="
	objDigestTmpl       = objDigestType + "%s"
)

// CreateObjectStore creates the backing stream for a new object store and
// returns an ObjectStore bound to it. An existing stream with the same name
// surfaces as ErrBucketExists.
func (js *jetStream) CreateObjectStore(ctx context.Context, cfg ObjectStoreConfig) (ObjectStore, error) {
	scfg, err := js.prepareObjectStoreConfig(ctx, cfg)
	if err != nil {
		return nil, err
	}

	stream, err := js.CreateStream(ctx, scfg)
	if err != nil {
		if errors.Is(err, ErrStreamNameAlreadyInUse) {
			// errors are joined so that backwards compatibility is retained
			// and previous checks for ErrStreamNameAlreadyInUse will still work.
			err = errors.Join(fmt.Errorf("%w: %s", ErrBucketExists, cfg.Bucket), err)
		}
		return nil, err
	}

	pushJS, err := js.legacyJetStream()
	if err != nil {
		return nil, err
	}

	return mapStreamToObjectStore(js, pushJS, cfg.Bucket, stream), nil
}

// UpdateObjectStore updates the backing stream of an existing object store.
// A missing stream surfaces as ErrBucketNotFound.
func (js *jetStream) UpdateObjectStore(ctx context.Context, cfg ObjectStoreConfig) (ObjectStore, error) {
	scfg, err := js.prepareObjectStoreConfig(ctx, cfg)
	if err != nil {
		return nil, err
	}

	// Attempt to update the stream.
	stream, err := js.UpdateStream(ctx, scfg)
	if err != nil {
		if errors.Is(err, ErrStreamNotFound) {
			return nil, fmt.Errorf("%w: %s", ErrBucketNotFound, cfg.Bucket)
		}
		return nil, err
	}

	pushJS, err := js.legacyJetStream()
	if err != nil {
		return nil, err
	}

	return mapStreamToObjectStore(js, pushJS, cfg.Bucket, stream), nil
}

// CreateOrUpdateObjectStore creates the backing stream if it does not exist,
// or updates it in place if it does.
func (js *jetStream) CreateOrUpdateObjectStore(ctx context.Context, cfg ObjectStoreConfig) (ObjectStore, error) {
	scfg, err := js.prepareObjectStoreConfig(ctx, cfg)
	if err != nil {
		return nil, err
	}

	stream, err := js.CreateOrUpdateStream(ctx, scfg)
	if err != nil {
		return nil, err
	}
	pushJS, err := js.legacyJetStream()
	if err != nil {
		return nil, err
	}

	return mapStreamToObjectStore(js, pushJS, cfg.Bucket, stream), nil
}

// prepareObjectStoreConfig validates the bucket name and translates an
// ObjectStoreConfig into the StreamConfig for its backing stream
// (chunk + meta subjects, rollups, direct gets, discard-new).
// NOTE(review): ctx is currently unused here; kept for signature symmetry.
func (js *jetStream) prepareObjectStoreConfig(ctx context.Context, cfg ObjectStoreConfig) (StreamConfig, error) {
	if !validBucketRe.MatchString(cfg.Bucket) {
		return StreamConfig{}, ErrInvalidStoreName
	}

	name := cfg.Bucket
	chunks := fmt.Sprintf(objAllChunksPreTmpl, name)
	meta := fmt.Sprintf(objAllMetaPreTmpl, name)

	// We will set explicitly some values so that we can do comparison
	// if we get an "already in use" error and need to check if it is same.
	// See kv
	replicas := cfg.Replicas
	if replicas == 0 {
		replicas = 1
	}
	maxBytes := cfg.MaxBytes
	if maxBytes == 0 {
		maxBytes = -1
	}
	var compression StoreCompression
	if cfg.Compression {
		compression = S2Compression
	}
	scfg := StreamConfig{
		Name:        fmt.Sprintf(objNameTmpl, name),
		Description: cfg.Description,
		Subjects:    []string{chunks, meta},
		MaxAge:      cfg.TTL,
		MaxBytes:    maxBytes,
		Storage:     cfg.Storage,
		Replicas:    replicas,
		Placement:   cfg.Placement,
		Discard:     DiscardNew,
		AllowRollup: true,
		AllowDirect: true,
		Metadata:    cfg.Metadata,
		Compression: compression,
	}

	return scfg, nil
}

// ObjectStore will look up and bind to an existing object store instance.
func (js *jetStream) ObjectStore(ctx context.Context, bucket string) (ObjectStore, error) {
	if !validBucketRe.MatchString(bucket) {
		return nil, ErrInvalidStoreName
	}

	streamName := fmt.Sprintf(objNameTmpl, bucket)
	stream, err := js.Stream(ctx, streamName)
	if err != nil {
		if errors.Is(err, ErrStreamNotFound) {
			err = ErrBucketNotFound
		}
		return nil, err
	}
	pushJS, err := js.legacyJetStream()
	if err != nil {
		return nil, err
	}

	return mapStreamToObjectStore(js, pushJS, bucket, stream), nil
}

// DeleteObjectStore will delete the underlying stream for the named object.
func (js *jetStream) DeleteObjectStore(ctx context.Context, bucket string) error {
	if !validBucketRe.MatchString(bucket) {
		return ErrInvalidStoreName
	}
	stream := fmt.Sprintf(objNameTmpl, bucket)
	if err := js.DeleteStream(ctx, stream); err != nil {
		if errors.Is(err, ErrStreamNotFound) {
			err = errors.Join(fmt.Errorf("%w: %s", ErrBucketNotFound, bucket), err)
		}
		return err
	}
	return nil
}

// encodeName makes an arbitrary object name safe for use as a subject token
// by base64url-encoding it.
func encodeName(name string) string {
	return base64.URLEncoding.EncodeToString([]byte(name))
}

// Put will place the contents from the reader into this object-store.
//
// The reader is streamed in ChunkSize pieces published asynchronously to a
// per-NUID chunk subject, followed by a rollup meta message; on any failure
// the partially published chunks are purged.
func (obs *obs) Put(ctx context.Context, meta ObjectMeta, r io.Reader) (*ObjectInfo, error) {
	if meta.Name == "" {
		return nil, ErrBadObjectMeta
	}

	if meta.Opts == nil {
		meta.Opts = &ObjectMetaOptions{ChunkSize: objDefaultChunkSize}
	} else if meta.Opts.Link != nil {
		return nil, ErrLinkNotAllowed
	} else if meta.Opts.ChunkSize == 0 {
		meta.Opts.ChunkSize = objDefaultChunkSize
	}

	// Create the new nuid so chunks go on a new subject if the name is re-used
	newnuid := nuid.Next()

	// These will be used in more than one place
	chunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, newnuid)

	// Grab existing meta info (einfo). Ok to be found or not found, any other
	// error is a problem.
	// Chunks on the old nuid can be cleaned up at the end
	einfo, err := obs.GetInfo(ctx, meta.Name, GetObjectInfoShowDeleted()) // GetInfo will encode the name
	if err != nil && err != ErrObjectNotFound {
		return nil, err
	}

	// For async error handling: the publish-async error handler runs on
	// another goroutine, so the captured error is mutex-guarded.
	var perr error
	var mu sync.Mutex
	setErr := func(err error) {
		mu.Lock()
		defer mu.Unlock()
		perr = err
	}
	getErr := func() error {
		mu.Lock()
		defer mu.Unlock()
		return perr
	}

	// Create our own JS context to handle errors etc.
	pubJS, err := New(obs.js.conn, WithPublishAsyncErrHandler(func(js JetStream, _ *nats.Msg, err error) {
		setErr(err)
	}))
	if err != nil {
		return nil, err
	}
	defer pubJS.(*jetStream).cleanupReplySub()

	purgePartial := func() {
		// wait until all pubs are complete or up to default timeout before attempting purge
		select {
		case <-pubJS.PublishAsyncComplete():
		case <-ctx.Done():
		}

		// Best-effort cleanup of the chunks already published for this NUID.
		_ = obs.stream.Purge(ctx, WithPurgeSubject(chunkSubj))
	}

	m, h := nats.NewMsg(chunkSubj), sha256.New()
	chunk, sent, total := make([]byte, meta.Opts.ChunkSize), 0, uint64(0)

	// set up the info object. The chunk upload sets the size and digest
	info := &ObjectInfo{Bucket: obs.name, NUID: newnuid, ObjectMeta: meta}

	for r != nil {
		if ctx != nil {
			// Non-blocking cancellation check between chunks.
			select {
			case <-ctx.Done():
				if ctx.Err() == context.Canceled {
					err = ctx.Err()
				} else {
					err = nats.ErrTimeout
				}
			default:
			}
			if err != nil {
				purgePartial()
				return nil, err
			}
		}

		// Actual read.
		// TODO(dlc) - Deadline?
		n, readErr := r.Read(chunk)

		// Handle all non EOF errors
		if readErr != nil && readErr != io.EOF {
			purgePartial()
			return nil, readErr
		}

		// Add chunk only if we received data
		if n > 0 {
			// Chunk processing.
			m.Data = chunk[:n]
			h.Write(m.Data)

			// Send msg itself.
			if _, err := pubJS.PublishMsgAsync(m); err != nil {
				purgePartial()
				return nil, err
			}
			if err := getErr(); err != nil {
				purgePartial()
				return nil, err
			}

			// Update totals.
			sent++
			total += uint64(n)
		}

		// EOF Processing.
		if readErr == io.EOF {
			// Place meta info.
			info.Size, info.Chunks = uint64(total), uint32(sent)
			info.Digest = GetObjectDigestValue(h)
			break
		}
	}

	// Prepare the meta message; the rollup header makes it replace any
	// previous meta message for this object name.
	metaSubj := fmt.Sprintf(objMetaPreTmpl, obs.name, encodeName(meta.Name))
	mm := nats.NewMsg(metaSubj)
	mm.Header.Set(MsgRollup, MsgRollupSubject)
	mm.Data, err = json.Marshal(info)
	if err != nil {
		if r != nil {
			purgePartial()
		}
		return nil, err
	}

	// Publish the meta message.
	_, err = pubJS.PublishMsgAsync(mm)
	if err != nil {
		if r != nil {
			purgePartial()
		}
		return nil, err
	}

	// Wait for all to be processed.
	select {
	case <-pubJS.PublishAsyncComplete():
		if err := getErr(); err != nil {
			if r != nil {
				purgePartial()
			}
			return nil, err
		}
	case <-ctx.Done():
		return nil, nats.ErrTimeout
	}

	info.ModTime = time.Now().UTC() // This time is not actually the correct time

	// Delete any original chunks.
	if einfo != nil && !einfo.Deleted {
		echunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, einfo.NUID)
		_ = obs.stream.Purge(ctx, WithPurgeSubject(echunkSubj))
	}

	// TODO would it be okay to do this to return the info with the correct time?
	// With the understanding that it is an extra call to the server.
	// Otherwise the time the user gets back is the client time, not the server time.
	// return obs.GetInfo(info.Name)

	return info, nil
}

// GetObjectDigestValue calculates the base64 value of hashed data
func GetObjectDigestValue(data hash.Hash) string {
	sha := data.Sum(nil)
	return fmt.Sprintf(objDigestTmpl, base64.URLEncoding.EncodeToString(sha[:]))
}

// DecodeObjectDigest decodes base64 hash
func DecodeObjectDigest(data string) ([]byte, error) {
	// Split "<algo>=<base64>" on the first '='; the algorithm prefix is
	// discarded and only the base64 payload is decoded.
	digest := strings.SplitN(data, "=", 2)
	if len(digest) != 2 {
		return nil, ErrInvalidDigestFormat
	}
	return base64.URLEncoding.DecodeString(digest[1])
}

// isLink reports whether this object is a link to another object or bucket.
func (info *ObjectInfo) isLink() bool {
	return info.ObjectMeta.Opts != nil && info.ObjectMeta.Opts.Link != nil
}

// Get will pull the object from the underlying stream.
func (obs *obs) Get(ctx context.Context, name string, opts ...GetObjectOpt) (ObjectResult, error) { var o getObjectOpts for _, opt := range opts { if opt != nil { if err := opt(&o); err != nil { return nil, err } } } infoOpts := make([]GetObjectInfoOpt, 0) if o.showDeleted { infoOpts = append(infoOpts, GetObjectInfoShowDeleted()) } // Grab meta info. info, err := obs.GetInfo(ctx, name, infoOpts...) if err != nil { return nil, err } if info.NUID == "" { return nil, ErrBadObjectMeta } // Check for object links. If single objects we do a pass through. if info.isLink() { if info.ObjectMeta.Opts.Link.Name == "" { return nil, ErrCantGetBucket } // is the link in the same bucket? lbuck := info.ObjectMeta.Opts.Link.Bucket if lbuck == obs.name { return obs.Get(ctx, info.ObjectMeta.Opts.Link.Name) } // different bucket lobs, err := obs.js.ObjectStore(ctx, lbuck) if err != nil { return nil, err } return lobs.Get(ctx, info.ObjectMeta.Opts.Link.Name) } result := &objResult{info: info, ctx: ctx} if info.Size == 0 { return result, nil } pr, pw := net.Pipe() result.r = pr gotErr := func(m *nats.Msg, err error) { pw.Close() m.Sub.Unsubscribe() result.setErr(err) } // For calculating sum256 result.digest = sha256.New() processChunk := func(m *nats.Msg) { var err error if ctx != nil { select { case <-ctx.Done(): if ctx.Err() == context.Canceled { err = ctx.Err() } else { err = nats.ErrTimeout } default: } if err != nil { gotErr(m, err) return } } tokens, err := parser.GetMetadataFields(m.Reply) if err != nil { gotErr(m, err) return } // Write to our pipe. for b := m.Data; len(b) > 0; { n, err := pw.Write(b) if err != nil { gotErr(m, err) return } b = b[n:] } // Update sha256 result.digest.Write(m.Data) // Check if we are done. 
if tokens[parser.AckNumPendingTokenPos] == objNoPending { pw.Close() m.Sub.Unsubscribe() } } chunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, info.NUID) streamName := fmt.Sprintf(objNameTmpl, obs.name) subscribeOpts := []nats.SubOpt{ nats.OrderedConsumer(), nats.Context(ctx), nats.BindStream(streamName), } _, err = obs.pushJS.Subscribe(chunkSubj, processChunk, subscribeOpts...) if err != nil { return nil, err } return result, nil } // Delete will delete the object. func (obs *obs) Delete(ctx context.Context, name string) error { // Grab meta info. info, err := obs.GetInfo(ctx, name, GetObjectInfoShowDeleted()) if err != nil { return err } if info.NUID == "" { return ErrBadObjectMeta } // Place a rollup delete marker and publish the info info.Deleted = true info.Size, info.Chunks, info.Digest = 0, 0, "" if err = publishMeta(ctx, info, obs.js); err != nil { return err } // Purge chunks for the object. chunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, info.NUID) return obs.stream.Purge(ctx, WithPurgeSubject(chunkSubj)) } func publishMeta(ctx context.Context, info *ObjectInfo, js *jetStream) error { // marshal the object into json, don't store an actual time info.ModTime = time.Time{} data, err := json.Marshal(info) if err != nil { return err } // Prepare and publish the message. mm := nats.NewMsg(fmt.Sprintf(objMetaPreTmpl, info.Bucket, encodeName(info.ObjectMeta.Name))) mm.Header.Set(MsgRollup, MsgRollupSubject) mm.Data = data if _, err := js.PublishMsg(ctx, mm); err != nil { return err } // set the ModTime in case it's returned to the user, even though it's not the correct time. 
info.ModTime = time.Now().UTC() return nil } // AddLink will add a link to another object if it's not deleted and not another link // name is the name of this link object // obj is what is being linked too func (obs *obs) AddLink(ctx context.Context, name string, obj *ObjectInfo) (*ObjectInfo, error) { if name == "" { return nil, ErrNameRequired } // TODO Handle stale info if obj == nil || obj.Name == "" { return nil, ErrObjectRequired } if obj.Deleted { return nil, ErrNoLinkToDeleted } if obj.isLink() { return nil, ErrNoLinkToLink } // If object with link's name is found, error. // If link with link's name is found, that's okay to overwrite. // If there was an error that was not ErrObjectNotFound, error. einfo, err := obs.GetInfo(ctx, name, GetObjectInfoShowDeleted()) if einfo != nil { if !einfo.isLink() { return nil, ErrObjectAlreadyExists } } else if err != ErrObjectNotFound { return nil, err } // create the meta for the link meta := &ObjectMeta{ Name: name, Opts: &ObjectMetaOptions{Link: &ObjectLink{Bucket: obj.Bucket, Name: obj.Name}}, } info := &ObjectInfo{Bucket: obs.name, NUID: nuid.Next(), ModTime: time.Now().UTC(), ObjectMeta: *meta} // put the link object if err = publishMeta(ctx, info, obs.js); err != nil { return nil, err } return info, nil } // AddBucketLink will add a link to another object store. func (ob *obs) AddBucketLink(ctx context.Context, name string, bucket ObjectStore) (*ObjectInfo, error) { if name == "" { return nil, ErrNameRequired } if bucket == nil { return nil, ErrBucketRequired } bos, ok := bucket.(*obs) if !ok { return nil, ErrBucketMalformed } // If object with link's name is found, error. // If link with link's name is found, that's okay to overwrite. // If there was an error that was not ErrObjectNotFound, error. 
einfo, err := ob.GetInfo(ctx, name, GetObjectInfoShowDeleted()) if einfo != nil { if !einfo.isLink() { return nil, ErrObjectAlreadyExists } } else if err != ErrObjectNotFound { return nil, err } // create the meta for the link meta := &ObjectMeta{ Name: name, Opts: &ObjectMetaOptions{Link: &ObjectLink{Bucket: bos.name}}, } info := &ObjectInfo{Bucket: ob.name, NUID: nuid.Next(), ObjectMeta: *meta} // put the link object err = publishMeta(ctx, info, ob.js) if err != nil { return nil, err } return info, nil } // PutBytes is convenience function to put a byte slice into this object store. func (obs *obs) PutBytes(ctx context.Context, name string, data []byte) (*ObjectInfo, error) { return obs.Put(ctx, ObjectMeta{Name: name}, bytes.NewReader(data)) } // GetBytes is a convenience function to pull an object from this object store and return it as a byte slice. func (obs *obs) GetBytes(ctx context.Context, name string, opts ...GetObjectOpt) ([]byte, error) { result, err := obs.Get(ctx, name, opts...) if err != nil { return nil, err } defer result.Close() var b bytes.Buffer if _, err := b.ReadFrom(result); err != nil { return nil, err } return b.Bytes(), nil } // PutString is convenience function to put a string into this object store. func (obs *obs) PutString(ctx context.Context, name string, data string) (*ObjectInfo, error) { return obs.Put(ctx, ObjectMeta{Name: name}, strings.NewReader(data)) } // GetString is a convenience function to pull an object from this object store and return it as a string. func (obs *obs) GetString(ctx context.Context, name string, opts ...GetObjectOpt) (string, error) { result, err := obs.Get(ctx, name, opts...) if err != nil { return "", err } defer result.Close() var b bytes.Buffer if _, err := b.ReadFrom(result); err != nil { return "", err } return b.String(), nil } // PutFile is convenience function to put a file into an object store. 
func (obs *obs) PutFile(ctx context.Context, file string) (*ObjectInfo, error) {
	f, err := os.Open(file)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	// NOTE: the full path given in `file` is used as the object name.
	return obs.Put(ctx, ObjectMeta{Name: file}, f)
}

// GetFile is a convenience function to pull an object and place it in a file.
func (obs *obs) GetFile(ctx context.Context, name, file string, opts ...GetObjectOpt) error {
	// Expect file to be new.
	f, err := os.OpenFile(file, os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		return err
	}
	defer f.Close()

	result, err := obs.Get(ctx, name, opts...)
	if err != nil {
		// remove the (possibly just created) file on failure
		os.Remove(f.Name())
		return err
	}
	defer result.Close()

	// Stream copy to the file.
	_, err = io.Copy(f, result)
	return err
}

// GetInfo will retrieve the current information for the object.
func (obs *obs) GetInfo(ctx context.Context, name string, opts ...GetObjectInfoOpt) (*ObjectInfo, error) {
	// Grab last meta value we have.
	if name == "" {
		return nil, ErrNameRequired
	}
	var o getObjectInfoOpts
	for _, opt := range opts {
		if opt != nil {
			if err := opt(&o); err != nil {
				return nil, err
			}
		}
	}

	metaSubj := fmt.Sprintf(objMetaPreTmpl, obs.name, encodeName(name)) // used as data in a JS API call
	m, err := obs.stream.GetLastMsgForSubject(ctx, metaSubj)
	if err != nil {
		// map low-level stream errors to object-store sentinel errors
		if errors.Is(err, ErrMsgNotFound) {
			err = ErrObjectNotFound
		}
		if errors.Is(err, ErrStreamNotFound) {
			err = ErrBucketNotFound
		}
		return nil, err
	}

	var info ObjectInfo
	if err := json.Unmarshal(m.Data, &info); err != nil {
		return nil, ErrBadObjectMeta
	}
	// deleted objects are hidden unless explicitly requested
	if !o.showDeleted && info.Deleted {
		return nil, ErrObjectNotFound
	}
	// the stored meta carries no time; use the message timestamp
	info.ModTime = m.Time
	return &info, nil
}

// UpdateMeta will update the meta for the object.
func (obs *obs) UpdateMeta(ctx context.Context, name string, meta ObjectMeta) error {
	// Grab the current meta.
	info, err := obs.GetInfo(ctx, name)
	if err != nil {
		if errors.Is(err, ErrObjectNotFound) {
			return ErrUpdateMetaDeleted
		}
		return err
	}

	// If the new name is different from the old, and it exists, error
	// If there was an error that was not ErrObjectNotFound, error.
	if name != meta.Name {
		existingInfo, err := obs.GetInfo(ctx, meta.Name, GetObjectInfoShowDeleted())
		if err != nil && !errors.Is(err, ErrObjectNotFound) {
			return err
		}
		if err == nil && !existingInfo.Deleted {
			return ErrObjectAlreadyExists
		}
	}

	// Update Meta prevents update of ObjectMetaOptions (Link, ChunkSize)
	// These should only be updated internally when appropriate.
	info.Name = meta.Name
	info.Description = meta.Description
	info.Headers = meta.Headers
	info.Metadata = meta.Metadata

	// Prepare the meta message
	if err = publishMeta(ctx, info, obs.js); err != nil {
		return err
	}

	// did the name of this object change? We just stored the meta under the new name
	// so delete the meta from the old name via purge stream for subject
	if name != meta.Name {
		metaSubj := fmt.Sprintf(objMetaPreTmpl, obs.name, encodeName(name))
		return obs.stream.Purge(ctx, WithPurgeSubject(metaSubj))
	}
	return nil
}

// Seal will seal the object store, no further modifications will be allowed.
func (obs *obs) Seal(ctx context.Context) error {
	si, err := obs.stream.Info(ctx)
	if err != nil {
		return err
	}

	// Seal the stream from being able to take on more messages.
	cfg := si.Config
	cfg.Sealed = true
	_, err = obs.js.UpdateStream(ctx, cfg)
	return err
}

// Implementation for Watch
type objWatcher struct {
	// updates carries meta updates; a nil entry marks end of initial data
	updates chan *ObjectInfo
	// sub is the underlying push subscription delivering meta messages
	sub *nats.Subscription
}

// Updates returns the interior channel.
func (w *objWatcher) Updates() <-chan *ObjectInfo {
	if w == nil {
		return nil
	}
	return w.updates
}

// Stop will unsubscribe from the watcher.
func (w *objWatcher) Stop() error {
	if w == nil {
		return nil
	}
	return w.sub.Unsubscribe()
}

// Watch for changes in the underlying store and receive meta information updates.
func (obs *obs) Watch(ctx context.Context, opts ...WatchOpt) (ObjectWatcher, error) {
	var o watchOpts
	for _, opt := range opts {
		if opt != nil {
			if err := opt.configureWatcher(&o); err != nil {
				return nil, err
			}
		}
	}

	// set once the initial data marker (a nil update) has been sent
	var initDoneMarker bool

	w := &objWatcher{updates: make(chan *ObjectInfo, 32)}

	// update is invoked for every meta message delivered on the watch subject
	update := func(m *nats.Msg) {
		var info ObjectInfo
		if err := json.Unmarshal(m.Data, &info); err != nil {
			return // TODO(dlc) - Communicate this upwards?
		}
		meta, err := m.Metadata()
		if err != nil {
			return
		}

		if !o.ignoreDeletes || !info.Deleted {
			info.ModTime = meta.Timestamp
			w.updates <- &info
		}

		// if UpdatesOnly is set, do not send nil to the channel
		// as it would always be triggered after initializing the watcher
		if !initDoneMarker && meta.NumPending == 0 {
			initDoneMarker = true
			w.updates <- nil
		}
	}

	allMeta := fmt.Sprintf(objAllMetaPreTmpl, obs.name)
	_, err := obs.stream.GetLastMsgForSubject(ctx, allMeta)
	// if there are no messages on the stream and we are not watching
	// updates only, send nil to the channel to indicate that the initial
	// watch is done
	if !o.updatesOnly {
		if errors.Is(err, ErrMsgNotFound) {
			initDoneMarker = true
			w.updates <- nil
		}
	} else {
		// if UpdatesOnly was used, mark initialization as complete
		initDoneMarker = true
	}

	// Used ordered consumer to deliver results.
	streamName := fmt.Sprintf(objNameTmpl, obs.name)
	subOpts := []nats.SubOpt{nats.OrderedConsumer(), nats.BindStream(streamName)}
	if !o.includeHistory {
		subOpts = append(subOpts, nats.DeliverLastPerSubject())
	}
	if o.updatesOnly {
		subOpts = append(subOpts, nats.DeliverNew())
	}
	subOpts = append(subOpts, nats.Context(ctx))
	sub, err := obs.pushJS.Subscribe(allMeta, update, subOpts...)
	if err != nil {
		return nil, err
	}
	// close the updates channel when the subscription ends so receivers unblock
	sub.SetClosedHandler(func(_ string) {
		close(w.updates)
	})
	w.sub = sub
	return w, nil
}

// List will list all the objects in this store.
func (obs *obs) List(ctx context.Context, opts ...ListObjectsOpt) ([]*ObjectInfo, error) {
	var o listObjectOpts
	for _, opt := range opts {
		if opt != nil {
			if err := opt(&o); err != nil {
				return nil, err
			}
		}
	}
	watchOpts := make([]WatchOpt, 0)
	if !o.showDeleted {
		watchOpts = append(watchOpts, IgnoreDeletes())
	}
	// List is implemented on top of Watch: collect updates until the
	// initial-data marker (a nil entry) arrives.
	watcher, err := obs.Watch(ctx, watchOpts...)
	if err != nil {
		return nil, err
	}
	defer watcher.Stop()

	var objs []*ObjectInfo
	updates := watcher.Updates()
Updates:
	for {
		select {
		case entry := <-updates:
			if entry == nil {
				// nil marks the end of the initial listing
				break Updates
			}
			objs = append(objs, entry)
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}
	if len(objs) == 0 {
		return nil, ErrNoObjectsFound
	}
	return objs, nil
}

// ObjectBucketStatus represents status of a Bucket, implements ObjectStoreStatus
type ObjectBucketStatus struct {
	nfo    *StreamInfo
	bucket string
}

// Bucket is the name of the bucket
func (s *ObjectBucketStatus) Bucket() string { return s.bucket }

// Description is the description supplied when creating the bucket
func (s *ObjectBucketStatus) Description() string { return s.nfo.Config.Description }

// TTL indicates how long objects are kept in the bucket
func (s *ObjectBucketStatus) TTL() time.Duration { return s.nfo.Config.MaxAge }

// Storage indicates the underlying JetStream storage technology used to store data
func (s *ObjectBucketStatus) Storage() StorageType { return s.nfo.Config.Storage }

// Replicas indicates how many storage replicas are kept for the data in the bucket
func (s *ObjectBucketStatus) Replicas() int { return s.nfo.Config.Replicas }

// Sealed indicates the stream is sealed and cannot be modified in any way
func (s *ObjectBucketStatus) Sealed() bool { return s.nfo.Config.Sealed }

// Size is the combined size of all data in the bucket including metadata, in bytes
func (s *ObjectBucketStatus) Size() uint64 { return s.nfo.State.Bytes }

// BackingStore indicates what technology is used for storage of the bucket
func (s *ObjectBucketStatus) BackingStore() string { return "JetStream" }

// Metadata is the metadata supplied when creating the bucket
func (s *ObjectBucketStatus) Metadata() map[string]string { return s.nfo.Config.Metadata }

// StreamInfo is the stream info retrieved to create the status
func (s *ObjectBucketStatus) StreamInfo() *StreamInfo { return s.nfo }

// IsCompressed indicates if the data is compressed on disk
func (s *ObjectBucketStatus) IsCompressed() bool { return s.nfo.Config.Compression != NoCompression }

// Status retrieves run-time status about a bucket
func (obs *obs) Status(ctx context.Context) (ObjectStoreStatus, error) {
	nfo, err := obs.stream.Info(ctx)
	if err != nil {
		return nil, err
	}

	status := &ObjectBucketStatus{
		nfo:    nfo,
		bucket: obs.name,
	}

	return status, nil
}

// Read impl.
func (o *objResult) Read(p []byte) (n int, err error) {
	o.Lock()
	defer o.Unlock()
	// Default read deadline; a context deadline, if present, overrides it.
	readDeadline := time.Now().Add(defaultAPITimeout)
	if ctx := o.ctx; ctx != nil {
		if deadline, ok := ctx.Deadline(); ok {
			readDeadline = deadline
		}
		select {
		case <-ctx.Done():
			if ctx.Err() == context.Canceled {
				o.err = ctx.Err()
			} else {
				o.err = nats.ErrTimeout
			}
		default:
		}
	}
	if o.err != nil {
		return 0, o.err
	}
	if o.r == nil {
		return 0, io.EOF
	}

	// o.r is one end of a net.Pipe, so it supports read deadlines.
	r := o.r.(net.Conn)
	_ = r.SetReadDeadline(readDeadline)
	n, err = r.Read(p)
	if err, ok := err.(net.Error); ok && err.Timeout() {
		// A deadline timeout with a still-live context is not an error;
		// surface context cancellation/timeout otherwise.
		if ctx := o.ctx; ctx != nil {
			select {
			case <-ctx.Done():
				if ctx.Err() == context.Canceled {
					return 0, ctx.Err()
				} else {
					return 0, nats.ErrTimeout
				}
			default:
				err = nil
			}
		}
	}
	if err == io.EOF {
		// Make sure the digest matches.
		sha := o.digest.Sum(nil)
		rsha, decodeErr := DecodeObjectDigest(o.info.Digest)
		if decodeErr != nil {
			o.err = decodeErr
			return 0, o.err
		}
		if !bytes.Equal(sha[:], rsha) {
			o.err = ErrDigestMismatch
			return 0, o.err
		}
	}
	return n, err
}

// Close impl.
func (o *objResult) Close() error { o.Lock() defer o.Unlock() if o.r == nil { return nil } return o.r.Close() } func (o *objResult) setErr(err error) { o.Lock() defer o.Unlock() o.err = err } func (o *objResult) Info() (*ObjectInfo, error) { o.Lock() defer o.Unlock() return o.info, o.err } func (o *objResult) Error() error { o.Lock() defer o.Unlock() return o.err } // ObjectStoreNames is used to retrieve a list of bucket names func (js *jetStream) ObjectStoreNames(ctx context.Context) ObjectStoreNamesLister { res := &obsLister{ obsNames: make(chan string), } l := &streamLister{js: js} streamsReq := streamsRequest{ Subject: fmt.Sprintf(objAllChunksPreTmpl, "*"), } go func() { defer close(res.obsNames) for { page, err := l.streamNames(ctx, streamsReq) if err != nil && !errors.Is(err, ErrEndOfData) { res.err = err return } for _, name := range page { if !strings.HasPrefix(name, "OBJ_") { continue } res.obsNames <- strings.TrimPrefix(name, "OBJ_") } if errors.Is(err, ErrEndOfData) { return } } }() return res } // ObjectStores is used to retrieve a list of bucket statuses func (js *jetStream) ObjectStores(ctx context.Context) ObjectStoresLister { res := &obsLister{ obs: make(chan ObjectStoreStatus), } l := &streamLister{js: js} streamsReq := streamsRequest{ Subject: fmt.Sprintf(objAllChunksPreTmpl, "*"), } go func() { defer close(res.obs) for { page, err := l.streamInfos(ctx, streamsReq) if err != nil && !errors.Is(err, ErrEndOfData) { res.err = err return } for _, info := range page { if !strings.HasPrefix(info.Config.Name, "OBJ_") { continue } res.obs <- &ObjectBucketStatus{ nfo: info, bucket: strings.TrimPrefix(info.Config.Name, "OBJ_"), } } if errors.Is(err, ErrEndOfData) { return } } }() return res } type obsLister struct { obs chan ObjectStoreStatus obsNames chan string err error } func (ol *obsLister) Status() <-chan ObjectStoreStatus { return ol.obs } func (ol *obsLister) Name() <-chan string { return ol.obsNames } func (ol *obsLister) Error() error { return 
ol.err } func mapStreamToObjectStore(js *jetStream, pushJS nats.JetStreamContext, bucket string, stream Stream) *obs { info := stream.CachedInfo() obs := &obs{ name: bucket, js: js, pushJS: pushJS, streamName: info.Config.Name, stream: stream, } return obs } nats.go-1.41.0/jetstream/object_options.go000066400000000000000000000024421477351342400205300ustar00rootroot00000000000000// Copyright 2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package jetstream // GetObjectShowDeleted makes [ObjectStore.Get] return object even if it was // marked as deleted. func GetObjectShowDeleted() GetObjectOpt { return func(opts *getObjectOpts) error { opts.showDeleted = true return nil } } // GetObjectInfoShowDeleted makes [ObjectStore.GetInfo] return object info event // if it was marked as deleted. func GetObjectInfoShowDeleted() GetObjectInfoOpt { return func(opts *getObjectInfoOpts) error { opts.showDeleted = true return nil } } // ListObjectsShowDeleted makes [ObjectStore.ListObjects] also return deleted // objects. func ListObjectsShowDeleted() ListObjectsOpt { return func(opts *listObjectOpts) error { opts.showDeleted = true return nil } } nats.go-1.41.0/jetstream/ordered.go000066400000000000000000000526231477351342400171410ustar00rootroot00000000000000// Copyright 2022-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package jetstream import ( "context" "errors" "fmt" "strconv" "strings" "sync" "sync/atomic" "time" "github.com/nats-io/nats.go" ) type ( orderedConsumer struct { js *jetStream cfg *OrderedConsumerConfig stream string currentConsumer *pullConsumer currentSub *pullSubscription cursor cursor namePrefix string serial int consumerType consumerType doReset chan struct{} resetInProgress atomic.Uint32 userErrHandler ConsumeErrHandlerFunc stopAfter int stopAfterMsgsLeft chan int withStopAfter bool runningFetch *fetchResult subscription *orderedSubscription sync.Mutex } orderedSubscription struct { consumer *orderedConsumer opts []PullMessagesOpt done chan struct{} closed atomic.Uint32 } cursor struct { streamSeq uint64 deliverSeq uint64 } consumerType int ) const ( consumerTypeNotSet consumerType = iota consumerTypeConsume consumerTypeFetch ) var ( errOrderedSequenceMismatch = errors.New("sequence mismatch") errOrderedConsumerClosed = errors.New("ordered consumer closed") ) // Consume can be used to continuously receive messages and handle them // with the provided callback function. Consume cannot be used concurrently // when using ordered consumer. // // See [Consumer.Consume] for more details. 
func (c *orderedConsumer) Consume(handler MessageHandler, opts ...PullConsumeOpt) (ConsumeContext, error) {
	// First use: create the underlying consumer. Concurrent Consume calls
	// and mixing with Fetch are rejected.
	if (c.consumerType == consumerTypeNotSet || c.consumerType == consumerTypeConsume) && c.currentConsumer == nil {
		err := c.reset()
		if err != nil {
			return nil, err
		}
	} else if c.consumerType == consumerTypeConsume && c.currentConsumer != nil {
		return nil, ErrOrderedConsumerConcurrentRequests
	}
	if c.consumerType == consumerTypeFetch {
		return nil, ErrOrderConsumerUsedAsFetch
	}
	c.consumerType = consumerTypeConsume
	consumeOpts, err := parseConsumeOpts(true, opts...)
	if err != nil {
		return nil, fmt.Errorf("%w: %s", ErrInvalidOption, err)
	}
	c.userErrHandler = consumeOpts.ErrHandler
	// Append internal options; these are sliced off and re-appended on reset
	// below, so their count and order matter.
	opts = append(opts, consumeReconnectNotify(),
		ConsumeErrHandler(c.errHandler(c.serial)))
	if consumeOpts.StopAfter > 0 {
		c.withStopAfter = true
		c.stopAfter = consumeOpts.StopAfter
	}
	c.stopAfterMsgsLeft = make(chan int, 1)
	if c.stopAfter > 0 {
		opts = append(opts, consumeStopAfterNotify(c.stopAfter, c.stopAfterMsgsLeft))
	}
	sub := &orderedSubscription{
		consumer: c,
		done:     make(chan struct{}, 1),
	}
	c.subscription = sub
	internalHandler := func(serial int) func(msg Msg) {
		return func(msg Msg) {
			// handler is a noop if message was delivered for a consumer with different serial
			if serial != c.serial {
				return
			}
			meta, err := msg.Metadata()
			if err != nil {
				c.errHandler(serial)(c.currentSub, err)
				return
			}
			// enforce gapless consumer sequence; a mismatch triggers a reset
			dseq := meta.Sequence.Consumer
			if dseq != c.cursor.deliverSeq+1 {
				c.errHandler(serial)(sub, errOrderedSequenceMismatch)
				return
			}
			c.cursor.deliverSeq = dseq
			c.cursor.streamSeq = meta.Sequence.Stream
			handler(msg)
		}
	}

	cc, err := c.currentConsumer.Consume(internalHandler(c.serial), opts...)
	if err != nil {
		return nil, err
	}
	c.currentSub = cc.(*pullSubscription)

	// Background loop: recreates the consumer on reset requests and tears
	// down on Stop/Drain or stop-after exhaustion.
	go func() {
		for {
			select {
			case <-c.doReset:
				if err := c.reset(); err != nil {
					if errors.Is(err, errOrderedConsumerClosed) {
						continue
					}
					c.errHandler(c.serial)(c.currentSub, err)
				}
				if c.withStopAfter {
					select {
					case c.stopAfter = <-c.stopAfterMsgsLeft:
					default:
					}
					if c.stopAfter <= 0 {
						sub.Stop()
						return
					}
				}
				// drop the internal options appended above: err handler plus,
				// when stop-after is active, the stop-after notifier
				if c.stopAfter > 0 {
					opts = opts[:len(opts)-2]
				} else {
					opts = opts[:len(opts)-1]
				}

				// overwrite the previous err handler to use the new serial
				opts = append(opts, ConsumeErrHandler(c.errHandler(c.serial)))
				if c.withStopAfter {
					opts = append(opts, consumeStopAfterNotify(c.stopAfter, c.stopAfterMsgsLeft))
				}
				if cc, err := c.currentConsumer.Consume(internalHandler(c.serial), opts...); err != nil {
					c.errHandler(c.serial)(cc, err)
				} else {
					c.Lock()
					c.currentSub = cc.(*pullSubscription)
					c.Unlock()
				}
			case <-sub.done:
				s := sub.consumer.currentSub
				if s != nil {
					sub.consumer.Lock()
					s.Stop()
					sub.consumer.Unlock()
				}
				return
			case msgsLeft, ok := <-c.stopAfterMsgsLeft:
				if !ok {
					close(sub.done)
				}
				c.stopAfter = msgsLeft
				return
			}
		}
	}()
	return sub, nil
}

// errHandler returns the internal error callback for the consumer instance
// with the given serial; it forwards eligible errors to the user handler and
// requests a reset on recoverable failures.
func (c *orderedConsumer) errHandler(serial int) func(cc ConsumeContext, err error) {
	return func(cc ConsumeContext, err error) {
		c.Lock()
		defer c.Unlock()
		// internal signals (sequence mismatch, reconnect) are not surfaced
		// to the user error handler
		if c.userErrHandler != nil && !errors.Is(err, errOrderedSequenceMismatch) && !errors.Is(err, errConnected) {
			c.userErrHandler(cc, err)
		}
		if errors.Is(err, ErrNoHeartbeat) ||
			errors.Is(err, errOrderedSequenceMismatch) ||
			errors.Is(err, ErrConsumerDeleted) ||
			errors.Is(err, errConnected) ||
			errors.Is(err, nats.ErrNoResponders) {
			// only reset if serial matches the current consumer serial and there is no reset in progress
			if serial == c.serial && c.resetInProgress.Load() == 0 {
				c.resetInProgress.Store(1)
				c.doReset <- struct{}{}
			}
		}
	}
}

// Messages returns MessagesContext, allowing continuously iterating
// over messages on a stream. Messages cannot be used concurrently
// when using ordered consumer.
// // See [Consumer.Messages] for more details. func (c *orderedConsumer) Messages(opts ...PullMessagesOpt) (MessagesContext, error) { if (c.consumerType == consumerTypeNotSet || c.consumerType == consumerTypeConsume) && c.currentConsumer == nil { err := c.reset() if err != nil { return nil, err } } else if c.consumerType == consumerTypeConsume && c.currentConsumer != nil { return nil, ErrOrderedConsumerConcurrentRequests } if c.consumerType == consumerTypeFetch { return nil, ErrOrderConsumerUsedAsFetch } c.consumerType = consumerTypeConsume consumeOpts, err := parseMessagesOpts(true, opts...) if err != nil { return nil, fmt.Errorf("%w: %s", ErrInvalidOption, err) } opts = append(opts, WithMessagesErrOnMissingHeartbeat(true), messagesReconnectNotify()) c.stopAfterMsgsLeft = make(chan int, 1) if consumeOpts.StopAfter > 0 { c.withStopAfter = true c.stopAfter = consumeOpts.StopAfter } c.userErrHandler = consumeOpts.ErrHandler if c.stopAfter > 0 { opts = append(opts, messagesStopAfterNotify(c.stopAfter, c.stopAfterMsgsLeft)) } cc, err := c.currentConsumer.Messages(opts...) if err != nil { return nil, err } c.currentSub = cc.(*pullSubscription) sub := &orderedSubscription{ consumer: c, opts: opts, done: make(chan struct{}, 1), } c.subscription = sub return sub, nil } func (s *orderedSubscription) Next() (Msg, error) { for { msg, err := s.consumer.currentSub.Next() if err != nil { if errors.Is(err, ErrMsgIteratorClosed) { s.Stop() return nil, err } if s.consumer.withStopAfter { select { case s.consumer.stopAfter = <-s.consumer.stopAfterMsgsLeft: default: } if s.consumer.stopAfter <= 0 { s.Stop() return nil, ErrMsgIteratorClosed } s.opts[len(s.opts)-1] = StopAfter(s.consumer.stopAfter) } if err := s.consumer.reset(); err != nil { if errors.Is(err, errOrderedConsumerClosed) { return nil, ErrMsgIteratorClosed } return nil, err } cc, err := s.consumer.currentConsumer.Messages(s.opts...) 
if err != nil { return nil, err } s.consumer.currentSub = cc.(*pullSubscription) continue } meta, err := msg.Metadata() if err != nil { return nil, err } serial := serialNumberFromConsumer(meta.Consumer) if serial != s.consumer.serial { continue } dseq := meta.Sequence.Consumer if dseq != s.consumer.cursor.deliverSeq+1 { if err := s.consumer.reset(); err != nil { if errors.Is(err, errOrderedConsumerClosed) { return nil, ErrMsgIteratorClosed } return nil, err } cc, err := s.consumer.currentConsumer.Messages(s.opts...) if err != nil { return nil, err } s.consumer.currentSub = cc.(*pullSubscription) continue } s.consumer.cursor.deliverSeq = dseq s.consumer.cursor.streamSeq = meta.Sequence.Stream return msg, nil } } func (s *orderedSubscription) Stop() { if !s.closed.CompareAndSwap(0, 1) { return } s.consumer.Lock() defer s.consumer.Unlock() if s.consumer.currentSub != nil { s.consumer.currentSub.Stop() } close(s.done) } func (s *orderedSubscription) Drain() { if !s.closed.CompareAndSwap(0, 1) { return } if s.consumer.currentSub != nil { s.consumer.currentConsumer.Lock() s.consumer.currentSub.Drain() s.consumer.currentConsumer.Unlock() } close(s.done) } // Closed returns a channel that is closed when the consuming is // fully stopped/drained. When the channel is closed, no more messages // will be received and processing is complete. func (s *orderedSubscription) Closed() <-chan struct{} { s.consumer.Lock() defer s.consumer.Unlock() closedCh := make(chan struct{}) go func() { for { s.consumer.Lock() if s.consumer.currentSub == nil { return } closed := s.consumer.currentSub.Closed() s.consumer.Unlock() // wait until the underlying pull consumer is closed <-closed // if the subscription is closed and ordered consumer is closed as well, // send a signal that the Consume() is fully stopped if s.closed.Load() == 1 { close(closedCh) return } } }() return closedCh } // Fetch is used to retrieve up to a provided number of messages from a // stream. 
This method will always send a single request and wait until // either all messages are retrieved or request times out. // // It is not efficient to use Fetch with on an ordered consumer, as it will // reset the consumer for each subsequent Fetch call. // Consider using [Consumer.Consume] or [Consumer.Messages] instead. func (c *orderedConsumer) Fetch(batch int, opts ...FetchOpt) (MessageBatch, error) { c.Lock() if c.consumerType == consumerTypeConsume { c.Unlock() return nil, ErrOrderConsumerUsedAsConsume } if c.runningFetch != nil { if !c.runningFetch.closed() { return nil, ErrOrderedConsumerConcurrentRequests } if c.runningFetch.sseq != 0 { c.cursor.streamSeq = c.runningFetch.sseq } } c.consumerType = consumerTypeFetch sub := orderedSubscription{ consumer: c, done: make(chan struct{}), } c.subscription = &sub c.Unlock() err := c.reset() if err != nil { return nil, err } msgs, err := c.currentConsumer.Fetch(batch, opts...) if err != nil { return nil, err } c.runningFetch = msgs.(*fetchResult) return msgs, nil } // FetchBytes is used to retrieve up to a provided bytes from the // stream. This method will always send a single request and wait until // provided number of bytes is exceeded or request times out. // // It is not efficient to use FetchBytes with on an ordered consumer, as it will // reset the consumer for each subsequent Fetch call. // Consider using [Consumer.Consume] or [Consumer.Messages] instead. 
func (c *orderedConsumer) FetchBytes(maxBytes int, opts ...FetchOpt) (MessageBatch, error) { c.Lock() if c.consumerType == consumerTypeConsume { c.Unlock() return nil, ErrOrderConsumerUsedAsConsume } if c.runningFetch != nil { if !c.runningFetch.closed() { return nil, ErrOrderedConsumerConcurrentRequests } if c.runningFetch.sseq != 0 { c.cursor.streamSeq = c.runningFetch.sseq } } c.consumerType = consumerTypeFetch sub := orderedSubscription{ consumer: c, done: make(chan struct{}), } c.subscription = &sub c.Unlock() err := c.reset() if err != nil { return nil, err } msgs, err := c.currentConsumer.FetchBytes(maxBytes, opts...) if err != nil { return nil, err } c.runningFetch = msgs.(*fetchResult) return msgs, nil } // FetchNoWait is used to retrieve up to a provided number of messages // from a stream. This method will always send a single request and // immediately return up to a provided number of messages or wait until // at least one message is available or request times out. // // It is not efficient to use FetchNoWait with on an ordered consumer, as it will // reset the consumer for each subsequent Fetch call. // Consider using [Consumer.Consume] or [Consumer.Messages] instead. func (c *orderedConsumer) FetchNoWait(batch int) (MessageBatch, error) { if c.consumerType == consumerTypeConsume { return nil, ErrOrderConsumerUsedAsConsume } if c.runningFetch != nil && !c.runningFetch.done { return nil, ErrOrderedConsumerConcurrentRequests } c.consumerType = consumerTypeFetch sub := orderedSubscription{ consumer: c, done: make(chan struct{}), } c.subscription = &sub err := c.reset() if err != nil { return nil, err } return c.currentConsumer.FetchNoWait(batch) } // Next is used to retrieve the next message from the stream. This // method will block until the message is retrieved or timeout is // reached. // // It is not efficient to use Next with on an ordered consumer, as it will // reset the consumer for each subsequent Fetch call. 
// Consider using [Consumer.Consume] or [Consumer.Messages] instead.
func (c *orderedConsumer) Next(opts ...FetchOpt) (Msg, error) {
	res, err := c.Fetch(1, opts...)
	if err != nil {
		return nil, err
	}
	msg := <-res.Messages()
	if msg != nil {
		return msg, nil
	}
	// channel closed with no message: surface the fetch error, or a
	// timeout if the request simply expired without error
	if res.Error() == nil {
		return nil, nats.ErrTimeout
	}
	return nil, res.Error()
}

// serialNumberFromConsumer extracts the trailing "_<serial>" suffix from an
// ordered consumer name; returns 0 when the name has no parseable suffix.
func serialNumberFromConsumer(name string) int {
	if len(name) == 0 {
		return 0
	}
	parts := strings.Split(name, "_")
	if len(parts) < 2 {
		return 0
	}
	serial, err := strconv.Atoi(parts[len(parts)-1])
	if err != nil {
		return 0
	}
	return serial
}

// reset tears down the current ephemeral consumer (deleting it on the server
// asynchronously) and creates a fresh one starting at the current cursor,
// retrying with exponential backoff up to cfg.MaxResetAttempts.
func (c *orderedConsumer) reset() error {
	c.Lock()
	defer c.Unlock()
	defer c.resetInProgress.Store(0)
	if c.currentConsumer != nil {
		c.currentConsumer.Lock()
		if c.currentSub != nil {
			c.currentSub.Stop()
		}
		consName := c.currentConsumer.CachedInfo().Name
		c.currentConsumer.Unlock()
		// best-effort delete of the old server-side consumer; errors ignored
		go func() {
			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
			_ = c.js.DeleteConsumer(ctx, c.stream, consName)
			cancel()
		}()
	}
	c.cursor.deliverSeq = 0
	consumerConfig := c.getConsumerConfig()

	var err error
	var cons Consumer

	backoffOpts := backoffOpts{
		attempts:        c.cfg.MaxResetAttempts,
		initialInterval: time.Second,
		factor:          2,
		maxInterval:     10 * time.Second,
		cancel:          c.subscription.done,
	}
	err = retryWithBackoff(func(attempt int) (bool, error) {
		isClosed := c.subscription.closed.Load() == 1
		if isClosed {
			// stop retrying once the subscription has been closed
			return false, errOrderedConsumerClosed
		}
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()
		cons, err = c.js.CreateOrUpdateConsumer(ctx, c.stream, *consumerConfig)
		if err != nil {
			return true, err
		}
		return false, nil
	}, backoffOpts)
	if err != nil {
		return err
	}
	c.currentConsumer = cons.(*pullConsumer)
	return nil
}

// getConsumerConfig builds the config for the next ephemeral consumer
// instance, bumping the serial suffix and positioning the start sequence
// just past the last consumed stream sequence.
func (c *orderedConsumer) getConsumerConfig() *ConsumerConfig {
	c.serial++
	var nextSeq uint64

	// if stream sequence is not initialized, no message was consumed yet
	// therefore, start from the beginning (either from 1 or from the provided sequence)
	if c.cursor.streamSeq == 0 {
		if c.cfg.OptStartSeq != 0 {
			nextSeq = c.cfg.OptStartSeq
		} else {
			nextSeq = 1
		}
	} else {
		// otherwise, start from the next sequence
		nextSeq = c.cursor.streamSeq + 1
	}

	if c.cfg.MaxResetAttempts == 0 {
		c.cfg.MaxResetAttempts = -1
	}
	name := fmt.Sprintf("%s_%d", c.namePrefix, c.serial)
	cfg := &ConsumerConfig{
		Name:              name,
		DeliverPolicy:     DeliverByStartSequencePolicy,
		OptStartSeq:       nextSeq,
		AckPolicy:         AckNonePolicy,
		InactiveThreshold: 5 * time.Minute,
		Replicas:          1,
		HeadersOnly:       c.cfg.HeadersOnly,
		MemoryStorage:     true,
		Metadata:          c.cfg.Metadata,
	}
	if len(c.cfg.FilterSubjects) == 1 {
		cfg.FilterSubject = c.cfg.FilterSubjects[0]
	} else {
		cfg.FilterSubjects = c.cfg.FilterSubjects
	}
	if c.cfg.InactiveThreshold != 0 {
		cfg.InactiveThreshold = c.cfg.InactiveThreshold
	}

	// if the cursor is not yet set, use the provided deliver policy
	if c.cursor.streamSeq != 0 {
		return cfg
	}

	// initial request, some options may be modified at that point
	cfg.DeliverPolicy = c.cfg.DeliverPolicy
	if c.cfg.DeliverPolicy == DeliverLastPerSubjectPolicy ||
		c.cfg.DeliverPolicy == DeliverLastPolicy ||
		c.cfg.DeliverPolicy == DeliverNewPolicy ||
		c.cfg.DeliverPolicy == DeliverAllPolicy {
		cfg.OptStartSeq = 0
	} else if c.cfg.DeliverPolicy == DeliverByStartTimePolicy {
		cfg.OptStartSeq = 0
		cfg.OptStartTime = c.cfg.OptStartTime
	} else {
		cfg.OptStartSeq = c.cfg.OptStartSeq
	}

	if cfg.DeliverPolicy == DeliverLastPerSubjectPolicy && len(c.cfg.FilterSubjects) == 0 {
		cfg.FilterSubjects = []string{">"}
	}

	return cfg
}

// consumeStopAfterNotify sets StopAfter and the channel notified with the
// number of messages left when consuming stops (used by Consume).
func consumeStopAfterNotify(numMsgs int, msgsLeftAfterStop chan int) PullConsumeOpt {
	return pullOptFunc(func(opts *consumeOpts) error {
		opts.StopAfter = numMsgs
		opts.stopAfterMsgsLeft = msgsLeftAfterStop
		return nil
	})
}

// messagesStopAfterNotify is the [Consumer.Messages] counterpart of
// consumeStopAfterNotify.
func messagesStopAfterNotify(numMsgs int, msgsLeftAfterStop chan int) PullMessagesOpt {
	return pullOptFunc(func(opts *consumeOpts) error {
		opts.StopAfter = numMsgs
		opts.stopAfterMsgsLeft = msgsLeftAfterStop
		return nil
	})
}

// consumeReconnectNotify makes Consume surface a reconnect event via the
// error handler.
func consumeReconnectNotify() PullConsumeOpt {
	return pullOptFunc(func(opts *consumeOpts) error {
		opts.notifyOnReconnect = true
		return nil
	})
}

// messagesReconnectNotify is the [Consumer.Messages] counterpart of
// consumeReconnectNotify.
func messagesReconnectNotify() PullMessagesOpt {
	return pullOptFunc(func(opts *consumeOpts) error {
		opts.notifyOnReconnect = true
		return nil
	})
}

// Info returns information about the ordered consumer.
// Note that this method will fetch the latest instance of the
// consumer from the server, which can be deleted by the library at any time.
func (c *orderedConsumer) Info(ctx context.Context) (*ConsumerInfo, error) {
	c.Lock()
	defer c.Unlock()
	if c.currentConsumer == nil {
		return nil, ErrOrderedConsumerNotCreated
	}
	infoSubject := fmt.Sprintf(apiConsumerInfoT, c.stream, c.currentConsumer.name)
	var resp consumerInfoResponse

	if _, err := c.js.apiRequestJSON(ctx, infoSubject, &resp); err != nil {
		return nil, err
	}
	if resp.Error != nil {
		if resp.Error.ErrorCode == JSErrCodeConsumerNotFound {
			return nil, ErrConsumerNotFound
		}
		return nil, resp.Error
	}
	if resp.Error == nil && resp.ConsumerInfo == nil {
		return nil, ErrConsumerNotFound
	}

	// refresh the cached info on the underlying consumer
	c.currentConsumer.info = resp.ConsumerInfo
	return resp.ConsumerInfo, nil
}

// CachedInfo returns cached information about the consumer currently
// used by the ordered consumer. Cached info will be updated on every call
// to [Consumer.Info] or on consumer reset.
func (c *orderedConsumer) CachedInfo() *ConsumerInfo {
	c.Lock()
	defer c.Unlock()
	// nil until the first underlying consumer has been created
	if c.currentConsumer == nil {
		return nil
	}
	return c.currentConsumer.info
}

type backoffOpts struct {
	// total retry attempts
	// -1 for unlimited
	attempts int
	// initial interval after which first retry will be performed
	// defaults to 1s
	initialInterval time.Duration
	// determines whether first function execution should be performed immediately
	disableInitialExecution bool
	// multiplier on each attempt
	// defaults to 2
	factor float64
	// max interval between retries
	// after reaching this value, all subsequent
	// retries will be performed with this interval
	// defaults to 1 minute
	maxInterval time.Duration
	// custom backoff intervals
	// if set, overrides all other options except attempts
	// if attempts are set, then the last interval will be used
	// for all subsequent retries after reaching the limit
	customBackoff []time.Duration
	// cancel channel
	// if set, retry will be canceled when this channel is closed
	cancel <-chan struct{}
}

// retryWithBackoff invokes f until it returns shouldContinue == false, the
// attempt limit is reached, or opts.cancel is closed (which returns nil).
// f receives the zero-based attempt number.
func retryWithBackoff(f func(int) (bool, error), opts backoffOpts) error {
	var err error
	var shouldContinue bool
	// if custom backoff is set, use it instead of other options
	if len(opts.customBackoff) > 0 {
		if opts.attempts != 0 {
			return errors.New("cannot use custom backoff intervals when attempts are set")
		}
		for i, interval := range opts.customBackoff {
			select {
			case <-opts.cancel:
				return nil
			case <-time.After(interval):
			}
			shouldContinue, err = f(i)
			if !shouldContinue {
				return err
			}
		}
		return err
	}

	// set default options
	if opts.initialInterval == 0 {
		opts.initialInterval = 1 * time.Second
	}
	if opts.factor == 0 {
		opts.factor = 2
	}
	if opts.maxInterval == 0 {
		opts.maxInterval = 1 * time.Minute
	}
	if opts.attempts == 0 {
		return errors.New("retry attempts have to be set when not using custom backoff intervals")
	}
	interval := opts.initialInterval
	for i := 0; ; i++ {
		if i == 0 && opts.disableInitialExecution {
			// NOTE(review): this sleep does not honor opts.cancel — confirm
			// whether cancellation should interrupt the initial delay too.
			time.Sleep(interval)
			continue
		}
		shouldContinue, err = f(i)
		if !shouldContinue {
			return err
		}
		if opts.attempts > 0 && i >= opts.attempts-1 {
			break
		}
		select {
		case <-opts.cancel:
			return nil
		case <-time.After(interval):
		}
		// grow the interval geometrically, capped at maxInterval
		interval = time.Duration(float64(interval) * opts.factor)
		if interval >= opts.maxInterval {
			interval = opts.maxInterval
		}
	}
	return err
}
nats.go-1.41.0/jetstream/publish.go000066400000000000000000000406711477351342400171610ustar00rootroot00000000000000// Copyright 2022-2025 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package jetstream

import (
	"context"
	"crypto/sha256"
	"encoding/json"
	"errors"
	"fmt"
	"math/rand"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/nats-io/nats.go"
	"github.com/nats-io/nuid"
)

type (
	asyncPublisherOpts struct {
		// For async publish error handling.
		aecb MsgErrHandler
		// Max async pub ack in flight
		maxpa int
		// ackTimeout is the max time to wait for an ack.
		ackTimeout time.Duration
	}

	// PublishOpt are the options that can be passed to Publish methods.
	PublishOpt func(*pubOpts) error

	pubOpts struct {
		id             string
		lastMsgID      string        // Expected last msgId
		stream         string        // Expected stream name
		lastSeq        *uint64       // Expected last sequence
		lastSubjectSeq *uint64       // Expected last sequence per subject
		ttl            time.Duration // Message TTL

		// Publish retries for NoResponders err.
		retryWait     time.Duration // Retry wait between attempts
		retryAttempts int           // Retry attempts

		// stallWait is the max wait of a async pub ack.
		stallWait time.Duration

		// internal option to re-use existing paf in case of retry.
		pafRetry *pubAckFuture
	}

	// PubAckFuture is a future for a PubAck.
	// It can be used to wait for a PubAck or an error after an async publish.
	PubAckFuture interface {
		// Ok returns a receive only channel that can be used to get a PubAck.
		Ok() <-chan *PubAck

		// Err returns a receive only channel that can be used to get the error from an async publish.
		Err() <-chan error

		// Msg returns the message that was sent to the server.
		Msg() *nats.Msg
	}

	pubAckFuture struct {
		jsClient   *jetStreamClient
		msg        *nats.Msg
		retries    int
		maxRetries int
		retryWait  time.Duration
		ack        *PubAck
		err        error
		errCh      chan error
		doneCh     chan *PubAck
		reply      string
		timeout    *time.Timer
	}

	jetStreamClient struct {
		asyncPublishContext
		asyncPublisherOpts
	}

	// MsgErrHandler is used to process asynchronous errors from JetStream
	// PublishAsync. It will return the original message sent to the server for
	// possible retransmitting and the error encountered.
	MsgErrHandler func(JetStream, *nats.Msg, error)

	asyncPublishContext struct {
		sync.RWMutex
		replyPrefix string
		replySub    *nats.Subscription
		acks        map[string]*pubAckFuture
		stallCh     chan struct{}
		doneCh      chan struct{}
		rr          *rand.Rand
		// channel to signal when server is disconnected or conn is closed
		connStatusCh chan (nats.Status)
	}

	pubAckResponse struct {
		apiResponse
		*PubAck
	}

	// PubAck is an ack received after successfully publishing a message.
	PubAck struct {
		// Stream is the stream name the message was published to.
		Stream string `json:"stream"`

		// Sequence is the stream sequence number of the message.
		Sequence uint64 `json:"seq"`

		// Duplicate indicates whether the message was a duplicate.
		// Duplicate can be detected using the [MsgIDHeader] and [StreamConfig.Duplicates].
		Duplicate bool `json:"duplicate,omitempty"`

		// Domain is the domain the message was published to.
		Domain string `json:"domain,omitempty"`
	}
)

const (
	// Default time wait between retries on Publish if err is ErrNoResponders.
	DefaultPubRetryWait = 250 * time.Millisecond

	// Default number of retries
	DefaultPubRetryAttempts = 2
)

const (
	statusHdr = "Status"

	rdigits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
	base    = 62
)

// Publish performs a synchronous publish to a stream and waits for ack
// from server. It accepts subject name (which must be bound to a stream)
// and message payload.
func (js *jetStream) Publish(ctx context.Context, subj string, data []byte, opts ...PublishOpt) (*PubAck, error) {
	return js.PublishMsg(ctx, &nats.Msg{Subject: subj, Data: data}, opts...)
}

// PublishMsg performs a synchronous publish to a stream and waits for
// ack from server. It accepts subject name (which must be bound to a
// stream) and nats.Message.
func (js *jetStream) PublishMsg(ctx context.Context, m *nats.Msg, opts ...PublishOpt) (*PubAck, error) {
	ctx, cancel := js.wrapContextWithoutDeadline(ctx)
	if cancel != nil {
		defer cancel()
	}
	o := pubOpts{
		retryWait:     DefaultPubRetryWait,
		retryAttempts: DefaultPubRetryAttempts,
	}
	if len(opts) > 0 {
		if m.Header == nil {
			m.Header = nats.Header{}
		}
		for _, opt := range opts {
			if err := opt(&o); err != nil {
				return nil, err
			}
		}
	}
	// stall wait only applies to async publishes
	if o.stallWait > 0 {
		return nil, fmt.Errorf("%w: stall wait cannot be set to sync publish", ErrInvalidOption)
	}

	// translate publish options into expectation headers
	if o.id != "" {
		m.Header.Set(MsgIDHeader, o.id)
	}
	if o.lastMsgID != "" {
		m.Header.Set(ExpectedLastMsgIDHeader, o.lastMsgID)
	}
	if o.stream != "" {
		m.Header.Set(ExpectedStreamHeader, o.stream)
	}
	if o.lastSeq != nil {
		m.Header.Set(ExpectedLastSeqHeader, strconv.FormatUint(*o.lastSeq, 10))
	}
	if o.lastSubjectSeq != nil {
		m.Header.Set(ExpectedLastSubjSeqHeader, strconv.FormatUint(*o.lastSubjectSeq, 10))
	}
	if o.ttl > 0 {
		m.Header.Set(MsgTTLHeader, o.ttl.String())
	}

	var resp *nats.Msg
	var err error

	resp, err = js.conn.RequestMsgWithContext(ctx, m)
	if err != nil {
		for r := 0; errors.Is(err, nats.ErrNoResponders) && (r < o.retryAttempts || o.retryAttempts < 0); r++ {
			// To protect against small blips in leadership changes etc, if we get a no responders here retry.
			select {
			case <-ctx.Done():
			case <-time.After(o.retryWait):
			}
			resp, err = js.conn.RequestMsgWithContext(ctx, m)
		}
		if err != nil {
			if errors.Is(err, nats.ErrNoResponders) {
				return nil, ErrNoStreamResponse
			}
			return nil, err
		}
	}

	var ackResp pubAckResponse
	if err := json.Unmarshal(resp.Data, &ackResp); err != nil {
		return nil, ErrInvalidJSAck
	}
	if ackResp.Error != nil {
		return nil, fmt.Errorf("nats: %w", ackResp.Error)
	}
	if ackResp.PubAck == nil || ackResp.PubAck.Stream == "" {
		return nil, ErrInvalidJSAck
	}
	return ackResp.PubAck, nil
}

// PublishAsync performs an asynchronous publish to a stream and returns
// [PubAckFuture] interface. It accepts subject name (which must be bound
// to a stream) and message payload.
func (js *jetStream) PublishAsync(subj string, data []byte, opts ...PublishOpt) (PubAckFuture, error) {
	return js.PublishMsgAsync(&nats.Msg{Subject: subj, Data: data}, opts...)
}

// PublishMsgAsync performs an asynchronous publish to a stream and
// returns [PubAckFuture] interface. It accepts subject name (which must
// be bound to a stream) and nats.Message.
func (js *jetStream) PublishMsgAsync(m *nats.Msg, opts ...PublishOpt) (PubAckFuture, error) {
	o := pubOpts{
		retryWait:     DefaultPubRetryWait,
		retryAttempts: DefaultPubRetryAttempts,
	}
	if len(opts) > 0 {
		if m.Header == nil {
			m.Header = nats.Header{}
		}
		for _, opt := range opts {
			if err := opt(&o); err != nil {
				return nil, err
			}
		}
	}
	defaultStallWait := 200 * time.Millisecond

	stallWait := defaultStallWait
	if o.stallWait > 0 {
		stallWait = o.stallWait
	}

	// translate publish options into expectation headers
	if o.id != "" {
		m.Header.Set(MsgIDHeader, o.id)
	}
	if o.lastMsgID != "" {
		m.Header.Set(ExpectedLastMsgIDHeader, o.lastMsgID)
	}
	if o.stream != "" {
		m.Header.Set(ExpectedStreamHeader, o.stream)
	}
	if o.lastSeq != nil {
		m.Header.Set(ExpectedLastSeqHeader, strconv.FormatUint(*o.lastSeq, 10))
	}
	if o.lastSubjectSeq != nil {
		m.Header.Set(ExpectedLastSubjSeqHeader, strconv.FormatUint(*o.lastSubjectSeq, 10))
	}
	if o.ttl > 0 {
		m.Header.Set(MsgTTLHeader, o.ttl.String())
	}

	paf := o.pafRetry
	// the reply subject is owned by the async machinery; a caller-set reply
	// would break ack correlation
	if paf == nil && m.Reply != "" {
		return nil, ErrAsyncPublishReplySubjectSet
	}

	var id string
	var reply string

	// register new paf if not retrying
	if paf == nil {
		var err error
		reply, err = js.newAsyncReply()
		if err != nil {
			return nil, fmt.Errorf("nats: error creating async reply handler: %s", err)
		}
		id = reply[js.opts.replyPrefixLen:]
		paf = &pubAckFuture{msg: m, jsClient: js.publisher, maxRetries: o.retryAttempts, retryWait: o.retryWait, reply: reply}
		numPending, maxPending := js.registerPAF(id, paf)

		if maxPending > 0 && numPending > maxPending {
			// too many outstanding acks: wait up to stallWait for the
			// stall channel to clear, then give up
			select {
			case <-js.asyncStall():
			case <-time.After(stallWait):
				js.clearPAF(id)
				return nil, ErrTooManyStalledMsgs
			}
		}
		if js.publisher.ackTimeout > 0 {
			paf.timeout = time.AfterFunc(js.publisher.ackTimeout, func() {
				js.publisher.Lock()
				defer js.publisher.Unlock()

				if _, ok := js.publisher.acks[id]; !ok {
					// paf has already been resolved
					// while waiting for the lock
					return
				}

				// ack timed out, remove from pending acks
				delete(js.publisher.acks, id)

				// check on anyone stalled and waiting.
				if js.publisher.stallCh != nil && len(js.publisher.acks) < js.publisher.maxpa {
					close(js.publisher.stallCh)
					js.publisher.stallCh = nil
				}

				// send error to user
				paf.err = ErrAsyncPublishTimeout
				if paf.errCh != nil {
					paf.errCh <- paf.err
				}

				// call error callback if set
				if js.publisher.asyncPublisherOpts.aecb != nil {
					js.publisher.asyncPublisherOpts.aecb(js, paf.msg, ErrAsyncPublishTimeout)
				}

				// check on anyone one waiting on done status.
				if js.publisher.doneCh != nil && len(js.publisher.acks) == 0 {
					close(js.publisher.doneCh)
					js.publisher.doneCh = nil
				}
			})
		}
	} else {
		// when retrying, get the ID from existing reply subject
		reply = paf.reply
		if paf.timeout != nil {
			paf.timeout.Reset(js.publisher.ackTimeout)
		}
		id = reply[js.opts.replyPrefixLen:]
	}

	pubMsg := &nats.Msg{
		Subject: m.Subject,
		Reply:   reply,
		Data:    m.Data,
		Header:  m.Header,
	}
	if err := js.conn.PublishMsg(pubMsg); err != nil {
		js.clearPAF(id)
		return nil, err
	}

	return paf, nil
}

// For quick token lookup etc.
const (
	aReplyTokensize = 6
)

// newAsyncReply lazily creates the wildcard reply subscription and the
// reconnect watcher, then returns a fresh unique reply subject for one
// async publish. The suffix token doubles as the paf map key.
func (js *jetStream) newAsyncReply() (string, error) {
	js.publisher.Lock()
	if js.publisher.replySub == nil {
		// Create our wildcard reply subject.
		sha := sha256.New()
		sha.Write([]byte(nuid.Next()))
		b := sha.Sum(nil)
		for i := 0; i < aReplyTokensize; i++ {
			b[i] = rdigits[int(b[i]%base)]
		}
		js.publisher.replyPrefix = fmt.Sprintf("%s%s.", js.opts.replyPrefix, b[:aReplyTokensize])
		sub, err := js.conn.Subscribe(fmt.Sprintf("%s*", js.publisher.replyPrefix), js.handleAsyncReply)
		if err != nil {
			js.publisher.Unlock()
			return "", err
		}
		js.publisher.replySub = sub
		js.publisher.rr = rand.New(rand.NewSource(time.Now().UnixNano()))
	}
	if js.publisher.connStatusCh == nil {
		js.publisher.connStatusCh = js.conn.StatusChanged(nats.RECONNECTING, nats.CLOSED)
		go js.resetPendingAcksOnReconnect()
	}
	var sb strings.Builder
	sb.WriteString(js.publisher.replyPrefix)
	// generate a random token not already present in the acks map
	for {
		rn := js.publisher.rr.Int63()
		var b [aReplyTokensize]byte
		for i, l := 0, rn; i < len(b); i++ {
			b[i] = rdigits[l%base]
			l /= base
		}
		if _, ok := js.publisher.acks[string(b[:])]; ok {
			continue
		}
		sb.Write(b[:])
		break
	}
	js.publisher.Unlock()
	return sb.String(), nil
}

// Handle an async reply from PublishAsync.
func (js *jetStream) handleAsyncReply(m *nats.Msg) {
	if len(m.Subject) <= js.opts.replyPrefixLen {
		return
	}
	id := m.Subject[js.opts.replyPrefixLen:]

	js.publisher.Lock()

	paf := js.getPAF(id)
	if paf == nil {
		js.publisher.Unlock()
		return
	}

	closeStc := func() {
		// Check on anyone stalled and waiting.
		if js.publisher.stallCh != nil && len(js.publisher.acks) < js.publisher.maxpa {
			close(js.publisher.stallCh)
			js.publisher.stallCh = nil
		}
	}

	closeDchFn := func() func() {
		var dch chan struct{}
		// Check on anyone one waiting on done status.
		if js.publisher.doneCh != nil && len(js.publisher.acks) == 0 {
			dch = js.publisher.doneCh
			js.publisher.doneCh = nil
		}
		// Return function to close done channel which
		// should be deferred so that error is processed and
		// can be checked.
		return func() {
			if dch != nil {
				close(dch)
			}
		}
	}

	// doErr delivers the error and releases the publisher lock; the user
	// callback is invoked outside the lock.
	doErr := func(err error) {
		paf.err = err
		if paf.errCh != nil {
			paf.errCh <- paf.err
		}
		cb := js.publisher.asyncPublisherOpts.aecb
		js.publisher.Unlock()
		if cb != nil {
			cb(js, paf.msg, err)
		}
	}

	if paf.timeout != nil {
		paf.timeout.Stop()
	}

	// Process no responders etc.
	if len(m.Data) == 0 && m.Header.Get(statusHdr) == noResponders {
		if paf.retries < paf.maxRetries {
			paf.retries++
			time.AfterFunc(paf.retryWait, func() {
				js.publisher.Lock()
				paf := js.getPAF(id)
				js.publisher.Unlock()
				if paf == nil {
					return
				}
				_, err := js.PublishMsgAsync(paf.msg, func(po *pubOpts) error {
					po.pafRetry = paf
					return nil
				})
				if err != nil {
					js.publisher.Lock()
					doErr(err)
				}
			})
			js.publisher.Unlock()
			return
		}
		delete(js.publisher.acks, id)
		closeStc()
		defer closeDchFn()()
		doErr(ErrNoStreamResponse)
		return
	}

	// Remove
	delete(js.publisher.acks, id)
	closeStc()
	defer closeDchFn()()

	var pa pubAckResponse
	if err := json.Unmarshal(m.Data, &pa); err != nil {
		doErr(ErrInvalidJSAck)
		return
	}
	if pa.Error != nil {
		doErr(pa.Error)
		return
	}
	if pa.PubAck == nil || pa.PubAck.Stream == "" {
		doErr(ErrInvalidJSAck)
		return
	}

	// So here we have received a proper puback.
	paf.ack = pa.PubAck
	if paf.doneCh != nil {
		paf.doneCh <- paf.ack
	}
	js.publisher.Unlock()
}

// resetPendingAcksOnReconnect fails all outstanding pafs with
// nats.ErrDisconnected whenever the connection is lost; exits when the
// connection is closed.
func (js *jetStream) resetPendingAcksOnReconnect() {
	js.publisher.Lock()
	connStatusCh := js.publisher.connStatusCh
	js.publisher.Unlock()
	for {
		newStatus, ok := <-connStatusCh
		if !ok || newStatus == nats.CLOSED {
			return
		}
		js.publisher.Lock()
		errCb := js.publisher.asyncPublisherOpts.aecb
		for id, paf := range js.publisher.acks {
			paf.err = nats.ErrDisconnected
			if paf.errCh != nil {
				paf.errCh <- paf.err
			}
			if errCb != nil {
				// NOTE(review): defer inside this loop accumulates the
				// callbacks until the goroutine exits (connection closed),
				// rather than firing per disconnect — confirm intentional.
				defer errCb(js, paf.msg, nats.ErrDisconnected)
			}
			delete(js.publisher.acks, id)
		}
		if js.publisher.doneCh != nil {
			close(js.publisher.doneCh)
			js.publisher.doneCh = nil
		}
		js.publisher.Unlock()
	}
}

// registerPAF will register for a PubAckFuture.
func (js *jetStream) registerPAF(id string, paf *pubAckFuture) (int, int) {
	js.publisher.Lock()
	if js.publisher.acks == nil {
		js.publisher.acks = make(map[string]*pubAckFuture)
	}
	js.publisher.acks[id] = paf
	// return current pending count and the configured max so the caller
	// can decide whether to stall
	np := len(js.publisher.acks)
	maxpa := js.publisher.asyncPublisherOpts.maxpa
	js.publisher.Unlock()
	return np, maxpa
}

// Lock should be held.
func (js *jetStream) getPAF(id string) *pubAckFuture {
	if js.publisher.acks == nil {
		return nil
	}
	return js.publisher.acks[id]
}

// clearPAF will remove a PubAckFuture that was registered.
func (js *jetStream) clearPAF(id string) {
	js.publisher.Lock()
	delete(js.publisher.acks, id)
	js.publisher.Unlock()
}

// asyncStall returns the channel closed when the number of pending acks
// drops below the configured max, creating it lazily.
func (js *jetStream) asyncStall() <-chan struct{} {
	js.publisher.Lock()
	if js.publisher.stallCh == nil {
		js.publisher.stallCh = make(chan struct{})
	}
	stc := js.publisher.stallCh
	js.publisher.Unlock()
	return stc
}

// Ok lazily creates the buffered done channel; if the ack already arrived
// it is delivered immediately.
func (paf *pubAckFuture) Ok() <-chan *PubAck {
	paf.jsClient.Lock()
	defer paf.jsClient.Unlock()

	if paf.doneCh == nil {
		paf.doneCh = make(chan *PubAck, 1)
		if paf.ack != nil {
			paf.doneCh <- paf.ack
		}
	}

	return paf.doneCh
}

// Err lazily creates the buffered error channel; if an error already
// occurred it is delivered immediately.
func (paf *pubAckFuture) Err() <-chan error {
	paf.jsClient.Lock()
	defer paf.jsClient.Unlock()

	if paf.errCh == nil {
		paf.errCh = make(chan error, 1)
		if paf.err != nil {
			paf.errCh <- paf.err
		}
	}

	return paf.errCh
}

func (paf *pubAckFuture) Msg() *nats.Msg {
	paf.jsClient.RLock()
	defer paf.jsClient.RUnlock()
	return paf.msg
}

// PublishAsyncPending returns the number of async publishes outstanding
// for this context.
func (js *jetStream) PublishAsyncPending() int {
	js.publisher.RLock()
	defer js.publisher.RUnlock()
	return len(js.publisher.acks)
}

// PublishAsyncComplete returns a channel that will be closed when all
// outstanding asynchronously published messages are acknowledged by the
// server.
func (js *jetStream) PublishAsyncComplete() <-chan struct{} {
	js.publisher.Lock()
	defer js.publisher.Unlock()
	if js.publisher.doneCh == nil {
		js.publisher.doneCh = make(chan struct{})
	}
	dch := js.publisher.doneCh
	// nothing pending: close immediately so the caller never blocks
	if len(js.publisher.acks) == 0 {
		close(js.publisher.doneCh)
		js.publisher.doneCh = nil
	}
	return dch
}
nats.go-1.41.0/jetstream/pull.go000066400000000000000000000737451477351342400164750ustar00rootroot00000000000000// Copyright 2022-2025 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package jetstream

import (
	"encoding/json"
	"errors"
	"fmt"
	"math"
	"slices"
	"sync"
	"sync/atomic"
	"time"

	"github.com/nats-io/nats.go"
	"github.com/nats-io/nats.go/internal/syncx"
	"github.com/nats-io/nuid"
)

type (
	// MessagesContext supports iterating over a messages on a stream.
	// It is returned by [Consumer.Messages] method.
	MessagesContext interface {
		// Next retrieves next message on a stream. It will block until the next
		// message is available. If the context is canceled, Next will return
		// ErrMsgIteratorClosed error.
		Next() (Msg, error)

		// Stop unsubscribes from the stream and cancels subscription. Calling
		// Next after calling Stop will return ErrMsgIteratorClosed error.
		// All messages that are already in the buffer are discarded.
		Stop()

		// Drain unsubscribes from the stream and cancels subscription. All
		// messages that are already in the buffer will be available on
		// subsequent calls to Next. After the buffer is drained, Next will
		// return ErrMsgIteratorClosed error.
		Drain()
	}

	// ConsumeContext supports processing incoming messages from a stream.
	// It is returned by [Consumer.Consume] method.
	ConsumeContext interface {
		// Stop unsubscribes from the stream and cancels subscription.
		// No more messages will be received after calling this method.
		// All messages that are already in the buffer are discarded.
		Stop()

		// Drain unsubscribes from the stream and cancels subscription.
		// All messages that are already in the buffer will be processed in callback function.
		Drain()

		// Closed returns a channel that is closed when the consuming is
		// fully stopped/drained. When the channel is closed, no more messages
		// will be received and processing is complete.
		Closed() <-chan struct{}
	}

	// MessageHandler is a handler function used as callback in [Consume].
	MessageHandler func(msg Msg)

	// PullConsumeOpt represent additional options used in [Consume] for pull consumers.
	PullConsumeOpt interface {
		configureConsume(*consumeOpts) error
	}

	// PullMessagesOpt represent additional options used in [Messages] for pull consumers.
	PullMessagesOpt interface {
		configureMessages(*consumeOpts) error
	}

	pullConsumer struct {
		sync.Mutex
		js      *jetStream
		stream  string
		durable bool
		name    string
		info    *ConsumerInfo
		subs    syncx.Map[string, *pullSubscription]
		pinID   string
	}

	pullRequest struct {
		Expires       time.Duration `json:"expires,omitempty"`
		Batch         int           `json:"batch,omitempty"`
		MaxBytes      int           `json:"max_bytes,omitempty"`
		NoWait        bool          `json:"no_wait,omitempty"`
		Heartbeat     time.Duration `json:"idle_heartbeat,omitempty"`
		MinPending    int64         `json:"min_pending,omitempty"`
		MinAckPending int64         `json:"min_ack_pending,omitempty"`
		PinID         string        `json:"id,omitempty"`
		Group         string        `json:"group,omitempty"`
	}

	consumeOpts struct {
		Expires                 time.Duration
		MaxMessages             int
		MaxBytes                int
		LimitSize               bool
		MinPending              int64
		MinAckPending           int64
		Group                   string
		Heartbeat               time.Duration
		ErrHandler              ConsumeErrHandlerFunc
		ReportMissingHeartbeats bool
		ThresholdMessages       int
		ThresholdBytes          int
		StopAfter               int
		stopAfterMsgsLeft       chan int
		notifyOnReconnect       bool
	}

	ConsumeErrHandlerFunc func(consumeCtx ConsumeContext, err error)

	pullSubscription struct {
		sync.Mutex
		id                string
		consumer          *pullConsumer
		subscription      *nats.Subscription
		msgs              chan *nats.Msg
		errs              chan error
		pending           pendingMsgs
		hbMonitor         *hbMonitor
		fetchInProgress   atomic.Uint32
		closed            atomic.Uint32
		draining          atomic.Uint32
		done              chan struct{}
		connStatusChanged chan nats.Status
		fetchNext         chan *pullRequest
		consumeOpts       *consumeOpts
		delivered         int
		closedCh          chan struct{}
	}

	pendingMsgs struct {
		msgCount  int
		byteCount int
	}

	MessageBatch interface {
		Messages() <-chan Msg
		Error() error
	}

	fetchResult struct {
		sync.Mutex
		msgs chan Msg
		err  error
		done bool
		sseq uint64
	}

	FetchOpt func(*pullRequest) error

	hbMonitor struct {
		timer *time.Timer
		sync.Mutex
	}
)

const (
	DefaultMaxMessages = 500
	DefaultExpires     = 30 * time.Second

	defaultBatchMaxBytesOnly = 1_000_000

	unset = -1
)

// min returns the smaller of two ints.
// NOTE(review): shadows the Go 1.21 builtin of the same name within this
// package; behavior is identical for ints.
func min(x, y int) int {
	if x < y {
		return x
	}
	return y
}

// Consume can be used to continuously receive messages and handle them
// with the provided
callback function. Consume cannot be used concurrently
// when using ordered consumer.
//
// See [Consumer.Consume] for more details.
func (p *pullConsumer) Consume(handler MessageHandler, opts ...PullConsumeOpt) (ConsumeContext, error) {
	if handler == nil {
		return nil, ErrHandlerRequired
	}
	consumeOpts, err := parseConsumeOpts(false, opts...)
	if err != nil {
		return nil, fmt.Errorf("%w: %s", ErrInvalidOption, err)
	}

	// validate priority group against the consumer's configuration
	if len(p.info.Config.PriorityGroups) != 0 {
		if consumeOpts.Group == "" {
			return nil, fmt.Errorf("%w: %s", ErrInvalidOption, "priority group is required for priority consumer")
		}

		if !slices.Contains(p.info.Config.PriorityGroups, consumeOpts.Group) {
			return nil, fmt.Errorf("%w: %s", ErrInvalidOption, "invalid priority group")
		}
	} else if consumeOpts.Group != "" {
		return nil, fmt.Errorf("%w: %s", ErrInvalidOption, "priority group is not supported for this consumer")
	}
	p.Lock()

	subject := p.js.apiSubject(fmt.Sprintf(apiRequestNextT, p.stream, p.name))

	consumeID := nuid.Next()
	sub := &pullSubscription{
		id:          consumeID,
		consumer:    p,
		errs:        make(chan error, 10),
		done:        make(chan struct{}, 1),
		fetchNext:   make(chan *pullRequest, 1),
		consumeOpts: consumeOpts,
	}
	sub.connStatusChanged = p.js.conn.StatusChanged(nats.CONNECTED, nats.RECONNECTING)

	sub.hbMonitor = sub.scheduleHeartbeatCheck(consumeOpts.Heartbeat)

	p.subs.Store(sub.id, sub)
	p.Unlock()

	// internalHandler processes every message delivered to the inbox:
	// status/heartbeat messages as well as user messages.
	internalHandler := func(msg *nats.Msg) {
		if sub.hbMonitor != nil {
			sub.hbMonitor.Stop()
		}
		userMsg, msgErr := checkMsg(msg)
		if !userMsg && msgErr == nil {
			// plain idle heartbeat: just re-arm the monitor
			if sub.hbMonitor != nil {
				sub.hbMonitor.Reset(2 * consumeOpts.Heartbeat)
			}
			return
		}
		defer func() {
			sub.Lock()
			sub.checkPending()
			if sub.hbMonitor != nil {
				sub.hbMonitor.Reset(2 * consumeOpts.Heartbeat)
			}
			sub.Unlock()
		}()
		if !userMsg {
			// heartbeat message
			if msgErr == nil {
				return
			}
			sub.Lock()
			err := sub.handleStatusMsg(msg, msgErr)
			sub.Unlock()
			if err != nil {
				if sub.closed.Load() == 1 {
					return
				}
				if sub.consumeOpts.ErrHandler != nil {
					sub.consumeOpts.ErrHandler(sub, err)
				}
				sub.Stop()
			}
			return
		}
		if pinId := msg.Header.Get("Nats-Pin-Id"); pinId != "" {
			p.setPinID(pinId)
		}
		handler(p.js.toJSMsg(msg))
		sub.Lock()
		sub.decrementPendingMsgs(msg)
		sub.incrementDeliveredMsgs()
		sub.Unlock()

		if sub.consumeOpts.StopAfter > 0 && sub.consumeOpts.StopAfter == sub.delivered {
			sub.Stop()
		}
	}
	inbox := p.js.conn.NewInbox()
	sub.subscription, err = p.js.conn.Subscribe(inbox, internalHandler)
	if err != nil {
		return nil, err
	}
	sub.subscription.SetClosedHandler(func(sid string) func(string) {
		return func(subject string) {
			p.subs.Delete(sid)
			sub.draining.CompareAndSwap(1, 0)
			sub.Lock()
			if sub.closedCh != nil {
				close(sub.closedCh)
				sub.closedCh = nil
			}
			sub.Unlock()
		}
	}(sub.id))

	sub.Lock()
	// initial pull
	sub.resetPendingMsgs()
	batchSize := sub.consumeOpts.MaxMessages
	if sub.consumeOpts.StopAfter > 0 {
		batchSize = min(batchSize, sub.consumeOpts.StopAfter-sub.delivered)
	}
	if err := sub.pull(&pullRequest{
		Expires:       consumeOpts.Expires,
		Batch:         batchSize,
		MaxBytes:      consumeOpts.MaxBytes,
		Heartbeat:     consumeOpts.Heartbeat,
		MinPending:    consumeOpts.MinPending,
		MinAckPending: consumeOpts.MinAckPending,
		Group:         consumeOpts.Group,
		PinID:         p.getPinID(),
	}, subject); err != nil {
		sub.errs <- err
	}
	sub.Unlock()

	// watcher goroutine: handles reconnects, heartbeat misses and other
	// errors by re-issuing pull requests as needed
	go func() {
		isConnected := true
		for {
			if sub.closed.Load() == 1 {
				return
			}
			select {
			case status, ok := <-sub.connStatusChanged:
				if !ok {
					continue
				}
				if status == nats.RECONNECTING {
					if sub.hbMonitor != nil {
						sub.hbMonitor.Stop()
					}
					isConnected = false
				}
				if status == nats.CONNECTED {
					sub.Lock()
					if !isConnected {
						isConnected = true
						if sub.consumeOpts.notifyOnReconnect {
							sub.errs <- errConnected
						}

						sub.fetchNext <- &pullRequest{
							Expires:       sub.consumeOpts.Expires,
							Batch:         sub.consumeOpts.MaxMessages,
							MaxBytes:      sub.consumeOpts.MaxBytes,
							Heartbeat:     sub.consumeOpts.Heartbeat,
							MinPending:    sub.consumeOpts.MinPending,
							MinAckPending: sub.consumeOpts.MinAckPending,
							Group:         sub.consumeOpts.Group,
							PinID:         p.getPinID(),
						}
						if sub.hbMonitor != nil {
							sub.hbMonitor.Reset(2 * sub.consumeOpts.Heartbeat)
						}
						sub.resetPendingMsgs()
					}
					sub.Unlock()
				}
			case err := <-sub.errs:
				sub.Lock()
				if sub.consumeOpts.ErrHandler != nil {
					sub.consumeOpts.ErrHandler(sub, err)
				}
				if errors.Is(err, ErrNoHeartbeat) {
					// missed heartbeats: issue a fresh pull request
					batchSize := sub.consumeOpts.MaxMessages
					if sub.consumeOpts.StopAfter > 0 {
						batchSize = min(batchSize, sub.consumeOpts.StopAfter-sub.delivered)
					}
					sub.fetchNext <- &pullRequest{
						Expires:       sub.consumeOpts.Expires,
						Batch:         batchSize,
						MaxBytes:      sub.consumeOpts.MaxBytes,
						Heartbeat:     sub.consumeOpts.Heartbeat,
						MinPending:    sub.consumeOpts.MinPending,
						MinAckPending: sub.consumeOpts.MinAckPending,
						Group:         sub.consumeOpts.Group,
						PinID:         p.getPinID(),
					}
					if sub.hbMonitor != nil {
						sub.hbMonitor.Reset(2 * sub.consumeOpts.Heartbeat)
					}
					sub.resetPendingMsgs()
				}
				sub.Unlock()
			case <-sub.done:
				return
			}
		}
	}()

	go sub.pullMessages(subject)

	return sub, nil
}

// resetPendingMsgs resets pending message count and byte count
// to the values set in consumeOpts
// lock should be held before calling this method
func (s *pullSubscription) resetPendingMsgs() {
	s.pending.msgCount = s.consumeOpts.MaxMessages
	s.pending.byteCount = s.consumeOpts.MaxBytes
}

// decrementPendingMsgs decrements pending message count and byte count
// lock should be held before calling this method
func (s *pullSubscription) decrementPendingMsgs(msg *nats.Msg) {
	s.pending.msgCount--
	if s.consumeOpts.MaxBytes != 0 && !s.consumeOpts.LimitSize {
		s.pending.byteCount -= msg.Size()
	}
}

// incrementDeliveredMsgs increments delivered message count
// lock should be held before calling this method
func (s *pullSubscription) incrementDeliveredMsgs() {
	s.delivered++
}

// checkPending verifies whether there are enough messages in
// the buffer to trigger a new pull request.
// lock should be held before calling this method
func (s *pullSubscription) checkPending() {
	// check if we went below any threshold
	// we don't want to track bytes threshold if either it's not set or we used
	// PullMaxMessagesWithBytesLimit
	if (s.pending.msgCount < s.consumeOpts.ThresholdMessages ||
		(s.pending.byteCount < s.consumeOpts.ThresholdBytes && s.consumeOpts.MaxBytes != 0 && !s.consumeOpts.LimitSize)) &&
		s.fetchInProgress.Load() == 0 {

		var batchSize, maxBytes int
		batchSize = s.consumeOpts.MaxMessages - s.pending.msgCount
		if s.consumeOpts.MaxBytes != 0 {
			if s.consumeOpts.LimitSize {
				maxBytes = s.consumeOpts.MaxBytes
			} else {
				maxBytes = s.consumeOpts.MaxBytes - s.pending.byteCount
				// when working with max bytes only, always ask for full batch
				batchSize = s.consumeOpts.MaxMessages
			}
		}
		if s.consumeOpts.StopAfter > 0 {
			// never request more than what is still allowed to be delivered
			batchSize = min(batchSize, s.consumeOpts.StopAfter-s.delivered-s.pending.msgCount)
		}
		if batchSize > 0 {
			pinID := ""
			if s.consumer != nil {
				pinID = s.consumer.getPinID()
			}
			// hand the request off to the pullMessages goroutine
			s.fetchNext <- &pullRequest{
				Expires:       s.consumeOpts.Expires,
				Batch:         batchSize,
				MaxBytes:      maxBytes,
				Heartbeat:     s.consumeOpts.Heartbeat,
				PinID:         pinID,
				Group:         s.consumeOpts.Group,
				MinPending:    s.consumeOpts.MinPending,
				MinAckPending: s.consumeOpts.MinAckPending,
			}
			// optimistically reset counters to the configured maximums;
			// status messages reconcile them if the server delivers less
			s.pending.msgCount = s.consumeOpts.MaxMessages
			s.pending.byteCount = s.consumeOpts.MaxBytes
		}
	}
}

// Messages returns MessagesContext, allowing continuously iterating
// over messages on a stream. Messages cannot be used concurrently
// when using ordered consumer.
//
// See [Consumer.Messages] for more details.
func (p *pullConsumer) Messages(opts ...PullMessagesOpt) (MessagesContext, error) {
	consumeOpts, err := parseMessagesOpts(false, opts...)
	if err != nil {
		return nil, fmt.Errorf("%w: %s", ErrInvalidOption, err)
	}

	// validate the priority group against the consumer configuration:
	// required (and must match) when the consumer declares groups,
	// forbidden otherwise
	if len(p.info.Config.PriorityGroups) != 0 {
		if consumeOpts.Group == "" {
			return nil, fmt.Errorf("%w: %s", ErrInvalidOption, "priority group is required for priority consumer")
		}
		if !slices.Contains(p.info.Config.PriorityGroups, consumeOpts.Group) {
			return nil, fmt.Errorf("%w: %s", ErrInvalidOption, "invalid priority group")
		}
	} else if consumeOpts.Group != "" {
		return nil, fmt.Errorf("%w: %s", ErrInvalidOption, "priority group is not supported for this consumer")
	}

	p.Lock()
	subject := p.js.apiSubject(fmt.Sprintf(apiRequestNextT, p.stream, p.name))
	msgs := make(chan *nats.Msg, consumeOpts.MaxMessages)

	consumeID := nuid.Next()
	sub := &pullSubscription{
		id:          consumeID,
		consumer:    p,
		done:        make(chan struct{}, 1),
		msgs:        msgs,
		errs:        make(chan error, 10),
		fetchNext:   make(chan *pullRequest, 1),
		consumeOpts: consumeOpts,
	}
	sub.connStatusChanged = p.js.conn.StatusChanged(nats.CONNECTED, nats.RECONNECTING)

	inbox := p.js.conn.NewInbox()
	sub.subscription, err = p.js.conn.ChanSubscribe(inbox, sub.msgs)
	if err != nil {
		p.Unlock()
		return nil, err
	}
	sub.subscription.SetClosedHandler(func(sid string) func(string) {
		return func(subject string) {
			if sub.draining.Load() != 1 {
				// if we're not draining, subscription can be closed as soon
				// as closed handler is called
				// otherwise, we need to wait until all messages are drained
				// in Next
				p.subs.Delete(sid)
			}
			close(msgs)
		}
	}(sub.id))

	p.subs.Store(sub.id, sub)
	p.Unlock()

	go sub.pullMessages(subject)

	// forward connection status transitions to Next() as sentinel errors
	go func() {
		for {
			select {
			case status, ok := <-sub.connStatusChanged:
				if !ok {
					return
				}
				if status == nats.CONNECTED {
					sub.errs <- errConnected
				}
				if status == nats.RECONNECTING {
					sub.errs <- errDisconnected
				}
			case <-sub.done:
				return
			}
		}
	}()

	return sub, nil
}

// sentinel errors used internally to signal connection status changes;
// never returned to users except via notifyOnReconnect
var (
	errConnected    = errors.New("connected")
	errDisconnected = errors.New("disconnected")
)

// Next retrieves next message on a stream. It will block until the next
// message is available.
// If the context is canceled, Next will return
// ErrMsgIteratorClosed error.
func (s *pullSubscription) Next() (Msg, error) {
	s.Lock()
	defer s.Unlock()
	drainMode := s.draining.Load() == 1
	closed := s.closed.Load() == 1
	// in drain mode, keep serving buffered messages even after close
	if closed && !drainMode {
		return nil, ErrMsgIteratorClosed
	}
	hbMonitor := s.scheduleHeartbeatCheck(s.consumeOpts.Heartbeat)
	defer func() {
		if hbMonitor != nil {
			hbMonitor.Stop()
		}
	}()

	isConnected := true
	if s.consumeOpts.StopAfter > 0 && s.delivered >= s.consumeOpts.StopAfter {
		s.Stop()
		return nil, ErrMsgIteratorClosed
	}
	for {
		// issue a new pull request if buffered counts fell below thresholds
		s.checkPending()
		select {
		case msg, ok := <-s.msgs:
			if !ok {
				// if msgs channel is closed, it means that subscription was either drained or stopped
				s.consumer.subs.Delete(s.id)
				s.draining.CompareAndSwap(1, 0)
				return nil, ErrMsgIteratorClosed
			}
			if hbMonitor != nil {
				hbMonitor.Reset(2 * s.consumeOpts.Heartbeat)
			}
			userMsg, msgErr := checkMsg(msg)
			if !userMsg {
				// heartbeat message
				if msgErr == nil {
					continue
				}
				if err := s.handleStatusMsg(msg, msgErr); err != nil {
					s.Stop()
					return nil, err
				}
				continue
			}
			if pinId := msg.Header.Get("Nats-Pin-Id"); pinId != "" {
				s.consumer.setPinID(pinId)
			}
			s.decrementPendingMsgs(msg)
			s.incrementDeliveredMsgs()
			return s.consumer.js.toJSMsg(msg), nil
		case err := <-s.errs:
			if errors.Is(err, ErrNoHeartbeat) {
				// force a fresh pull on next checkPending
				s.pending.msgCount = 0
				s.pending.byteCount = 0
				if s.consumeOpts.ReportMissingHeartbeats {
					return nil, err
				}
				if hbMonitor != nil {
					hbMonitor.Reset(2 * s.consumeOpts.Heartbeat)
				}
			}
			if errors.Is(err, errConnected) {
				// only react to a reconnect, not the initial CONNECTED event
				if !isConnected {
					isConnected = true

					if s.consumeOpts.notifyOnReconnect {
						return nil, errConnected
					}
					s.pending.msgCount = 0
					s.pending.byteCount = 0
					if hbMonitor != nil {
						hbMonitor.Reset(2 * s.consumeOpts.Heartbeat)
					}
				}
			}
			if errors.Is(err, errDisconnected) {
				// suspend heartbeat checks while disconnected
				if hbMonitor != nil {
					hbMonitor.Stop()
				}
				isConnected = false
			}
		}
	}
}

// handleStatusMsg processes a non-user (status) message from the server.
// Terminal errors are returned to the caller; recoverable ones adjust the
// pending counters and/or invoke the user error handler, returning nil.
func (s *pullSubscription) handleStatusMsg(msg *nats.Msg, msgErr error) error {
	if !errors.Is(msgErr, nats.ErrTimeout) && !errors.Is(msgErr, ErrMaxBytesExceeded) &&
		!errors.Is(msgErr, ErrBatchCompleted) {
		// terminal for the iterator
		if errors.Is(msgErr, ErrConsumerDeleted) || errors.Is(msgErr, ErrBadRequest) {
			return msgErr
		}
		if errors.Is(msgErr, ErrPinIDMismatch) {
			// our pin is stale; clear it and force a fresh pull
			s.consumer.setPinID("")
			s.pending.msgCount = 0
			s.pending.byteCount = 0
		}
		if s.consumeOpts.ErrHandler != nil {
			s.consumeOpts.ErrHandler(s, msgErr)
		}
		if errors.Is(msgErr, ErrConsumerLeadershipChanged) {
			s.pending.msgCount = 0
			s.pending.byteCount = 0
		}
		return nil
	}
	// request expired/completed: reconcile pending counters with the
	// server-reported leftover totals
	msgsLeft, bytesLeft, err := parsePending(msg)
	if err != nil {
		return err
	}
	s.pending.msgCount -= msgsLeft
	if s.pending.msgCount < 0 {
		s.pending.msgCount = 0
	}
	if s.consumeOpts.MaxBytes > 0 && !s.consumeOpts.LimitSize {
		s.pending.byteCount -= bytesLeft
		if s.pending.byteCount < 0 {
			s.pending.byteCount = 0
		}
	}
	return nil
}

// Stop halts the heartbeat timer; safe for concurrent use.
func (hb *hbMonitor) Stop() {
	hb.Mutex.Lock()
	hb.timer.Stop()
	hb.Mutex.Unlock()
}

// Reset rearms the heartbeat timer with the given duration; safe for
// concurrent use.
func (hb *hbMonitor) Reset(dur time.Duration) {
	hb.Mutex.Lock()
	hb.timer.Reset(dur)
	hb.Mutex.Unlock()
}

// Stop unsubscribes from the stream and cancels subscription. Calling
// Next after calling Stop will return ErrMsgIteratorClosed error.
// All messages that are already in the buffer are discarded.
func (s *pullSubscription) Stop() {
	// CompareAndSwap makes Stop idempotent
	if !s.closed.CompareAndSwap(0, 1) {
		return
	}
	close(s.done)
	if s.consumeOpts.stopAfterMsgsLeft != nil {
		if s.delivered >= s.consumeOpts.StopAfter {
			close(s.consumeOpts.stopAfterMsgsLeft)
		} else {
			s.consumeOpts.stopAfterMsgsLeft <- s.consumeOpts.StopAfter - s.delivered
		}
	}
}

// Drain unsubscribes from the stream and cancels subscription. All
// messages that are already in the buffer will be available on
// subsequent calls to Next. After the buffer is drained, Next will
// return ErrMsgIteratorClosed error.
func (s *pullSubscription) Drain() { if !s.closed.CompareAndSwap(0, 1) { return } s.draining.Store(1) close(s.done) if s.consumeOpts.stopAfterMsgsLeft != nil { if s.delivered >= s.consumeOpts.StopAfter { close(s.consumeOpts.stopAfterMsgsLeft) } else { s.consumeOpts.stopAfterMsgsLeft <- s.consumeOpts.StopAfter - s.delivered } } } // Closed returns a channel that is closed when consuming is // fully stopped/drained. When the channel is closed, no more messages // will be received and processing is complete. func (s *pullSubscription) Closed() <-chan struct{} { s.Lock() defer s.Unlock() closedCh := s.closedCh if closedCh == nil { closedCh = make(chan struct{}) s.closedCh = closedCh } if !s.subscription.IsValid() { close(s.closedCh) s.closedCh = nil } return closedCh } // Fetch sends a single request to retrieve given number of messages. // It will wait up to provided expiry time if not all messages are available. func (p *pullConsumer) Fetch(batch int, opts ...FetchOpt) (MessageBatch, error) { req := &pullRequest{ Batch: batch, Expires: DefaultExpires, Heartbeat: unset, } for _, opt := range opts { if err := opt(req); err != nil { return nil, err } } // if heartbeat was not explicitly set, set it to 5 seconds for longer pulls // and disable it for shorter pulls if req.Heartbeat == unset { if req.Expires >= 10*time.Second { req.Heartbeat = 5 * time.Second } else { req.Heartbeat = 0 } } if req.Expires < 2*req.Heartbeat { return nil, fmt.Errorf("%w: expiry time should be at least 2 times the heartbeat", ErrInvalidOption) } return p.fetch(req) } // FetchBytes is used to retrieve up to a provided bytes from the stream. 
func (p *pullConsumer) FetchBytes(maxBytes int, opts ...FetchOpt) (MessageBatch, error) { req := &pullRequest{ Batch: defaultBatchMaxBytesOnly, MaxBytes: maxBytes, Expires: DefaultExpires, Heartbeat: unset, } for _, opt := range opts { if err := opt(req); err != nil { return nil, err } } // if heartbeat was not explicitly set, set it to 5 seconds for longer pulls // and disable it for shorter pulls if req.Heartbeat == unset { if req.Expires >= 10*time.Second { req.Heartbeat = 5 * time.Second } else { req.Heartbeat = 0 } } if req.Expires < 2*req.Heartbeat { return nil, fmt.Errorf("%w: expiry time should be at least 2 times the heartbeat", ErrInvalidOption) } return p.fetch(req) } // FetchNoWait sends a single request to retrieve given number of messages. // FetchNoWait will only return messages that are available at the time of the // request. It will not wait for more messages to arrive. func (p *pullConsumer) FetchNoWait(batch int) (MessageBatch, error) { req := &pullRequest{ Batch: batch, NoWait: true, } return p.fetch(req) } func (p *pullConsumer) fetch(req *pullRequest) (MessageBatch, error) { res := &fetchResult{ msgs: make(chan Msg, req.Batch), } msgs := make(chan *nats.Msg, 2*req.Batch) subject := p.js.apiSubject(fmt.Sprintf(apiRequestNextT, p.stream, p.name)) sub := &pullSubscription{ consumer: p, done: make(chan struct{}, 1), msgs: msgs, errs: make(chan error, 10), } inbox := p.js.conn.NewInbox() var err error sub.subscription, err = p.js.conn.ChanSubscribe(inbox, sub.msgs) if err != nil { return nil, err } req.PinID = p.getPinID() if err := sub.pull(req, subject); err != nil { return nil, err } var receivedMsgs, receivedBytes int hbTimer := sub.scheduleHeartbeatCheck(req.Heartbeat) go func(res *fetchResult) { defer sub.subscription.Unsubscribe() defer close(res.msgs) for { select { case msg := <-msgs: res.Lock() if hbTimer != nil { hbTimer.Reset(2 * req.Heartbeat) } userMsg, err := checkMsg(msg) if err != nil { errNotTimeoutOrNoMsgs := !errors.Is(err, 
nats.ErrTimeout) && !errors.Is(err, ErrNoMessages) if errNotTimeoutOrNoMsgs && !errors.Is(err, ErrMaxBytesExceeded) { res.err = err } if errors.Is(err, ErrPinIDMismatch) { p.setPinID("") } res.done = true res.Unlock() return } if !userMsg { res.Unlock() continue } if pinId := msg.Header.Get("Nats-Pin-Id"); pinId != "" { p.setPinID(pinId) } res.msgs <- p.js.toJSMsg(msg) meta, err := msg.Metadata() if err != nil { res.err = fmt.Errorf("parsing message metadata: %s", err) res.done = true res.Unlock() return } res.sseq = meta.Sequence.Stream receivedMsgs++ if req.MaxBytes != 0 { receivedBytes += msg.Size() } if receivedMsgs == req.Batch || (req.MaxBytes != 0 && receivedBytes >= req.MaxBytes) { res.done = true res.Unlock() return } res.Unlock() case err := <-sub.errs: res.Lock() res.err = err res.done = true res.Unlock() return case <-time.After(req.Expires + 1*time.Second): res.Lock() res.done = true res.Unlock() return } } }(res) return res, nil } func (fr *fetchResult) Messages() <-chan Msg { fr.Lock() defer fr.Unlock() return fr.msgs } func (fr *fetchResult) Error() error { fr.Lock() defer fr.Unlock() return fr.err } func (fr *fetchResult) closed() bool { fr.Lock() defer fr.Unlock() return fr.done } // Next is used to retrieve the next message from the stream. This // method will block until the message is retrieved or timeout is // reached. func (p *pullConsumer) Next(opts ...FetchOpt) (Msg, error) { res, err := p.Fetch(1, opts...) 
if err != nil { return nil, err } msg := <-res.Messages() if msg != nil { return msg, nil } if res.Error() == nil { return nil, nats.ErrTimeout } return nil, res.Error() } func (s *pullSubscription) pullMessages(subject string) { for { select { case req := <-s.fetchNext: s.fetchInProgress.Store(1) if err := s.pull(req, subject); err != nil { if errors.Is(err, ErrMsgIteratorClosed) { s.cleanup() return } s.errs <- err } s.fetchInProgress.Store(0) case <-s.done: s.cleanup() return } } } func (s *pullSubscription) scheduleHeartbeatCheck(dur time.Duration) *hbMonitor { if dur == 0 { return nil } return &hbMonitor{ timer: time.AfterFunc(2*dur, func() { s.errs <- ErrNoHeartbeat }), } } func (s *pullSubscription) cleanup() { // For now this function does not need to hold the lock. // Holding the lock here might cause a deadlock if Next() // is already holding the lock and waiting. // The fields that are read (subscription, hbMonitor) // are read only (Only written on creation of pullSubscription). if s.subscription == nil || !s.subscription.IsValid() { return } if s.hbMonitor != nil { s.hbMonitor.Stop() } drainMode := s.draining.Load() == 1 if drainMode { s.subscription.Drain() } else { s.subscription.Unsubscribe() } s.closed.Store(1) } // pull sends a pull request to the server and waits for messages using a subscription from [pullSubscription]. 
// Messages will be fetched up to given batch_size or until there are no more messages or timeout is returned func (s *pullSubscription) pull(req *pullRequest, subject string) error { s.consumer.Lock() defer s.consumer.Unlock() if s.closed.Load() == 1 { return ErrMsgIteratorClosed } if req.Batch < 1 { return fmt.Errorf("%w: batch size must be at least 1", nats.ErrInvalidArg) } reqJSON, err := json.Marshal(req) if err != nil { return err } reply := s.subscription.Subject if err := s.consumer.js.conn.PublishRequest(subject, reply, reqJSON); err != nil { return err } return nil } func parseConsumeOpts(ordered bool, opts ...PullConsumeOpt) (*consumeOpts, error) { consumeOpts := &consumeOpts{ MaxMessages: unset, MaxBytes: unset, Expires: DefaultExpires, Heartbeat: unset, ReportMissingHeartbeats: true, StopAfter: unset, } for _, opt := range opts { if err := opt.configureConsume(consumeOpts); err != nil { return nil, err } } if err := consumeOpts.setDefaults(ordered); err != nil { return nil, err } return consumeOpts, nil } func parseMessagesOpts(ordered bool, opts ...PullMessagesOpt) (*consumeOpts, error) { consumeOpts := &consumeOpts{ MaxMessages: unset, MaxBytes: unset, Expires: DefaultExpires, Heartbeat: unset, ReportMissingHeartbeats: true, StopAfter: unset, } for _, opt := range opts { if err := opt.configureMessages(consumeOpts); err != nil { return nil, err } } if err := consumeOpts.setDefaults(ordered); err != nil { return nil, err } return consumeOpts, nil } func (consumeOpts *consumeOpts) setDefaults(ordered bool) error { // we cannot use both max messages and max bytes unless we're using max bytes as fetch size limiter if consumeOpts.MaxBytes != unset && consumeOpts.MaxMessages != unset && !consumeOpts.LimitSize { return errors.New("only one of MaxMessages and MaxBytes can be specified") } if consumeOpts.MaxBytes != unset && !consumeOpts.LimitSize { // we used PullMaxBytes setting, set MaxMessages to a high value consumeOpts.MaxMessages = 
defaultBatchMaxBytesOnly } else if consumeOpts.MaxMessages == unset { // otherwise, if max messages is not set, set it to default value consumeOpts.MaxMessages = DefaultMaxMessages } // if user did not set max bytes, set it to 0 if consumeOpts.MaxBytes == unset { consumeOpts.MaxBytes = 0 } if consumeOpts.ThresholdMessages == 0 { // half of the max messages, rounded up consumeOpts.ThresholdMessages = int(math.Ceil(float64(consumeOpts.MaxMessages) / 2)) } if consumeOpts.ThresholdBytes == 0 { // half of the max bytes, rounded up consumeOpts.ThresholdBytes = int(math.Ceil(float64(consumeOpts.MaxBytes) / 2)) } // set default heartbeats if consumeOpts.Heartbeat == unset { // by default, use 50% of expiry time consumeOpts.Heartbeat = consumeOpts.Expires / 2 if ordered { // for ordered consumers, the default heartbeat is 5 seconds if consumeOpts.Expires < 10*time.Second { consumeOpts.Heartbeat = consumeOpts.Expires / 2 } else { consumeOpts.Heartbeat = 5 * time.Second } } else if consumeOpts.Heartbeat > 30*time.Second { // cap the heartbeat to 30 seconds consumeOpts.Heartbeat = 30 * time.Second } } if consumeOpts.Heartbeat > consumeOpts.Expires/2 { return errors.New("the value of Heartbeat must be less than 50%% of expiry") } return nil } func (c *pullConsumer) getPinID() string { c.Lock() defer c.Unlock() return c.pinID } func (c *pullConsumer) setPinID(pinID string) { c.Lock() defer c.Unlock() c.pinID = pinID } nats.go-1.41.0/jetstream/stream.go000066400000000000000000000542301477351342400170040ustar00rootroot00000000000000// Copyright 2022-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package jetstream import ( "context" "encoding/json" "errors" "fmt" "strconv" "time" "github.com/nats-io/nats.go" "github.com/nats-io/nuid" ) type ( // Stream contains CRUD methods on a consumer via [ConsumerManager], as well // as operations on an existing stream. It allows fetching and removing // messages from a stream, as well as purging a stream. Stream interface { ConsumerManager // Info returns StreamInfo from the server. Info(ctx context.Context, opts ...StreamInfoOpt) (*StreamInfo, error) // CachedInfo returns ConsumerInfo currently cached on this stream. // This method does not perform any network requests. The cached // StreamInfo is updated on every call to Info and Update. CachedInfo() *StreamInfo // Purge removes messages from a stream. It is a destructive operation. // Use with caution. See StreamPurgeOpt for available options. Purge(ctx context.Context, opts ...StreamPurgeOpt) error // GetMsg retrieves a raw stream message stored in JetStream by sequence number. GetMsg(ctx context.Context, seq uint64, opts ...GetMsgOpt) (*RawStreamMsg, error) // GetLastMsgForSubject retrieves the last raw stream message stored in // JetStream on a given subject subject. GetLastMsgForSubject(ctx context.Context, subject string) (*RawStreamMsg, error) // DeleteMsg deletes a message from a stream. // On the server, the message is marked as erased, but not overwritten. DeleteMsg(ctx context.Context, seq uint64) error // SecureDeleteMsg deletes a message from a stream. The deleted message // is overwritten with random data. 
As a result, this operation is slower // than DeleteMsg. SecureDeleteMsg(ctx context.Context, seq uint64) error } // ConsumerManager provides CRUD API for managing consumers. It is // available as a part of [Stream] interface. CreateConsumer, // UpdateConsumer, CreateOrUpdateConsumer and Consumer methods return a // [Consumer] interface, allowing to operate on a consumer (e.g. consume // messages). ConsumerManager interface { // CreateOrUpdateConsumer creates a consumer on a given stream with // given config. If consumer already exists, it will be updated (if // possible). Consumer interface is returned, allowing to operate on a // consumer (e.g. fetch messages). CreateOrUpdateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error) // CreateConsumer creates a consumer on a given stream with given // config. If consumer already exists and the provided configuration // differs from its configuration, ErrConsumerExists is returned. If the // provided configuration is the same as the existing consumer, the // existing consumer is returned. Consumer interface is returned, // allowing to operate on a consumer (e.g. fetch messages). CreateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error) // UpdateConsumer updates an existing consumer. If consumer does not // exist, ErrConsumerDoesNotExist is returned. Consumer interface is // returned, allowing to operate on a consumer (e.g. fetch messages). UpdateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error) // OrderedConsumer returns an OrderedConsumer instance. OrderedConsumer // are managed by the library and provide a simple way to consume // messages from a stream. Ordered consumers are ephemeral in-memory // pull consumers and are resilient to deletes and restarts. OrderedConsumer(ctx context.Context, cfg OrderedConsumerConfig) (Consumer, error) // Consumer returns an interface to an existing consumer, allowing processing // of messages. 
If consumer does not exist, ErrConsumerNotFound is // returned. Consumer(ctx context.Context, consumer string) (Consumer, error) // DeleteConsumer removes a consumer with given name from a stream. // If consumer does not exist, ErrConsumerNotFound is returned. DeleteConsumer(ctx context.Context, consumer string) error // PauseConsumer pauses a consumer. PauseConsumer(ctx context.Context, consumer string, pauseUntil time.Time) (*ConsumerPauseResponse, error) // ResumeConsumer resumes a consumer. ResumeConsumer(ctx context.Context, consumer string) (*ConsumerPauseResponse, error) // ListConsumers returns ConsumerInfoLister enabling iterating over a // channel of consumer infos. ListConsumers(context.Context) ConsumerInfoLister // ConsumerNames returns a ConsumerNameLister enabling iterating over a // channel of consumer names. ConsumerNames(context.Context) ConsumerNameLister // UnpinConsumer unpins the currently pinned client for a consumer for the given group name. // If consumer does not exist, ErrConsumerNotFound is returned. UnpinConsumer(ctx context.Context, consumer string, group string) error } RawStreamMsg struct { Subject string Sequence uint64 Header nats.Header Data []byte Time time.Time } stream struct { name string info *StreamInfo js *jetStream } // StreamInfoOpt is a function setting options for [Stream.Info] StreamInfoOpt func(*streamInfoRequest) error streamInfoRequest struct { apiPaged DeletedDetails bool `json:"deleted_details,omitempty"` SubjectFilter string `json:"subjects_filter,omitempty"` } consumerInfoResponse struct { apiResponse *ConsumerInfo } // StreamPurgeOpt is a function setting options for [Stream.Purge] StreamPurgeOpt func(*StreamPurgeRequest) error // StreamPurgeRequest is an API request body to purge a stream. StreamPurgeRequest struct { // Purge up to but not including sequence. Sequence uint64 `json:"seq,omitempty"` // Subject to match against messages for the purge command. 
Subject string `json:"filter,omitempty"` // Number of messages to keep. Keep uint64 `json:"keep,omitempty"` } streamPurgeResponse struct { apiResponse Success bool `json:"success,omitempty"` Purged uint64 `json:"purged"` } consumerDeleteResponse struct { apiResponse Success bool `json:"success,omitempty"` } consumerPauseRequest struct { PauseUntil *time.Time `json:"pause_until,omitempty"` } ConsumerPauseResponse struct { // Paused is true if the consumer is paused. Paused bool `json:"paused"` // PauseUntil is the time until the consumer is paused. PauseUntil time.Time `json:"pause_until"` // PauseRemaining is the time remaining until the consumer is paused. PauseRemaining time.Duration `json:"pause_remaining,omitempty"` } consumerPauseApiResponse struct { apiResponse ConsumerPauseResponse } // GetMsgOpt is a function setting options for [Stream.GetMsg] GetMsgOpt func(*apiMsgGetRequest) error apiMsgGetRequest struct { Seq uint64 `json:"seq,omitempty"` LastFor string `json:"last_by_subj,omitempty"` NextFor string `json:"next_by_subj,omitempty"` } // apiMsgGetResponse is the response for a Stream get request. apiMsgGetResponse struct { apiResponse Message *storedMsg `json:"message,omitempty"` } // storedMsg is a raw message stored in JetStream. storedMsg struct { Subject string `json:"subject"` Sequence uint64 `json:"seq"` Header []byte `json:"hdrs,omitempty"` Data []byte `json:"data,omitempty"` Time time.Time `json:"time"` } msgDeleteRequest struct { Seq uint64 `json:"seq"` NoErase bool `json:"no_erase,omitempty"` } msgDeleteResponse struct { apiResponse Success bool `json:"success,omitempty"` } // ConsumerInfoLister is used to iterate over a channel of consumer infos. // Err method can be used to check for errors encountered during iteration. // Info channel is always closed and therefore can be used in a range loop. ConsumerInfoLister interface { Info() <-chan *ConsumerInfo Err() error } // ConsumerNameLister is used to iterate over a channel of consumer names. 
// Err method can be used to check for errors encountered during iteration. // Name channel is always closed and therefore can be used in a range loop. ConsumerNameLister interface { Name() <-chan string Err() error } consumerLister struct { js *jetStream offset int pageInfo *apiPaged consumers chan *ConsumerInfo names chan string err error } consumerListResponse struct { apiResponse apiPaged Consumers []*ConsumerInfo `json:"consumers"` } consumerNamesResponse struct { apiResponse apiPaged Consumers []string `json:"consumers"` } consumerUnpinRequest struct { Group string `json:"group"` } ) // CreateOrUpdateConsumer creates a consumer on a given stream with // given config. If consumer already exists, it will be updated (if // possible). Consumer interface is returned, allowing to operate on a // consumer (e.g. fetch messages). func (s *stream) CreateOrUpdateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error) { return upsertConsumer(ctx, s.js, s.name, cfg, consumerActionCreateOrUpdate) } // CreateConsumer creates a consumer on a given stream with given // config. If consumer already exists and the provided configuration // differs from its configuration, ErrConsumerExists is returned. If the // provided configuration is the same as the existing consumer, the // existing consumer is returned. Consumer interface is returned, // allowing to operate on a consumer (e.g. fetch messages). func (s *stream) CreateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error) { return upsertConsumer(ctx, s.js, s.name, cfg, consumerActionCreate) } // UpdateConsumer updates an existing consumer. If consumer does not // exist, ErrConsumerDoesNotExist is returned. Consumer interface is // returned, allowing to operate on a consumer (e.g. fetch messages). 
func (s *stream) UpdateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error) { return upsertConsumer(ctx, s.js, s.name, cfg, consumerActionUpdate) } // OrderedConsumer returns an OrderedConsumer instance. OrderedConsumer // are managed by the library and provide a simple way to consume // messages from a stream. Ordered consumers are ephemeral in-memory // pull consumers and are resilient to deletes and restarts. func (s *stream) OrderedConsumer(ctx context.Context, cfg OrderedConsumerConfig) (Consumer, error) { oc := &orderedConsumer{ js: s.js, cfg: &cfg, stream: s.name, namePrefix: nuid.Next(), doReset: make(chan struct{}, 1), } consCfg := oc.getConsumerConfig() cons, err := s.CreateOrUpdateConsumer(ctx, *consCfg) if err != nil { return nil, err } oc.currentConsumer = cons.(*pullConsumer) return oc, nil } // Consumer returns an interface to an existing consumer, allowing processing // of messages. If consumer does not exist, ErrConsumerNotFound is // returned. func (s *stream) Consumer(ctx context.Context, name string) (Consumer, error) { return getConsumer(ctx, s.js, s.name, name) } // DeleteConsumer removes a consumer with given name from a stream. // If consumer does not exist, ErrConsumerNotFound is returned. func (s *stream) DeleteConsumer(ctx context.Context, name string) error { return deleteConsumer(ctx, s.js, s.name, name) } // PauseConsumer pauses a consumer. func (s *stream) PauseConsumer(ctx context.Context, name string, pauseUntil time.Time) (*ConsumerPauseResponse, error) { return pauseConsumer(ctx, s.js, s.name, name, &pauseUntil) } // ResumeConsumer resumes a consumer. func (s *stream) ResumeConsumer(ctx context.Context, name string) (*ConsumerPauseResponse, error) { return resumeConsumer(ctx, s.js, s.name, name) } // Info returns StreamInfo from the server. 
func (s *stream) Info(ctx context.Context, opts ...StreamInfoOpt) (*StreamInfo, error) { ctx, cancel := s.js.wrapContextWithoutDeadline(ctx) if cancel != nil { defer cancel() } var infoReq *streamInfoRequest for _, opt := range opts { if infoReq == nil { infoReq = &streamInfoRequest{} } if err := opt(infoReq); err != nil { return nil, err } } var req []byte var err error var subjectMap map[string]uint64 var offset int infoSubject := fmt.Sprintf(apiStreamInfoT, s.name) var info *StreamInfo for { if infoReq != nil { if infoReq.SubjectFilter != "" { if subjectMap == nil { subjectMap = make(map[string]uint64) } infoReq.Offset = offset } req, err = json.Marshal(infoReq) if err != nil { return nil, err } } var resp streamInfoResponse if _, err = s.js.apiRequestJSON(ctx, infoSubject, &resp, req); err != nil { return nil, err } if resp.Error != nil { if resp.Error.ErrorCode == JSErrCodeStreamNotFound { return nil, ErrStreamNotFound } return nil, resp.Error } info = resp.StreamInfo var total int if resp.Total != 0 { total = resp.Total } if len(resp.StreamInfo.State.Subjects) > 0 { for subj, msgs := range resp.StreamInfo.State.Subjects { subjectMap[subj] = msgs } offset = len(subjectMap) } if total == 0 || total <= offset { info.State.Subjects = nil // we don't want to store subjects in cache cached := *info s.info = &cached info.State.Subjects = subjectMap break } } return info, nil } // CachedInfo returns ConsumerInfo currently cached on this stream. // This method does not perform any network requests. The cached // StreamInfo is updated on every call to Info and Update. func (s *stream) CachedInfo() *StreamInfo { return s.info } // Purge removes messages from a stream. It is a destructive operation. // Use with caution. See StreamPurgeOpt for available options. 
func (s *stream) Purge(ctx context.Context, opts ...StreamPurgeOpt) error { ctx, cancel := s.js.wrapContextWithoutDeadline(ctx) if cancel != nil { defer cancel() } var purgeReq StreamPurgeRequest for _, opt := range opts { if err := opt(&purgeReq); err != nil { return err } } var req []byte var err error req, err = json.Marshal(purgeReq) if err != nil { return err } purgeSubject := fmt.Sprintf(apiStreamPurgeT, s.name) var resp streamPurgeResponse if _, err = s.js.apiRequestJSON(ctx, purgeSubject, &resp, req); err != nil { return err } if resp.Error != nil { return resp.Error } return nil } // GetMsg retrieves a raw stream message stored in JetStream by sequence number. func (s *stream) GetMsg(ctx context.Context, seq uint64, opts ...GetMsgOpt) (*RawStreamMsg, error) { req := &apiMsgGetRequest{Seq: seq} for _, opt := range opts { if err := opt(req); err != nil { return nil, err } } return s.getMsg(ctx, req) } // GetLastMsgForSubject retrieves the last raw stream message stored in // JetStream on a given subject subject. 
func (s *stream) GetLastMsgForSubject(ctx context.Context, subject string) (*RawStreamMsg, error) { return s.getMsg(ctx, &apiMsgGetRequest{LastFor: subject}) } func (s *stream) getMsg(ctx context.Context, mreq *apiMsgGetRequest) (*RawStreamMsg, error) { ctx, cancel := s.js.wrapContextWithoutDeadline(ctx) if cancel != nil { defer cancel() } req, err := json.Marshal(mreq) if err != nil { return nil, err } var gmSubj string // handle direct gets if s.info.Config.AllowDirect { if mreq.LastFor != "" { gmSubj = fmt.Sprintf(apiDirectMsgGetLastBySubjectT, s.name, mreq.LastFor) r, err := s.js.apiRequest(ctx, gmSubj, nil) if err != nil { return nil, err } return convertDirectGetMsgResponseToMsg(r.msg) } gmSubj = fmt.Sprintf(apiDirectMsgGetT, s.name) r, err := s.js.apiRequest(ctx, gmSubj, req) if err != nil { return nil, err } return convertDirectGetMsgResponseToMsg(r.msg) } var resp apiMsgGetResponse dsSubj := fmt.Sprintf(apiMsgGetT, s.name) _, err = s.js.apiRequestJSON(ctx, dsSubj, &resp, req) if err != nil { return nil, err } if resp.Error != nil { if resp.Error.ErrorCode == JSErrCodeMessageNotFound { return nil, ErrMsgNotFound } return nil, resp.Error } msg := resp.Message var hdr nats.Header if len(msg.Header) > 0 { hdr, err = nats.DecodeHeadersMsg(msg.Header) if err != nil { return nil, err } } return &RawStreamMsg{ Subject: msg.Subject, Sequence: msg.Sequence, Header: hdr, Data: msg.Data, Time: msg.Time, }, nil } func convertDirectGetMsgResponseToMsg(r *nats.Msg) (*RawStreamMsg, error) { // Check for 404/408. We would get a no-payload message and a "Status" header if len(r.Data) == 0 { val := r.Header.Get(statusHdr) if val != "" { switch val { case noMessages: return nil, ErrMsgNotFound default: desc := r.Header.Get("Description") if desc == "" { desc = "unable to get message" } return nil, fmt.Errorf("nats: %s", desc) } } } // Check for headers that give us the required information to // reconstruct the message. 
if len(r.Header) == 0 { return nil, errors.New("nats: response should have headers") } stream := r.Header.Get(StreamHeader) if stream == "" { return nil, errors.New("nats: missing stream header") } seqStr := r.Header.Get(SequenceHeader) if seqStr == "" { return nil, errors.New("nats: missing sequence header") } seq, err := strconv.ParseUint(seqStr, 10, 64) if err != nil { return nil, fmt.Errorf("nats: invalid sequence header '%s': %v", seqStr, err) } timeStr := r.Header.Get(TimeStampHeaer) if timeStr == "" { return nil, errors.New("nats: missing timestamp header") } tm, err := time.Parse(time.RFC3339Nano, timeStr) if err != nil { return nil, fmt.Errorf("nats: invalid timestamp header '%s': %v", timeStr, err) } subj := r.Header.Get(SubjectHeader) if subj == "" { return nil, errors.New("nats: missing subject header") } return &RawStreamMsg{ Subject: subj, Sequence: seq, Header: r.Header, Data: r.Data, Time: tm, }, nil } // DeleteMsg deletes a message from a stream. // On the server, the message is marked as erased, but not overwritten. func (s *stream) DeleteMsg(ctx context.Context, seq uint64) error { return s.deleteMsg(ctx, &msgDeleteRequest{Seq: seq, NoErase: true}) } // SecureDeleteMsg deletes a message from a stream. The deleted message // is overwritten with random data. As a result, this operation is slower // than DeleteMsg. 
func (s *stream) SecureDeleteMsg(ctx context.Context, seq uint64) error { return s.deleteMsg(ctx, &msgDeleteRequest{Seq: seq}) } func (s *stream) deleteMsg(ctx context.Context, req *msgDeleteRequest) error { ctx, cancel := s.js.wrapContextWithoutDeadline(ctx) if cancel != nil { defer cancel() } r, err := json.Marshal(req) if err != nil { return err } subj := fmt.Sprintf(apiMsgDeleteT, s.name) var resp msgDeleteResponse if _, err = s.js.apiRequestJSON(ctx, subj, &resp, r); err != nil { return err } if !resp.Success { return fmt.Errorf("%w: %s", ErrMsgDeleteUnsuccessful, resp.Error.Error()) } return nil } // ListConsumers returns ConsumerInfoLister enabling iterating over a // channel of consumer infos. func (s *stream) ListConsumers(ctx context.Context) ConsumerInfoLister { l := &consumerLister{ js: s.js, consumers: make(chan *ConsumerInfo), } go func() { defer close(l.consumers) ctx, cancel := s.js.wrapContextWithoutDeadline(ctx) if cancel != nil { defer cancel() } for { page, err := l.consumerInfos(ctx, s.name) if err != nil && !errors.Is(err, ErrEndOfData) { l.err = err return } for _, info := range page { select { case <-ctx.Done(): l.err = ctx.Err() return default: } if info != nil { l.consumers <- info } } if errors.Is(err, ErrEndOfData) { return } } }() return l } func (s *consumerLister) Info() <-chan *ConsumerInfo { return s.consumers } func (s *consumerLister) Err() error { return s.err } // ConsumerNames returns a ConsumerNameLister enabling iterating over a // channel of consumer names. 
func (s *stream) ConsumerNames(ctx context.Context) ConsumerNameLister { l := &consumerLister{ js: s.js, names: make(chan string), } go func() { defer close(l.names) ctx, cancel := s.js.wrapContextWithoutDeadline(ctx) if cancel != nil { defer cancel() } for { page, err := l.consumerNames(ctx, s.name) if err != nil && !errors.Is(err, ErrEndOfData) { l.err = err return } for _, info := range page { select { case l.names <- info: case <-ctx.Done(): l.err = ctx.Err() return } } if errors.Is(err, ErrEndOfData) { return } } }() return l } func (s *consumerLister) Name() <-chan string { return s.names } // consumerInfos fetches the next ConsumerInfo page func (s *consumerLister) consumerInfos(ctx context.Context, stream string) ([]*ConsumerInfo, error) { if s.pageInfo != nil && s.offset >= s.pageInfo.Total { return nil, ErrEndOfData } req, err := json.Marshal( apiPagedRequest{Offset: s.offset}, ) if err != nil { return nil, err } slSubj := fmt.Sprintf(apiConsumerListT, stream) var resp consumerListResponse _, err = s.js.apiRequestJSON(ctx, slSubj, &resp, req) if err != nil { return nil, err } if resp.Error != nil { return nil, resp.Error } s.pageInfo = &resp.apiPaged s.offset += len(resp.Consumers) return resp.Consumers, nil } // consumerNames fetches the next consumer names page func (s *consumerLister) consumerNames(ctx context.Context, stream string) ([]string, error) { if s.pageInfo != nil && s.offset >= s.pageInfo.Total { return nil, ErrEndOfData } req, err := json.Marshal( apiPagedRequest{Offset: s.offset}, ) if err != nil { return nil, err } slSubj := fmt.Sprintf(apiConsumerNamesT, stream) var resp consumerNamesResponse _, err = s.js.apiRequestJSON(ctx, slSubj, &resp, req) if err != nil { return nil, err } if resp.Error != nil { return nil, resp.Error } s.pageInfo = &resp.apiPaged s.offset += len(resp.Consumers) return resp.Consumers, nil } // UnpinConsumer unpins the currently pinned client for a consumer for the given group name. 
// If consumer does not exist, ErrConsumerNotFound is returned. func (s *stream) UnpinConsumer(ctx context.Context, consumer string, group string) error { return unpinConsumer(ctx, s.js, s.name, consumer, group) } nats.go-1.41.0/jetstream/stream_config.go000066400000000000000000000473261477351342400203410ustar00rootroot00000000000000// Copyright 2022-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package jetstream import ( "encoding/json" "errors" "fmt" "strings" "time" "golang.org/x/text/cases" "golang.org/x/text/language" ) type ( // StreamInfo shows config and current state for this stream. StreamInfo struct { // Config contains the configuration settings of the stream, set when // creating or updating the stream. Config StreamConfig `json:"config"` // Created is the timestamp when the stream was created. Created time.Time `json:"created"` // State provides the state of the stream at the time of request, // including metrics like the number of messages in the stream, total // bytes, etc. State StreamState `json:"state"` // Cluster contains information about the cluster to which this stream // belongs (if applicable). Cluster *ClusterInfo `json:"cluster,omitempty"` // Mirror contains information about another stream this one is // mirroring. Mirroring is used to create replicas of another stream's // data. This field is omitted if the stream is not mirroring another // stream. 
Mirror *StreamSourceInfo `json:"mirror,omitempty"` // Sources is a list of source streams from which this stream collects // data. Sources []*StreamSourceInfo `json:"sources,omitempty"` // TimeStamp indicates when the info was gathered by the server. TimeStamp time.Time `json:"ts"` } // StreamConfig is the configuration of a JetStream stream. StreamConfig struct { // Name is the name of the stream. It is required and must be unique // across the JetStream account. // // Name Names cannot contain whitespace, ., *, >, path separators // (forward or backwards slash), and non-printable characters. Name string `json:"name"` // Description is an optional description of the stream. Description string `json:"description,omitempty"` // Subjects is a list of subjects that the stream is listening on. // Wildcards are supported. Subjects cannot be set if the stream is // created as a mirror. Subjects []string `json:"subjects,omitempty"` // Retention defines the message retention policy for the stream. // Defaults to LimitsPolicy. Retention RetentionPolicy `json:"retention"` // MaxConsumers specifies the maximum number of consumers allowed for // the stream. MaxConsumers int `json:"max_consumers"` // MaxMsgs is the maximum number of messages the stream will store. // After reaching the limit, stream adheres to the discard policy. // If not set, server default is -1 (unlimited). MaxMsgs int64 `json:"max_msgs"` // MaxBytes is the maximum total size of messages the stream will store. // After reaching the limit, stream adheres to the discard policy. // If not set, server default is -1 (unlimited). MaxBytes int64 `json:"max_bytes"` // Discard defines the policy for handling messages when the stream // reaches its limits in terms of number of messages or total bytes. Discard DiscardPolicy `json:"discard"` // DiscardNewPerSubject is a flag to enable discarding new messages per // subject when limits are reached. 
Requires DiscardPolicy to be // DiscardNew and the MaxMsgsPerSubject to be set. DiscardNewPerSubject bool `json:"discard_new_per_subject,omitempty"` // MaxAge is the maximum age of messages that the stream will retain. MaxAge time.Duration `json:"max_age"` // MaxMsgsPerSubject is the maximum number of messages per subject that // the stream will retain. MaxMsgsPerSubject int64 `json:"max_msgs_per_subject"` // MaxMsgSize is the maximum size of any single message in the stream. MaxMsgSize int32 `json:"max_msg_size,omitempty"` // Storage specifies the type of storage backend used for the stream // (file or memory). Storage StorageType `json:"storage"` // Replicas is the number of stream replicas in clustered JetStream. // Defaults to 1, maximum is 5. Replicas int `json:"num_replicas"` // NoAck is a flag to disable acknowledging messages received by this // stream. // // If set to true, publish methods from the JetStream client will not // work as expected, since they rely on acknowledgements. Core NATS // publish methods should be used instead. Note that this will make // message delivery less reliable. NoAck bool `json:"no_ack,omitempty"` // Duplicates is the window within which to track duplicate messages. // If not set, server default is 2 minutes. Duplicates time.Duration `json:"duplicate_window,omitempty"` // Placement is used to declare where the stream should be placed via // tags and/or an explicit cluster name. Placement *Placement `json:"placement,omitempty"` // Mirror defines the configuration for mirroring another stream. Mirror *StreamSource `json:"mirror,omitempty"` // Sources is a list of other streams this stream sources messages from. Sources []*StreamSource `json:"sources,omitempty"` // Sealed streams do not allow messages to be published or deleted via limits or API, // sealed streams can not be unsealed via configuration update. Can only // be set on already created streams via the Update API. 
Sealed bool `json:"sealed,omitempty"` // DenyDelete restricts the ability to delete messages from a stream via // the API. Defaults to false. DenyDelete bool `json:"deny_delete,omitempty"` // DenyPurge restricts the ability to purge messages from a stream via // the API. Defaults to false. DenyPurge bool `json:"deny_purge,omitempty"` // AllowRollup allows the use of the Nats-Rollup header to replace all // contents of a stream, or subject in a stream, with a single new // message. AllowRollup bool `json:"allow_rollup_hdrs,omitempty"` // Compression specifies the message storage compression algorithm. // Defaults to NoCompression. Compression StoreCompression `json:"compression"` // FirstSeq is the initial sequence number of the first message in the // stream. FirstSeq uint64 `json:"first_seq,omitempty"` // SubjectTransform allows applying a transformation to matching // messages' subjects. SubjectTransform *SubjectTransformConfig `json:"subject_transform,omitempty"` // RePublish allows immediate republishing a message to the configured // subject after it's stored. RePublish *RePublish `json:"republish,omitempty"` // AllowDirect enables direct access to individual messages using direct // get API. Defaults to false. AllowDirect bool `json:"allow_direct"` // MirrorDirect enables direct access to individual messages from the // origin stream using direct get API. Defaults to false. MirrorDirect bool `json:"mirror_direct"` // ConsumerLimits defines limits of certain values that consumers can // set, defaults for those who don't set these settings ConsumerLimits StreamConsumerLimits `json:"consumer_limits,omitempty"` // Metadata is a set of application-defined key-value pairs for // associating metadata on the stream. This feature requires nats-server // v2.10.0 or later. Metadata map[string]string `json:"metadata,omitempty"` // Template identifies the template that manages the Stream. // Deprecated: This feature is no longer supported. 
Template string `json:"template_owner,omitempty"` // AllowMsgTTL allows header initiated per-message TTLs. // This feature requires nats-server v2.11.0 or later. AllowMsgTTL bool `json:"allow_msg_ttl"` // Enables and sets a duration for adding server markers for delete, purge and max age limits. // This feature requires nats-server v2.11.0 or later. SubjectDeleteMarkerTTL time.Duration `json:"subject_delete_marker_ttl,omitempty"` } // StreamSourceInfo shows information about an upstream stream // source/mirror. StreamSourceInfo struct { // Name is the name of the stream that is being replicated. Name string `json:"name"` // Lag informs how many messages behind the source/mirror operation is. // This will only show correctly if there is active communication // with stream/mirror. Lag uint64 `json:"lag"` // Active informs when last the mirror or sourced stream had activity. // Value will be -1 when there has been no activity. Active time.Duration `json:"active"` // FilterSubject is the subject filter defined for this source/mirror. FilterSubject string `json:"filter_subject,omitempty"` // SubjectTransforms is a list of subject transforms defined for this // source/mirror. SubjectTransforms []SubjectTransformConfig `json:"subject_transforms,omitempty"` } // StreamState is the state of a JetStream stream at the time of request. StreamState struct { // Msgs is the number of messages stored in the stream. Msgs uint64 `json:"messages"` // Bytes is the number of bytes stored in the stream. Bytes uint64 `json:"bytes"` // FirstSeq is the sequence number of the first message in the stream. FirstSeq uint64 `json:"first_seq"` // FirstTime is the timestamp of the first message in the stream. FirstTime time.Time `json:"first_ts"` // LastSeq is the sequence number of the last message in the stream. LastSeq uint64 `json:"last_seq"` // LastTime is the timestamp of the last message in the stream. 
LastTime time.Time `json:"last_ts"` // Consumers is the number of consumers on the stream. Consumers int `json:"consumer_count"` // Deleted is a list of sequence numbers that have been removed from the // stream. This field will only be returned if the stream has been // fetched with the DeletedDetails option. Deleted []uint64 `json:"deleted"` // NumDeleted is the number of messages that have been removed from the // stream. Only deleted messages causing a gap in stream sequence numbers // are counted. Messages deleted at the beginning or end of the stream // are not counted. NumDeleted int `json:"num_deleted"` // NumSubjects is the number of unique subjects the stream has received // messages on. NumSubjects uint64 `json:"num_subjects"` // Subjects is a map of subjects the stream has received messages on // with message count per subject. This field will only be returned if // the stream has been fetched with the SubjectFilter option. Subjects map[string]uint64 `json:"subjects"` } // ClusterInfo shows information about the underlying set of servers that // make up the stream or consumer. ClusterInfo struct { // Name is the name of the cluster. Name string `json:"name,omitempty"` // Leader is the server name of the RAFT leader. Leader string `json:"leader,omitempty"` // Replicas is the list of members of the RAFT cluster Replicas []*PeerInfo `json:"replicas,omitempty"` } // PeerInfo shows information about the peers in the cluster that are // supporting the stream or consumer. PeerInfo struct { // Name is the server name of the peer. Name string `json:"name"` // Current indicates if the peer is up to date and synchronized with the // leader. Current bool `json:"current"` // Offline indicates if the peer is considered offline by the group. Offline bool `json:"offline,omitempty"` // Active it the duration since this peer was last seen. Active time.Duration `json:"active"` // Lag is the number of uncommitted operations this peer is behind the // leader. 
Lag uint64 `json:"lag,omitempty"` } // SubjectTransformConfig is for applying a subject transform (to matching // messages) before doing anything else when a new message is received. SubjectTransformConfig struct { // Source is the subject pattern to match incoming messages against. Source string `json:"src"` // Destination is the subject pattern to remap the subject to. Destination string `json:"dest"` } // RePublish is for republishing messages once committed to a stream. The // original subject is remapped from the subject pattern to the destination // pattern. RePublish struct { // Source is the subject pattern to match incoming messages against. Source string `json:"src,omitempty"` // Destination is the subject pattern to republish the subject to. Destination string `json:"dest"` // HeadersOnly is a flag to indicate that only the headers should be // republished. HeadersOnly bool `json:"headers_only,omitempty"` } // Placement is used to guide placement of streams in clustered JetStream. Placement struct { // Cluster is the name of the cluster to which the stream should be // assigned. Cluster string `json:"cluster"` // Tags are used to match streams to servers in the cluster. A stream // will be assigned to a server with a matching tag. Tags []string `json:"tags,omitempty"` } // StreamSource dictates how streams can source from other streams. StreamSource struct { // Name is the name of the stream to source from. Name string `json:"name"` // OptStartSeq is the sequence number to start sourcing from. OptStartSeq uint64 `json:"opt_start_seq,omitempty"` // OptStartTime is the timestamp of messages to start sourcing from. OptStartTime *time.Time `json:"opt_start_time,omitempty"` // FilterSubject is the subject filter used to only replicate messages // with matching subjects. FilterSubject string `json:"filter_subject,omitempty"` // SubjectTransforms is a list of subject transforms to apply to // matching messages. 
// // Subject transforms on sources and mirrors are also used as subject // filters with optional transformations. SubjectTransforms []SubjectTransformConfig `json:"subject_transforms,omitempty"` // External is a configuration referencing a stream source in another // account or JetStream domain. External *ExternalStream `json:"external,omitempty"` // Domain is used to configure a stream source in another JetStream // domain. This setting will set the External field with the appropriate // APIPrefix. Domain string `json:"-"` } // ExternalStream allows you to qualify access to a stream source in another // account. ExternalStream struct { // APIPrefix is the subject prefix that imports the other account/domain // $JS.API.CONSUMER.> subjects. APIPrefix string `json:"api"` // DeliverPrefix is the delivery subject to use for the push consumer. DeliverPrefix string `json:"deliver"` } // StreamConsumerLimits are the limits for a consumer on a stream. These can // be overridden on a per consumer basis. StreamConsumerLimits struct { // InactiveThreshold is a duration which instructs the server to clean // up the consumer if it has been inactive for the specified duration. InactiveThreshold time.Duration `json:"inactive_threshold,omitempty"` // MaxAckPending is a maximum number of outstanding unacknowledged // messages for a consumer. MaxAckPending int `json:"max_ack_pending,omitempty"` } // DiscardPolicy determines how to proceed when limits of messages or bytes // are reached. DiscardPolicy int // RetentionPolicy determines how messages in a stream are retained. RetentionPolicy int // StorageType determines how messages are stored for retention. StorageType int // StoreCompression determines how messages are compressed. StoreCompression uint8 ) const ( // LimitsPolicy (default) means that messages are retained until any given // limit is reached. This could be one of MaxMsgs, MaxBytes, or MaxAge. 
LimitsPolicy RetentionPolicy = iota // InterestPolicy specifies that when all known observables have // acknowledged a message it can be removed. InterestPolicy // WorkQueuePolicy specifies that when the first worker or subscriber // acknowledges the message it can be removed. WorkQueuePolicy ) const ( // DiscardOld will remove older messages to return to the limits. This is // the default. DiscardOld DiscardPolicy = iota // DiscardNew will fail to store new messages once the limits are reached. DiscardNew ) const ( limitsPolicyString = "limits" interestPolicyString = "interest" workQueuePolicyString = "workqueue" ) func (rp RetentionPolicy) String() string { switch rp { case LimitsPolicy: return "Limits" case InterestPolicy: return "Interest" case WorkQueuePolicy: return "WorkQueue" default: return "Unknown Retention Policy" } } func (rp RetentionPolicy) MarshalJSON() ([]byte, error) { switch rp { case LimitsPolicy: return json.Marshal(limitsPolicyString) case InterestPolicy: return json.Marshal(interestPolicyString) case WorkQueuePolicy: return json.Marshal(workQueuePolicyString) default: return nil, fmt.Errorf("nats: can not marshal %v", rp) } } func (rp *RetentionPolicy) UnmarshalJSON(data []byte) error { switch string(data) { case jsonString(limitsPolicyString): *rp = LimitsPolicy case jsonString(interestPolicyString): *rp = InterestPolicy case jsonString(workQueuePolicyString): *rp = WorkQueuePolicy default: return fmt.Errorf("nats: can not unmarshal %q", data) } return nil } func (dp DiscardPolicy) String() string { switch dp { case DiscardOld: return "DiscardOld" case DiscardNew: return "DiscardNew" default: return "Unknown Discard Policy" } } func (dp DiscardPolicy) MarshalJSON() ([]byte, error) { switch dp { case DiscardOld: return json.Marshal("old") case DiscardNew: return json.Marshal("new") default: return nil, fmt.Errorf("nats: can not marshal %v", dp) } } func (dp *DiscardPolicy) UnmarshalJSON(data []byte) error { switch 
strings.ToLower(string(data)) { case jsonString("old"): *dp = DiscardOld case jsonString("new"): *dp = DiscardNew default: return fmt.Errorf("nats: can not unmarshal %q", data) } return nil } const ( // FileStorage specifies on disk storage. It's the default. FileStorage StorageType = iota // MemoryStorage specifies in memory only. MemoryStorage ) const ( memoryStorageString = "memory" fileStorageString = "file" ) func (st StorageType) String() string { caser := cases.Title(language.AmericanEnglish) switch st { case MemoryStorage: return caser.String(memoryStorageString) case FileStorage: return caser.String(fileStorageString) default: return "Unknown Storage Type" } } func (st StorageType) MarshalJSON() ([]byte, error) { switch st { case MemoryStorage: return json.Marshal(memoryStorageString) case FileStorage: return json.Marshal(fileStorageString) default: return nil, fmt.Errorf("nats: can not marshal %v", st) } } func (st *StorageType) UnmarshalJSON(data []byte) error { switch string(data) { case jsonString(memoryStorageString): *st = MemoryStorage case jsonString(fileStorageString): *st = FileStorage default: return fmt.Errorf("nats: can not unmarshal %q", data) } return nil } func jsonString(s string) string { return "\"" + s + "\"" } const ( // NoCompression disables compression on the stream. This is the default. NoCompression StoreCompression = iota // S2Compression enables S2 compression on the stream. 
S2Compression ) func (alg StoreCompression) String() string { switch alg { case NoCompression: return "None" case S2Compression: return "S2" default: return "Unknown StoreCompression" } } func (alg StoreCompression) MarshalJSON() ([]byte, error) { var str string switch alg { case S2Compression: str = "s2" case NoCompression: str = "none" default: return nil, errors.New("unknown compression algorithm") } return json.Marshal(str) } func (alg *StoreCompression) UnmarshalJSON(b []byte) error { var str string if err := json.Unmarshal(b, &str); err != nil { return err } switch str { case "s2": *alg = S2Compression case "none": *alg = NoCompression default: return errors.New("unknown compression algorithm") } return nil } nats.go-1.41.0/jetstream/test/000077500000000000000000000000001477351342400161355ustar00rootroot00000000000000nats.go-1.41.0/jetstream/test/consumer_test.go000066400000000000000000000774451477351342400213770ustar00rootroot00000000000000// Copyright 2023-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package test import ( "context" "errors" "fmt" "sync/atomic" "testing" "time" "github.com/nats-io/nats.go" "github.com/nats-io/nats.go/jetstream" ) func TestConsumerInfo(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) t.Run("get consumer info, ok", func(t *testing.T) { nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{ Durable: "cons", AckPolicy: jetstream.AckExplicitPolicy, Description: "test consumer", }) if err != nil { t.Fatalf("Unexpected error: %v", err) } info, err := c.Info(ctx) if err != nil { t.Fatalf("Unexpected error: %v", err) } if info.Stream != "foo" { t.Fatalf("Invalid stream name; expected: 'foo'; got: %s", info.Stream) } if info.Config.Description != "test consumer" { t.Fatalf("Invalid consumer description; expected: 'test consumer'; got: %s", info.Config.Description) } if info.Config.PauseUntil != nil { t.Fatalf("Consumer should not be paused") } if info.Paused != false { t.Fatalf("Consumer should not be paused") } if info.PauseRemaining != 0 { t.Fatalf("Consumer should not be paused") } // update consumer and see if info is updated _, err = s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{ Durable: "cons", AckPolicy: jetstream.AckExplicitPolicy, Description: "updated consumer", }) if err != nil { t.Fatalf("Unexpected error: %v", err) } info, err = c.Info(ctx) if err != nil { t.Fatalf("Unexpected error: %v", err) } if info.Stream != "foo" { t.Fatalf("Invalid stream name; expected: 'foo'; got: %s", info.Stream) } if info.Config.Description != 
"updated consumer" { t.Fatalf("Invalid consumer description; expected: 'updated consumer'; got: %s", info.Config.Description) } }) t.Run("consumer does not exist", func(t *testing.T) { nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s, err := js.Stream(ctx, "foo") if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.Consumer(ctx, "cons") if err != nil { t.Fatalf("Unexpected error: %v", err) } if err := s.DeleteConsumer(ctx, "cons"); err != nil { t.Fatalf("Unexpected error: %v", err) } _, err = c.Info(ctx) if err == nil || !errors.Is(err, jetstream.ErrConsumerNotFound) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrConsumerNotFound, err) } }) } func TestConsumerOverflow(t *testing.T) { t.Run("fetch", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{ PriorityPolicy: jetstream.PriorityPolicyOverflow, PriorityGroups: []string{"A"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } // Check that consumer got proper priority policy and TTL info := c.CachedInfo() if info.Config.PriorityPolicy != jetstream.PriorityPolicyOverflow { t.Fatalf("Invalid priority policy; expected: %v; got: %v", jetstream.PriorityPolicyOverflow, info.Config.PriorityPolicy) } for range 
100 { _, err = js.Publish(ctx, "FOO.bar", []byte("hello")) if err != nil { t.Fatalf("Unexpected error: %v", err) } } // We are below overflow, so we should not get any messages. msgs, err := c.Fetch(10, jetstream.FetchMinPending(110), jetstream.FetchMaxWait(500*time.Millisecond), jetstream.FetchPriorityGroup("A")) if err != nil { t.Fatalf("Unexpected error: %v", err) } count := 0 for msg := range msgs.Messages() { msg.Ack() count++ } if count != 0 { t.Fatalf("Expected 0 messages, got %d", count) } // Add more messages for range 100 { _, err = js.Publish(ctx, "FOO.bar", []byte("hello")) if err != nil { t.Fatalf("Unexpected error: %v", err) } } msgs, err = c.Fetch(10, jetstream.FetchMinPending(110), jetstream.FetchPriorityGroup("A")) if err != nil { t.Fatalf("Unexpected error: %v", err) } count = 0 for msg := range msgs.Messages() { if err := msg.DoubleAck(context.Background()); err != nil { t.Fatalf("Unexpected error: %v", err) } count++ } if count != 10 { t.Fatalf("Expected 10 messages, got %d", count) } // try fetching messages with min ack pending msgs, err = c.Fetch(10, jetstream.FetchMaxWait(500*time.Millisecond), jetstream.FetchMinAckPending(10), jetstream.FetchPriorityGroup("A")) if err != nil { t.Fatalf("Unexpected error: %v", err) } count = 0 for msg := range msgs.Messages() { msg.Ack() count++ } if count != 0 { t.Fatalf("Expected 0 messages, got %d", count) } // now fetch some more messages but do not ack them, // we will test min ack pending msgs, err = c.Fetch(10, jetstream.FetchPriorityGroup("A")) if err != nil { t.Fatalf("Unexpected error: %v", err) } count = 0 for range msgs.Messages() { count++ } if msgs.Error() != nil { t.Fatalf("Unexpected error: %v", msgs.Error()) } if count != 10 { t.Fatalf("Expected 10 messages, got %d", count) } // now fetch messages with min ack pending msgs, err = c.Fetch(10, jetstream.FetchMinAckPending(10), jetstream.FetchPriorityGroup("A")) if err != nil { t.Fatalf("Unexpected error: %v", err) } count = 0 for msg := range 
msgs.Messages() { msg.Ack() count++ } if msgs.Error() != nil { t.Fatalf("Unexpected error: %v", msgs.Error()) } if count != 10 { t.Fatalf("Expected 10 messages, got %d", count) } }) t.Run("consume", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{ PriorityPolicy: jetstream.PriorityPolicyOverflow, PriorityGroups: []string{"A"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } // Check that consumer got proper priority policy and TTL info := c.CachedInfo() if info.Config.PriorityPolicy != jetstream.PriorityPolicyOverflow { t.Fatalf("Invalid priority policy; expected: %v; got: %v", jetstream.PriorityPolicyOverflow, info.Config.PriorityPolicy) } for i := range 100 { _, err = js.Publish(ctx, "FOO.bar", []byte(fmt.Sprintf("hello %d", i))) if err != nil { t.Fatalf("Unexpected error: %v", err) } } count := atomic.Uint32{} handler := func(m jetstream.Msg) { if err := m.Ack(); err != nil { t.Fatalf("Unexpected error: %v", err) } count.Add(1) } cc, err := c.Consume(handler, jetstream.PullPriorityGroup("A"), jetstream.PullMinPending(110), jetstream.PullMaxMessages(20), jetstream.PullExpiry(time.Second)) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer cc.Stop() time.Sleep(3 * time.Second) // there are 100 messages on the stream, and min pending is 110 // so we should not get any messages if count.Load() != 0 { t.Fatalf("Expected 0 messages, got %d", count.Load()) } // Add more messages for i := 100; i < 200; 
i++ { _, err = js.Publish(ctx, "FOO.bar", []byte(fmt.Sprintf("hello %d", i))) if err != nil { t.Fatalf("Unexpected error: %v", err) } } time.Sleep(100 * time.Millisecond) // now we should get 91 messages, because `Consume` will // keep getting messages until it drops below min pending if count.Load() != 91 { t.Fatalf("Expected 10 messages, got %d", count.Load()) } cc.Stop() // now test min ack pending count.Store(0) // consume with min ack pending cc, err = c.Consume(handler, jetstream.PullPriorityGroup("A"), jetstream.PullMinAckPending(5), jetstream.PullMaxMessages(20)) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer cc.Stop() time.Sleep(100 * time.Millisecond) // no messages should be received, there are no pending acks if count.Load() != 0 { t.Fatalf("Expected 0 messages, got %d", count.Load()) } // fetch some messages, do not ack them msgs, err := c.Fetch(10, jetstream.FetchPriorityGroup("A")) if err != nil { t.Fatalf("Unexpected error: %v", err) } i := 0 for range msgs.Messages() { i++ } if msgs.Error() != nil { t.Fatalf("Unexpected error: %v", msgs.Error()) } if i != 10 { t.Fatalf("Expected 10 messages, got %d", i) } time.Sleep(100 * time.Millisecond) // we should process the rest of the stream minus the 10 unacked messages // 200 - 91 - 10 = 99 if count.Load() != 99 { t.Fatalf("Expected 5 messages, got %d", count.Load()) } }) t.Run("messages", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{ 
PriorityPolicy: jetstream.PriorityPolicyOverflow, PriorityGroups: []string{"A"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } // Check that consumer got proper priority policy and TTL info := c.CachedInfo() if info.Config.PriorityPolicy != jetstream.PriorityPolicyOverflow { t.Fatalf("Invalid priority policy; expected: %v; got: %v", jetstream.PriorityPolicyOverflow, info.Config.PriorityPolicy) } for i := range 100 { _, err = js.Publish(ctx, "FOO.bar", []byte(fmt.Sprintf("hello %d", i))) if err != nil { t.Fatalf("Unexpected error: %v", err) } } count := atomic.Uint32{} errs := make(chan error, 10) done := make(chan struct{}, 1) handler := func(it jetstream.MessagesContext) { for { msg, err := it.Next() if err != nil { if !errors.Is(err, jetstream.ErrMsgIteratorClosed) { errs <- err } break } if err := msg.Ack(); err != nil { errs <- err break } count.Add(1) } done <- struct{}{} } it, err := c.Messages(jetstream.PullPriorityGroup("A"), jetstream.PullMinPending(110), jetstream.PullMaxMessages(20)) if err != nil { t.Fatalf("Unexpected error: %v", err) } go handler(it) time.Sleep(100 * time.Millisecond) // there are 100 messages on the stream, and min pending is 110 // so we should not get any messages if count.Load() != 0 { t.Fatalf("Expected 0 messages, got %d", count.Load()) } // Add more messages for i := 100; i < 200; i++ { _, err = js.Publish(ctx, "FOO.bar", []byte(fmt.Sprintf("hello %d", i))) if err != nil { t.Fatalf("Unexpected error: %v", err) } } time.Sleep(100 * time.Millisecond) // now we should get 91 messages, because `Consume` will // keep getting messages until it drops below min pending if count.Load() != 91 { t.Fatalf("Expected 10 messages, got %d", count.Load()) } it.Stop() <-done // now test min ack pending count.Store(0) it, err = c.Messages(jetstream.PullPriorityGroup("A"), jetstream.PullMinAckPending(5), jetstream.PullMaxMessages(20)) if err != nil { t.Fatalf("Unexpected error: %v", err) } go handler(it) time.Sleep(100 * 
time.Millisecond) // no messages should be received, there are no pending acks if count.Load() != 0 { t.Fatalf("Expected 0 messages, got %d", count.Load()) } // fetch some messages, do not ack them msgs, err := c.Fetch(10, jetstream.FetchPriorityGroup("A")) if err != nil { t.Fatalf("Unexpected error: %v", err) } i := 0 for range msgs.Messages() { i++ } if msgs.Error() != nil { t.Fatalf("Unexpected error: %v", msgs.Error()) } if i != 10 { t.Fatalf("Expected 10 messages, got %d", i) } time.Sleep(100 * time.Millisecond) // we should process the rest of the stream minus the 10 unacked messages // 200 - 91 - 10 = 99 if count.Load() != 99 { t.Fatalf("Expected 5 messages, got %d", count.Load()) } it.Stop() <-done }) } func TestConsumerPinned(t *testing.T) { t.Run("messages", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{ Durable: "cons", AckPolicy: jetstream.AckExplicitPolicy, Description: "test consumer", PriorityPolicy: jetstream.PriorityPolicyPinned, PinnedTTL: time.Second, PriorityGroups: []string{"A"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } for range 1000 { _, err = js.Publish(ctx, "FOO.bar", []byte("hello")) if err != nil { t.Fatalf("Unexpected error: %v", err) } } gcount := make(chan struct{}, 1000) count := atomic.Uint32{} // Initially pinned consumer instance initiallyPinned, err := s.Consumer(ctx, "cons") if err != nil { t.Fatalf("Unexpected error: %v", err) } handler := func(it 
jetstream.MessagesContext, counter *atomic.Uint32, doneCh chan struct{}) { for { msg, err := it.Next() if err != nil { break } if err := msg.Ack(); err != nil { break } counter.Add(1) gcount <- struct{}{} } doneCh <- struct{}{} } // test priority group validation // invalid priority group _, err = initiallyPinned.Messages(jetstream.PullPriorityGroup("BAD")) if err == nil || err.Error() != "nats: invalid jetstream option: invalid priority group" { t.Fatalf("Expected invalid priority group error, got %v", err) } // no priority group _, err = initiallyPinned.Messages() if err == nil || err.Error() != "nats: invalid jetstream option: priority group is required for priority consumer" { t.Fatalf("Expected invalid priority group error") } ipDoneCh := make(chan struct{}) ip, err := initiallyPinned.Messages(jetstream.PullPriorityGroup("A"), jetstream.PullHeartbeat(500*time.Millisecond)) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer ip.Stop() _, err = ip.Next() if err != nil { t.Fatalf("Unexpected error: %v", err) } count.Store(1) go handler(ip, &count, ipDoneCh) time.Sleep(100 * time.Millisecond) // Second consume instance that should remain passive. 
notPinnedC := atomic.Uint32{} npDoneCh := make(chan struct{}) np, err := c.Messages(jetstream.PullPriorityGroup("A"), jetstream.PullHeartbeat(500*time.Millisecond)) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer np.Stop() go handler(np, ¬PinnedC, npDoneCh) waitForCounter := func(t *testing.T, c *atomic.Uint32, expected int) { t.Helper() outer: for { select { case <-gcount: if c.Load() == uint32(expected) { break outer } case <-time.After(10 * time.Second): t.Fatalf("Did not get all messages in time; expected %d, got %d", expected, c.Load()) } } } waitForCounter(t, &count, 1000) if notPinnedC.Load() != 0 { t.Fatalf("Expected 0 messages for not pinned, got %d", notPinnedC.Load()) } count.Store(0) ip.Stop() for range 100 { _, err = js.Publish(ctx, "FOO.bar", []byte("hello")) if err != nil { t.Fatalf("Unexpected error: %v", err) } } time.Sleep(100 * time.Millisecond) if notPinnedC.Load() != 0 { t.Fatalf("Expected 0 messages for not pinned, got %d", notPinnedC.Load()) } //wait for pinned ttl to expire and messages to be consumed by the second consumer waitForCounter(t, ¬PinnedC, 100) if count.Load() != 0 { t.Fatalf("Expected 0 messages for pinned, got %d", count.Load()) } np.Stop() select { case <-ipDoneCh: case <-time.After(5 * time.Second): t.Fatalf("Expected pinned consumer to be done") } select { case <-npDoneCh: case <-time.After(5 * time.Second): t.Fatalf("Expected not pinned consumer to be done") } }) t.Run("consume", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } 
c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{ Durable: "cons", AckPolicy: jetstream.AckExplicitPolicy, Description: "test consumer", PriorityPolicy: jetstream.PriorityPolicyPinned, PinnedTTL: 1 * time.Second, PriorityGroups: []string{"A"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } for range 1000 { _, err = js.Publish(ctx, "FOO.bar", []byte("hello")) if err != nil { t.Fatalf("Unexpected error: %v", err) } } // Initially pinned consumer instance initiallyPinned, err := s.Consumer(ctx, "cons") if err != nil { t.Fatalf("Unexpected error: %v", err) } // test priority group validation // invalid priority group _, err = initiallyPinned.Consume(func(m jetstream.Msg) { }, jetstream.PullPriorityGroup("BAD")) if err == nil || err.Error() != "nats: invalid jetstream option: invalid priority group" { t.Fatalf("Expected invalid priority group error") } // no priority group _, err = initiallyPinned.Consume(func(m jetstream.Msg) {}) if err == nil || err.Error() != "nats: invalid jetstream option: priority group is required for priority consumer" { t.Fatalf("Expected invalid priority group error") } pinnedCount := atomic.Uint32{} pinnedDone := make(chan struct{}) ip, err := initiallyPinned.Consume(func(m jetstream.Msg) { if err := m.Ack(); err != nil { t.Fatalf("Unexpected error: %v", err) } if pinnedCount.Add(1) == 1000 { close(pinnedDone) } }, jetstream.PullThresholdMessages(10), jetstream.PullPriorityGroup("A")) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer ip.Stop() time.Sleep(100 * time.Millisecond) // Second consume instance that should remain passive. 
notPinnedCount := atomic.Uint32{} notPinnedDone := make(chan struct{}) np, err := c.Consume(func(m jetstream.Msg) { if err := m.Ack(); err != nil { t.Fatalf("Unexpected error: %v", err) } if notPinnedCount.Add(1) == 100 { close(notPinnedDone) } }, jetstream.PullPriorityGroup("A")) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer np.Stop() select { case <-pinnedDone: case <-time.After(10 * time.Second): t.Fatalf("Expected pinned consumer to be done") } if notPinnedCount.Load() != 0 { t.Fatalf("Expected 0 messages for not pinned, got %d", notPinnedCount.Load()) } pinnedCount.Store(0) ip.Stop() for range 100 { _, err = js.Publish(ctx, "FOO.bar", []byte("hello")) if err != nil { t.Fatalf("Unexpected error: %v", err) } } time.Sleep(100 * time.Millisecond) if notPinnedCount.Load() != 0 { t.Fatalf("Expected 0 messages for not pinned, got %d", notPinnedCount.Load()) } //wait for pinned ttl to expire and messages to be consumed by the second consumer select { case <-notPinnedDone: case <-time.After(10 * time.Second): t.Fatalf("Expected not pinned consumer to be done after pinned ttl expired") } if pinnedCount.Load() != 0 { t.Fatalf("Expected 0 messages for pinned, got %d", pinnedCount.Load()) } }) t.Run("fetch", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{ Durable: "cons", AckPolicy: jetstream.AckExplicitPolicy, Description: "test consumer", PriorityPolicy: jetstream.PriorityPolicyPinned, PinnedTTL: 
time.Second, PriorityGroups: []string{"A"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } // Check that consumer got proper priority policy and TTL info := c.CachedInfo() if info.Config.PriorityPolicy != jetstream.PriorityPolicyPinned { t.Fatalf("Invalid priority policy; expected: %v; got: %v", jetstream.PriorityPolicyPinned, info.Config.PriorityPolicy) } if info.Config.PinnedTTL != time.Second { t.Fatalf("Invalid pinned TTL; expected: %v; got: %v", time.Second, info.Config.PinnedTTL) } for range 100 { _, err = js.Publish(ctx, "FOO.bar", []byte("hello")) if err != nil { t.Fatalf("Unexpected error: %v", err) } } // Initial fetch. // Should get all messages and get a Pin ID. msgs, err := c.Fetch(10, jetstream.FetchPriorityGroup("A")) if err != nil { t.Fatalf("Unexpected error: %v", err) } count := 0 id := "" for msg := range msgs.Messages() { msg.Ack() count++ natsMsgId := msg.Headers().Get("Nats-Pin-Id") if id == "" { id = natsMsgId } else { if id != natsMsgId { t.Fatalf("Expected Nats-Msg-Id to be the same for all messages") } } } if count != 10 { t.Fatalf("Expected 10 messages, got %d", count) } // Different consumer instance. cdiff, err := js.Consumer(ctx, "foo", "cons") if err != nil { t.Fatalf("Unexpected error: %v", err) } msgs, err = cdiff.Fetch(10, jetstream.FetchMaxWait(200*time.Millisecond), jetstream.FetchPriorityGroup("A")) if err != nil { t.Fatalf("Unexpected error: %v", err) } count = 0 for msg := range msgs.Messages() { msg.Ack() count++ } if count != 0 { t.Fatalf("Expected 0 messages, got %d", count) } if msgs.Error() != nil { t.Fatalf("Unexpected error: %v", msgs.Error()) } count = 0 // Now lets fetch from the pinned one, which should be fine. 
msgs, err = c.Fetch(10, jetstream.FetchMaxWait(3*time.Second), jetstream.FetchPriorityGroup("A")) if err != nil { t.Fatalf("Unexpected error: %v", err) } for msg := range msgs.Messages() { if pinId := msg.Headers().Get("Nats-Pin-Id"); pinId == "" { t.Fatalf("missing Nats-Pin-Id header") } msg.Ack() count++ } if count != 10 { t.Fatalf("Expected 10 messages, got %d", count) } if msgs.Error() != nil { t.Fatalf("Unexpected error: %v", msgs.Error()) } // Wait for the TTL to expire, expect different ID count = 0 time.Sleep(1500 * time.Millisecond) // The same instance, should work fine. msgs, err = c.Fetch(10, jetstream.FetchMaxWait(500*time.Millisecond), jetstream.FetchPriorityGroup("A")) if err != nil { t.Fatalf("Unexpected error: %v", err) } for msg := range msgs.Messages() { msg.Ack() count++ } if !errors.Is(msgs.Error(), jetstream.ErrPinIDMismatch) { t.Fatalf("Expected error: %v, got: %v", jetstream.ErrPinIDMismatch, msgs.Error()) } if count != 0 { t.Fatalf("Expected 0 messages, got %d", count) } msgs, err = c.Fetch(10, jetstream.FetchMaxWait(500*time.Millisecond), jetstream.FetchPriorityGroup("A")) if err != nil { t.Fatalf("Unexpected error: %v", err) } for msg := range msgs.Messages() { if msg == nil { break } newId := msg.Headers().Get("Nats-Pin-Id") if newId == id { t.Fatalf("Expected new pull to have different ID. 
old: %s, new: %s", id, newId) } msg.Ack() count++ } if msgs.Error() != nil { t.Fatalf("Unexpected error: %v", msgs.Error()) } if count != 10 { t.Fatalf("Expected 10 messages, got %d", count) } }) } func TestConsumerUnpin(t *testing.T) { t.Run("unpin consumer", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{ Durable: "cons", AckPolicy: jetstream.AckExplicitPolicy, Description: "test consumer", PriorityPolicy: jetstream.PriorityPolicyPinned, PinnedTTL: 50 * time.Second, PriorityGroups: []string{"A"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } for range 1000 { _, err = js.Publish(ctx, "FOO.bar", []byte("hello")) if err != nil { t.Fatalf("Unexpected error: %v", err) } } msgs, err := c.Messages(jetstream.PullPriorityGroup("A")) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer msgs.Stop() msg, err := msgs.Next() if err != nil { t.Fatalf("Unexpected error: %v", err) } firstPinID := msg.Headers().Get("Nats-Pin-Id") if firstPinID == "" { t.Fatalf("Expected pinned message") } second, err := s.Consumer(ctx, "cons") if err != nil { t.Fatalf("Unexpected error: %v", err) } noMsgs, err := second.Messages(jetstream.PullPriorityGroup("A")) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer noMsgs.Stop() done := make(chan struct{}) errC := make(chan error) go func() { _, err := noMsgs.Next() if err != nil { errC <- err return } done <- struct{}{} }() select { case <-done: t.Fatalf("Expected no 
message") case <-time.After(500 * time.Millisecond): noMsgs.Stop() } select { case <-time.After(5 * time.Second): t.Fatalf("Expected error") case err := <-errC: if !errors.Is(err, jetstream.ErrMsgIteratorClosed) { t.Fatalf("Expected error: %v, got: %v", jetstream.ErrMsgIteratorClosed, err) } } third, err := s.Consumer(ctx, "cons") if err != nil { t.Fatalf("Unexpected error: %v", err) } yesMsgs, err := third.Messages(jetstream.PullPriorityGroup("A"), jetstream.PullExpiry(time.Second)) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer yesMsgs.Stop() go func() { msg, err := yesMsgs.Next() newPinID := msg.Headers().Get("Nats-Pin-Id") if newPinID == firstPinID || newPinID == "" { errC <- fmt.Errorf("Expected new pin ID, got %s", newPinID) return } if err != nil { errC <- err return } done <- struct{}{} }() err = s.UnpinConsumer(ctx, "cons", "A") if err != nil { t.Fatalf("Unexpected error: %v", err) } select { case <-done: case err := <-errC: t.Fatalf("Unexpected error: %v", err) case <-time.After(4 * time.Second): t.Fatalf("Should not time out") } }) t.Run("consumer not found", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } // try unpinning consumer with invalid name err = s.UnpinConsumer(ctx, "cons", "A") if !errors.Is(err, jetstream.ErrConsumerNotFound) { t.Fatalf("Expected error: %v, got: %v", jetstream.ErrConsumerNotFound, err) } }) } func TestConsumerCachedInfo(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := 
nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{ Durable: "cons", AckPolicy: jetstream.AckExplicitPolicy, Description: "test consumer", }) if err != nil { t.Fatalf("Unexpected error: %v", err) } info := c.CachedInfo() if info.Stream != "foo" { t.Fatalf("Invalid stream name; expected: 'foo'; got: %s", info.Stream) } if info.Config.Description != "test consumer" { t.Fatalf("Invalid consumer description; expected: 'test consumer'; got: %s", info.Config.Description) } // update consumer and see if info is updated _, err = s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{ Durable: "cons", AckPolicy: jetstream.AckExplicitPolicy, Description: "updated consumer", }) if err != nil { t.Fatalf("Unexpected error: %v", err) } info = c.CachedInfo() if info.Stream != "foo" { t.Fatalf("Invalid stream name; expected: 'foo'; got: %s", info.Stream) } // description should not be updated when using cached values if info.Config.Description != "test consumer" { t.Fatalf("Invalid consumer description; expected: 'updated consumer'; got: %s", info.Config.Description) } } nats.go-1.41.0/jetstream/test/errors_test.go000066400000000000000000000145431477351342400210460ustar00rootroot00000000000000// Copyright 2022-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package test import ( "context" "errors" "fmt" "os" "testing" "time" "github.com/nats-io/nats.go" "github.com/nats-io/nats.go/jetstream" ) func TestJetStreamErrors(t *testing.T) { t.Run("API error", func(t *testing.T) { conf := createConfFile(t, []byte(` listen: 127.0.0.1:-1 no_auth_user: rip jetstream: {max_mem_store: 64GB, max_file_store: 10TB} accounts: { JS: { jetstream: enabled users: [ {user: dlc, password: foo} ] }, IU: { users: [ {user: rip, password: bar} ] }, } `)) defer os.Remove(conf) s, _ := RunServerWithConfig(conf) defer shutdownJSServerAndRemoveStorage(t, s) nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() _, err = js.AccountInfo(ctx) // check directly to var (backwards compatible) if err != jetstream.ErrJetStreamNotEnabledForAccount { t.Fatalf("Did not get the proper error, got %v", err) } // matching via errors.Is if ok := errors.Is(err, jetstream.ErrJetStreamNotEnabledForAccount); !ok { t.Fatal("Expected jetstream.ErrJetStreamNotEnabledForAccount") } // matching wrapped via error.Is err2 := fmt.Errorf("custom error: %w", jetstream.ErrJetStreamNotEnabledForAccount) if ok := errors.Is(err2, jetstream.ErrJetStreamNotEnabledForAccount); !ok { t.Fatal("Expected wrapped ErrJetStreamNotEnabled") } // via classic type assertion. 
jserr, ok := err.(jetstream.JetStreamError) if !ok { t.Fatal("Expected a jetstream.JetStreamError") } expected := jetstream.JSErrCodeJetStreamNotEnabledForAccount if jserr.APIError().ErrorCode != expected { t.Fatalf("Expected: %v, got: %v", expected, jserr.APIError().ErrorCode) } if jserr.APIError() == nil { t.Fatal("Expected APIError") } // matching to interface via errors.As(...) var apierr jetstream.JetStreamError ok = errors.As(err, &apierr) if !ok { t.Fatal("Expected a jetstream.JetStreamError") } if apierr.APIError() == nil { t.Fatal("Expected APIError") } if apierr.APIError().ErrorCode != expected { t.Fatalf("Expected: %v, got: %v", expected, apierr.APIError().ErrorCode) } expectedMessage := "nats: API error: code=503 err_code=10039 description=jetstream not enabled for account" if apierr.Error() != expectedMessage { t.Fatalf("Expected: %v, got: %v", expectedMessage, apierr.Error()) } // an APIError also implements the jetstream.JetStreamError interface. var _ jetstream.JetStreamError = &jetstream.APIError{} // matching arbitrary custom error via errors.Is(...) customErr := &jetstream.APIError{ErrorCode: expected} if ok := errors.Is(customErr, jetstream.ErrJetStreamNotEnabledForAccount); !ok { t.Fatal("Expected wrapped jetstream.ErrJetStreamNotEnabledForAccount") } customErr = &jetstream.APIError{ErrorCode: 1} if ok := errors.Is(customErr, jetstream.ErrJetStreamNotEnabledForAccount); ok { t.Fatal("Expected to not match ErrJetStreamNotEnabled") } var cerr jetstream.JetStreamError if ok := errors.As(customErr, &cerr); !ok { t.Fatal("Expected custom error to be a jetstream.JetStreamError") } // matching to concrete type via errors.As(...) 
var aerr *jetstream.APIError ok = errors.As(err, &aerr) if !ok { t.Fatal("Expected an APIError") } if aerr.ErrorCode != expected { t.Fatalf("Expected: %v, got: %v", expected, aerr.ErrorCode) } expectedMessage = "nats: API error: code=503 err_code=10039 description=jetstream not enabled for account" if aerr.Error() != expectedMessage { t.Fatalf("Expected: %v, got: %v", expectedMessage, apierr.Error()) } }) t.Run("test non-api error", func(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() // stream with empty name _, err = js.CreateStream(ctx, jetstream.StreamConfig{}) if err == nil { t.Fatalf("Expected error, got nil") } // check directly to var (backwards compatible) if err != jetstream.ErrStreamNameRequired { t.Fatalf("Expected: %v; got: %v", jetstream.ErrInvalidStreamName, err) } // matching via errors.Is if ok := errors.Is(err, jetstream.ErrStreamNameRequired); !ok { t.Fatalf("Expected: %v; got: %v", jetstream.ErrStreamNameRequired, err) } // matching wrapped via error.Is err2 := fmt.Errorf("custom error: %w", jetstream.ErrStreamNameRequired) if ok := errors.Is(err2, jetstream.ErrStreamNameRequired); !ok { t.Fatal("Expected wrapped ErrStreamNameRequired") } // via classic type assertion. jserr, ok := err.(jetstream.JetStreamError) if !ok { t.Fatal("Expected a jetstream.JetStreamError") } if jserr.APIError() != nil { t.Fatalf("Expected: empty APIError; got: %v", jserr.APIError()) } // matching to interface via errors.As(...) 
var jserr2 jetstream.JetStreamError ok = errors.As(err, &jserr2) if !ok { t.Fatal("Expected a jetstream.JetStreamError") } if jserr2.APIError() != nil { t.Fatalf("Expected: empty APIError; got: %v", jserr2.APIError()) } expectedMessage := "nats: stream name is required" if jserr2.Error() != expectedMessage { t.Fatalf("Expected: %v, got: %v", expectedMessage, jserr2.Error()) } // matching to concrete type via errors.As(...) var aerr *jetstream.APIError ok = errors.As(err, &aerr) if ok { t.Fatal("Expected ErrStreamNameRequired not to map to APIError") } }) } nats.go-1.41.0/jetstream/test/helper_test.go000066400000000000000000000212261477351342400210050ustar00rootroot00000000000000// Copyright 2022-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package test import ( "context" "errors" "fmt" "net" "net/url" "os" "strconv" "strings" "sync" "testing" "time" "github.com/nats-io/nats-server/v2/server" "github.com/nats-io/nats.go" "github.com/nats-io/nats.go/jetstream" natsserver "github.com/nats-io/nats-server/v2/test" ) type jsServer struct { *server.Server myopts *server.Options restart sync.Mutex } // Restart can be used to start again a server // using the same listen address as before. func (srv *jsServer) Restart() { srv.restart.Lock() defer srv.restart.Unlock() srv.Server = natsserver.RunServer(srv.myopts) } // Dumb wait program to sync on callbacks, etc... 
Will timeout func Wait(ch chan bool) error { return WaitTime(ch, 5*time.Second) } // Wait for a chan with a timeout. func WaitTime(ch chan bool, timeout time.Duration) error { select { case <-ch: return nil case <-time.After(timeout): } return errors.New("timeout") } //////////////////////////////////////////////////////////////////////////////// // Creating client connections //////////////////////////////////////////////////////////////////////////////// // NewDefaultConnection func NewDefaultConnection(t *testing.T) *nats.Conn { return NewConnection(t, nats.DefaultPort) } // NewConnection forms connection on a given port. func NewConnection(t *testing.T, port int) *nats.Conn { url := fmt.Sprintf("nats://127.0.0.1:%d", port) nc, err := nats.Connect(url) if err != nil { t.Fatalf("Failed to create default connection: %v\n", err) return nil } return nc } //////////////////////////////////////////////////////////////////////////////// // Running nats server in separate Go routines //////////////////////////////////////////////////////////////////////////////// // RunDefaultServer will run a server on the default port. func RunDefaultServer() *server.Server { return RunServerOnPort(nats.DefaultPort) } // RunServerOnPort will run a server on the given port. func RunServerOnPort(port int) *server.Server { opts := natsserver.DefaultTestOptions opts.Port = port opts.Cluster.Name = "testing" return RunServerWithOptions(opts) } // RunServerWithOptions will run a server with the given options. func RunServerWithOptions(opts server.Options) *server.Server { return natsserver.RunServer(&opts) } // RunServerWithConfig will run a server with the given configuration file. 
func RunServerWithConfig(configFile string) (*server.Server, *server.Options) { return natsserver.RunServerWithConfig(configFile) } func RunBasicJetStreamServer() *server.Server { opts := natsserver.DefaultTestOptions opts.Port = -1 opts.JetStream = true return RunServerWithOptions(opts) } func createConfFile(t *testing.T, content []byte) string { t.Helper() conf, err := os.CreateTemp("", "") if err != nil { t.Fatalf("Error creating conf file: %v", err) } fName := conf.Name() if err := conf.Close(); err != nil { t.Fatalf("Unexpected error: %v", err) } if err := os.WriteFile(fName, content, 0666); err != nil { if err := os.Remove(fName); err != nil { t.Fatalf("Unexpected error: %v", err) } t.Fatalf("Error writing conf file: %v", err) } return fName } func shutdownJSServerAndRemoveStorage(t *testing.T, s *server.Server) { t.Helper() var sd string if config := s.JetStreamConfig(); config != nil { sd = config.StoreDir } s.Shutdown() if sd != "" { if err := os.RemoveAll(sd); err != nil { t.Fatalf("Unable to remove storage %q: %v", sd, err) } } s.WaitForShutdown() } func setupJSClusterWithSize(t *testing.T, clusterName string, size int) []*jsServer { t.Helper() nodes := make([]*jsServer, size) opts := make([]*server.Options, 0) var activeListeners []net.Listener getAddr := func(t *testing.T) (string, string, int) { l, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { t.Fatalf("Unexpected error: %v", err) } addr := l.Addr() host := addr.(*net.TCPAddr).IP.String() port := addr.(*net.TCPAddr).Port time.Sleep(100 * time.Millisecond) // we cannot close the listener immediately to avoid duplicate port binding // the returned net.Listener has to be closed after all ports are drawn activeListeners = append(activeListeners, l) return addr.String(), host, port } routes := []string{} for i := 0; i < size; i++ { o := natsserver.DefaultTestOptions o.JetStream = true o.ServerName = fmt.Sprintf("NODE_%d", i) tdir, err := os.MkdirTemp(os.TempDir(), fmt.Sprintf("%s_%s-", 
o.ServerName, clusterName)) if err != nil { t.Fatal(err) } o.StoreDir = tdir if size > 1 { o.Cluster.Name = clusterName _, host1, port1 := getAddr(t) o.Host = host1 o.Port = port1 addr2, host2, port2 := getAddr(t) o.Cluster.Host = host2 o.Cluster.Port = port2 o.Tags = []string{o.ServerName} routes = append(routes, fmt.Sprintf("nats://%s", addr2)) } opts = append(opts, &o) } // close all connections used to randomize ports for _, l := range activeListeners { l.Close() } if size > 1 { routesStr := server.RoutesFromStr(strings.Join(routes, ",")) for i, o := range opts { o.Routes = routesStr nodes[i] = &jsServer{Server: natsserver.RunServer(o), myopts: o} } } else { o := opts[0] nodes[0] = &jsServer{Server: natsserver.RunServer(o), myopts: o} } // Wait until JS is ready. srvA := nodes[0] nc, err := nats.Connect(srvA.ClientURL()) if err != nil { t.Error(err) } waitForJSReady(t, nc) nc.Close() return nodes } func waitForJSReady(t *testing.T, nc *nats.Conn) { var err error timeout := time.Now().Add(10 * time.Second) for time.Now().Before(timeout) { // Use a smaller MaxWait here since if it fails, we don't want // to wait for too long since we are going to try again. 
js, err := nc.JetStream(nats.MaxWait(250 * time.Millisecond)) if err != nil { t.Fatal(err) } _, err = js.AccountInfo() if err != nil { continue } return } t.Fatalf("Timeout waiting for JS to be ready: %v", err) } func withJSClusterAndStream(t *testing.T, clusterName string, size int, stream jetstream.StreamConfig, tfn func(t *testing.T, subject string, srvs ...*jsServer)) { t.Helper() withJSCluster(t, clusterName, size, func(t *testing.T, nodes ...*jsServer) { srvA := nodes[0] nc, err := nats.Connect(srvA.ClientURL()) if err != nil { t.Error(err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() jsm, err := jetstream.New(nc) if err != nil { t.Fatal(err) } CreateStream: for { select { case <-ctx.Done(): if err != nil { t.Fatalf("Unexpected error creating stream: %v", err) } t.Fatalf("Unable to create stream on cluster") case <-time.After(500 * time.Millisecond): _, err = jsm.AccountInfo(ctx) if err != nil { // Backoff for a bit until cluster and resources are ready. time.Sleep(500 * time.Millisecond) } _, err = jsm.CreateStream(ctx, stream) if err != nil { continue CreateStream } break CreateStream } } tfn(t, stream.Name, nodes...) }) } func withJSCluster(t *testing.T, clusterName string, size int, tfn func(t *testing.T, srvs ...*jsServer)) { t.Helper() nodes := setupJSClusterWithSize(t, clusterName, size) defer func() { // Ensure that they get shutdown and remove their state. for _, node := range nodes { node.restart.Lock() shutdownJSServerAndRemoveStorage(t, node.Server) node.restart.Unlock() } }() tfn(t, nodes...) 
} func restartBasicJSServer(t *testing.T, s *server.Server) *server.Server { opts := natsserver.DefaultTestOptions clientURL, err := url.Parse(s.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } port, err := strconv.Atoi(clientURL.Port()) if err != nil { t.Fatalf("Unexpected error: %v", err) } opts.Port = port opts.JetStream = true opts.StoreDir = s.JetStreamConfig().StoreDir s.Shutdown() s.WaitForShutdown() return RunServerWithOptions(opts) } func checkFor(t *testing.T, totalWait, sleepDur time.Duration, f func() error) { t.Helper() timeout := time.Now().Add(totalWait) var err error for time.Now().Before(timeout) { err = f() if err == nil { return } time.Sleep(sleepDur) } if err != nil { t.Fatal(err.Error()) } } nats.go-1.41.0/jetstream/test/jetstream_test.go000066400000000000000000001624531477351342400215340ustar00rootroot00000000000000// Copyright 2022-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package test import ( "context" "errors" "fmt" "os" "reflect" "testing" "time" "github.com/nats-io/nats.go" "github.com/nats-io/nats.go/jetstream" ) func TestNewWithAPIPrefix(t *testing.T) { t.Run("import subject from another account", func(t *testing.T) { conf := createConfFile(t, []byte(` listen: 127.0.0.1:-1 no_auth_user: test_user jetstream: {max_mem_store: 64GB, max_file_store: 10TB} accounts: { JS: { jetstream: enabled users: [ {user: main, password: foo} ] exports [ { service: "$JS.API.>" }, { service: "foo" }] }, U: { users: [ {user: test_user, password: bar} ] imports [ { service: { subject: "$JS.API.>", account: JS } , to: "main.>" } { service: { subject: "foo", account: JS } } ] }, } `)) defer os.Remove(conf) srv, _ := RunServerWithConfig(conf) defer shutdownJSServerAndRemoveStorage(t, srv) ncMain, err := nats.Connect(srv.ClientURL(), nats.UserInfo("main", "foo")) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer ncMain.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() jsMain, err := jetstream.New(ncMain) if err != nil { t.Fatalf("Unexpected error: %v", err) } _, err = jsMain.CreateStream(ctx, jetstream.StreamConfig{ Name: "TEST", Subjects: []string{"foo"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } ncTest, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer ncTest.Close() jsTest, err := jetstream.NewWithAPIPrefix(ncTest, "main") if err != nil { t.Fatalf("Unexpected error: %v", err) } opts := jsTest.Options() if opts.APIPrefix != "main" { t.Fatalf("Invalid API prefix; want: %v, got: %v", "main", opts.APIPrefix) } if opts.Domain != "" { t.Fatalf("Invalid domain; want: %v, got: %v", "", opts.Domain) } _, err = jsTest.Publish(ctx, "foo", []byte("msg")) if err != nil { t.Fatalf("Unexpected error: %v", err) } }) t.Run("empty API prefix", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, 
err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() _, err = jetstream.NewWithAPIPrefix(nc, "") if err == nil || err.Error() != "API prefix cannot be empty" { t.Fatalf(`Expected error: "API prefix cannot be empty"; got: %v`, err) } }) } func TestNewWithDomain(t *testing.T) { t.Run("jetstream account with domain", func(t *testing.T) { conf := createConfFile(t, []byte(` listen: 127.0.0.1:-1 jetstream: { domain: ABC } `)) defer os.Remove(conf) srv, _ := RunServerWithConfig(conf) defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL(), nats.UserInfo("main", "foo")) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() js, err := jetstream.NewWithDomain(nc, "ABC") if err != nil { t.Fatalf("Unexpected error: %v", err) } accInfo, err := js.AccountInfo(ctx) if err != nil { t.Fatalf("Unexpected error: %v", err) } if accInfo.Domain != "ABC" { t.Errorf("Invalid domain; want %v, got: %v", "ABC", accInfo.Domain) } opts := js.Options() if opts.APIPrefix != "" { t.Fatalf("Invalid API prefix; want: %v, got: %v", "main", opts.APIPrefix) } if opts.Domain != "ABC" { t.Fatalf("Invalid domain; want: %v, got: %v", "", opts.Domain) } _, err = js.CreateStream(ctx, jetstream.StreamConfig{ Name: "TEST", Subjects: []string{"foo"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } _, err = js.Publish(ctx, "foo", []byte("msg")) if err != nil { t.Fatalf("Unexpected error: %v", err) } }) t.Run("empty domain", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() _, err = jetstream.NewWithDomain(nc, "") if err == nil || err.Error() != "domain cannot be empty" { t.Fatalf(`Expected error: "domain cannot be empty"; got: %v`, err) } }) } 
func TestJetStreamOptionsReadOnly(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } opts := js.Options() opts.APIPrefix = "foo" opts.Domain = "bar" opts = js.Options() if opts.APIPrefix != "" { t.Fatalf("Invalid API prefix; want: %v, got: %v", "", opts.APIPrefix) } if opts.Domain != "" { t.Fatalf("Invalid domain; want: %v, got: %v", "", opts.Domain) } } func TestWithClientTrace(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() var sent, received string js, err := jetstream.New(nc, jetstream.WithClientTrace(&jetstream.ClientTrace{ RequestSent: func(subj string, _ []byte) { sent = fmt.Sprintf("Request sent: %s", subj) }, ResponseReceived: func(subj string, _ []byte, _ nats.Header) { received = fmt.Sprintf("Response received: %s", subj) }, })) if err != nil { t.Fatalf("Unexpected error: %v", err) } _, err = js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.123"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } if sent != "Request sent: $JS.API.STREAM.CREATE.foo" { t.Fatalf(`Invalid value on sent request trace; want: "Request sent: $JS.API.STREAM.CREATE.foo"; got: %s`, sent) } if received != "Response received: $JS.API.STREAM.CREATE.foo" { t.Fatalf(`Invalid value on response receive trace; want: "Response received: $JS.API.STREAM.CREATE.foo"; got: %s`, sent) } defer nc.Close() } func TestCreateStream(t *testing.T) { tests := []struct { name string stream string subject string metadata map[string]string timeout time.Duration withError error }{ { name: "create 
stream, ok", stream: "foo", timeout: 10 * time.Second, subject: "FOO.123", }, { name: "create stream with metadata", stream: "foo_meta", metadata: map[string]string{ "foo": "bar", "name": "test", }, timeout: 10 * time.Second, subject: "FOO.meta", }, { name: "create stream with metadata, reserved prefix", stream: "foo_meta1", metadata: map[string]string{ "foo": "bar", "_nats_version": "2.10.0", }, timeout: 10 * time.Second, subject: "FOO.meta1", }, { name: "with empty context", stream: "foo_empty_ctx", subject: "FOO.ctx", }, { name: "invalid stream name", stream: "foo.123", subject: "FOO.123", timeout: 10 * time.Second, withError: jetstream.ErrInvalidStreamName, }, { name: "stream name required", stream: "", subject: "FOO.123", timeout: 10 * time.Second, withError: jetstream.ErrStreamNameRequired, }, { name: "stream name already in use", stream: "foo", subject: "BAR.123", timeout: 10 * time.Second, withError: jetstream.ErrStreamNameAlreadyInUse, }, { name: "context timeout", stream: "foo", subject: "BAR.123", timeout: 1 * time.Microsecond, withError: context.DeadlineExceeded, }, } srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() for _, test := range tests { t.Run(test.name, func(t *testing.T) { ctx := context.Background() if test.timeout > 0 { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(context.Background(), test.timeout) defer cancel() } s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: test.stream, Subjects: []string{test.subject}, Metadata: test.metadata}) if test.withError != nil { if !errors.Is(err, test.withError) { t.Fatalf("Expected error: %v; got: %v", test.withError, err) } return } if err != nil { t.Fatalf("Unexpected error: %v", err) } for k, v := range test.metadata { if 
s.CachedInfo().Config.Metadata[k] != v { t.Fatalf("Invalid metadata; want: %v, got: %v", test.metadata, s.CachedInfo().Config.Metadata) } } }) } } func TestCreateStreamMirrorCrossDomains(t *testing.T) { test := []struct { name string streamConfig *jetstream.StreamConfig }{ { name: "create stream mirror cross domains", streamConfig: &jetstream.StreamConfig{ Name: "MIRROR", Mirror: &jetstream.StreamSource{ Name: "TEST", Domain: "HUB", }, }, }, { name: "create stream with source cross domains", streamConfig: &jetstream.StreamConfig{ Name: "MIRROR", Sources: []*jetstream.StreamSource{ { Name: "TEST", Domain: "HUB", }, }, }, }, } for _, test := range test { t.Run(test.name, func(t *testing.T) { conf := createConfFile(t, []byte(` server_name: HUB listen: 127.0.0.1:-1 jetstream: { domain: HUB } leafnodes { listen: 127.0.0.1:7422 } }`)) defer os.Remove(conf) srv, _ := RunServerWithConfig(conf) defer shutdownJSServerAndRemoveStorage(t, srv) lconf := createConfFile(t, []byte(` server_name: LEAF listen: 127.0.0.1:-1 jetstream: { domain:LEAF } leafnodes { remotes = [ { url: "leaf://127.0.0.1" } ] } }`)) defer os.Remove(lconf) ln, _ := RunServerWithConfig(lconf) defer shutdownJSServerAndRemoveStorage(t, ln) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } _, err = js.CreateStream(ctx, jetstream.StreamConfig{ Name: "TEST", Subjects: []string{"foo"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } if _, err := js.Publish(ctx, "foo", []byte("msg1")); err != nil { t.Fatalf("Unexpected error: %v", err) } if _, err := js.Publish(ctx, "foo", []byte("msg2")); err != nil { t.Fatalf("Unexpected error: %v", err) } lnc, err := nats.Connect(ln.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer lnc.Close() ljs, err := 
jetstream.New(lnc) if err != nil { t.Fatalf("Unexpected error: %v", err) } ccfg := *test.streamConfig _, err = ljs.CreateStream(ctx, ccfg) if err != nil { t.Fatalf("Unexpected error: %v", err) } if !reflect.DeepEqual(test.streamConfig, &ccfg) { t.Fatalf("Did not expect config to be altered: %+v vs %+v", test.streamConfig, ccfg) } // Make sure we sync. checkFor(t, 2*time.Second, 15*time.Millisecond, func() error { lStream, err := ljs.Stream(ctx, "MIRROR") if err != nil { t.Fatalf("Unexpected error: %v", err) } if lStream.CachedInfo().State.Msgs == 2 { return nil } return fmt.Errorf("Did not get synced messages: %d", lStream.CachedInfo().State.Msgs) }) if _, err := ljs.Publish(ctx, "foo", []byte("msg3")); err != nil { t.Fatalf("Unexpected error: %v", err) } lStream, err := ljs.Stream(ctx, "MIRROR") if err != nil { t.Fatalf("Unexpected error: %v", err) } checkFor(t, 2*time.Second, 15*time.Millisecond, func() error { info, err := lStream.Info(ctx) if err != nil { return fmt.Errorf("Unexpected error when getting stream info: %v", err) } if info.State.Msgs != 3 { return fmt.Errorf("Expected 3 msgs in stream; got: %d", lStream.CachedInfo().State.Msgs) } return nil }) rjs, err := jetstream.NewWithDomain(lnc, "HUB") if err != nil { t.Fatalf("Unexpected error: %v", err) } _, err = rjs.Stream(ctx, "TEST") if err != nil { t.Fatalf("Unexpected error: %v", err) } if _, err := rjs.Publish(ctx, "foo", []byte("msg4")); err != nil { t.Fatalf("Unexpected error: %v", err) } rStream, err := rjs.Stream(ctx, "TEST") if err != nil { t.Fatalf("Unexpected error: %v", err) } if rStream.CachedInfo().State.Msgs != 4 { t.Fatalf("Expected 3 msgs in stream; got: %d", rStream.CachedInfo().State.Msgs) } }) } } func TestCreateOrUpdateStream(t *testing.T) { tests := []struct { name string stream string subject string timeout time.Duration withError error withInfoCheck bool }{ { name: "create stream ok", stream: "foo", timeout: 10 * time.Second, subject: "FOO.1", withInfoCheck: false, }, { name: 
"create stream empty context", stream: "foo-o", subject: "FOO.12", withInfoCheck: false, }, { name: "create stream invalid stream name", stream: "foo.123", subject: "FOO-123", timeout: 10 * time.Second, withError: jetstream.ErrInvalidStreamName, withInfoCheck: false, }, { name: "create stream stream name required", stream: "", subject: "FOO-1234", timeout: 10 * time.Second, withError: jetstream.ErrStreamNameRequired, withInfoCheck: false, }, { name: "update stream ok", stream: "foo", subject: "BAR-123", timeout: 10 * time.Second, withInfoCheck: true, }, { name: "create stream context timeout", stream: "foo", subject: "BAR-1234", timeout: 1 * time.Microsecond, withError: context.DeadlineExceeded, withInfoCheck: false, }, { name: "update stream with empty context", stream: "sample-foo-1", subject: "SAMPLE-FOO-123", withInfoCheck: true, }, { name: "update stream invalid stream name", stream: "sample-foo.123", subject: "SAMPLE-FOO-1234", timeout: 10 * time.Second, withError: jetstream.ErrInvalidStreamName, withInfoCheck: true, }, { name: "update stream stream name required", stream: "", subject: "SAMPLE-FOO-123", timeout: 10 * time.Second, withError: jetstream.ErrStreamNameRequired, withInfoCheck: true, }, { name: "update stream context timeout", stream: "sample-foo-2", subject: "SAMPLE-FOO-123456", timeout: 1 * time.Microsecond, withError: context.DeadlineExceeded, withInfoCheck: true, }, } srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() for _, test := range tests { t.Run(test.name, func(t *testing.T) { ctx := context.Background() if test.timeout > 0 { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(context.Background(), test.timeout) defer cancel() } s, err := js.CreateOrUpdateStream(ctx, jetstream.StreamConfig{Name: 
test.stream, Subjects: []string{test.subject}}) if test.withError != nil { if !errors.Is(err, test.withError) { t.Fatalf("Expected error: %v; got: %v", test.withError, err) } return } if err != nil { t.Fatalf("Unexpected error: %v", err) } if test.withInfoCheck { info, err := s.Info(ctx) if err != nil { t.Fatalf("Unexpected error: %v", err) } if len(info.Config.Subjects) != 1 || info.Config.Subjects[0] != test.subject { t.Fatalf("Invalid stream subjects after update: %v", info.Config.Subjects) } } }) } } func TestUpdateStream(t *testing.T) { tests := []struct { name string stream string subject string metadata map[string]string timeout time.Duration withError error }{ { name: "update existing stream", stream: "foo", subject: "BAR.123", timeout: 10 * time.Second, }, { name: "with empty context", stream: "foo", subject: "FOO.123", }, { name: "update existing, add metadata", stream: "foo", subject: "BAR.123", metadata: map[string]string{ "foo": "bar", "name": "test", }, timeout: 10 * time.Second, }, { name: "invalid stream name", stream: "foo.123", subject: "FOO.123", timeout: 10 * time.Second, withError: jetstream.ErrInvalidStreamName, }, { name: "stream name required", stream: "", subject: "FOO.123", timeout: 10 * time.Second, withError: jetstream.ErrStreamNameRequired, }, { name: "stream not found", stream: "bar", subject: "FOO.123", timeout: 10 * time.Second, withError: jetstream.ErrStreamNotFound, }, { name: "context timeout", stream: "foo", subject: "FOO.123", timeout: 1 * time.Microsecond, withError: context.DeadlineExceeded, }, } srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() _, err = js.CreateStream(context.Background(), jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.123"}}) if err != nil { t.Fatalf("Unexpected error: 
%v", err) } for _, test := range tests { t.Run(test.name, func(t *testing.T) { ctx := context.Background() if test.timeout > 0 { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(context.Background(), test.timeout) defer cancel() } s, err := js.UpdateStream(ctx, jetstream.StreamConfig{Name: test.stream, Subjects: []string{test.subject}, Metadata: test.metadata}) if test.withError != nil { if !errors.Is(err, test.withError) { t.Fatalf("Expected error: %v; got: %v", test.withError, err) } return } if err != nil { t.Fatalf("Unexpected error: %v", err) } info, err := s.Info(ctx) if err != nil { t.Fatalf("Unexpected error: %v", err) } if len(info.Config.Subjects) != 1 || info.Config.Subjects[0] != test.subject { t.Fatalf("Invalid stream subjects after update: %v", info.Config.Subjects) } for k, v := range test.metadata { if info.Config.Metadata[k] != v { t.Fatalf("Invalid metadata; want: %v, got: %v", test.metadata, info.Config.Metadata) } } }) } } func TestStream(t *testing.T) { tests := []struct { name string stream string subject string timeout time.Duration withError error }{ { name: "get existing stream", stream: "foo", timeout: 10 * time.Second, }, { name: "with empty context", stream: "foo", }, { name: "invalid stream name", stream: "foo.123", timeout: 10 * time.Second, withError: jetstream.ErrInvalidStreamName, }, { name: "stream name required", stream: "", timeout: 10 * time.Second, withError: jetstream.ErrStreamNameRequired, }, { name: "stream not found", stream: "bar", timeout: 10 * time.Second, withError: jetstream.ErrStreamNotFound, }, { name: "context timeout", stream: "foo", timeout: 1 * time.Microsecond, withError: context.DeadlineExceeded, }, } srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() _, err = 
js.CreateStream(context.Background(), jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.123"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for _, test := range tests { t.Run(test.name, func(t *testing.T) { ctx := context.Background() if test.timeout > 0 { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(context.Background(), test.timeout) defer cancel() } s, err := js.Stream(ctx, test.stream) if test.withError != nil { if !errors.Is(err, test.withError) { t.Fatalf("Expected error: %v; got: %v", test.withError, err) } return } if err != nil { t.Fatalf("Unexpected error: %v", err) } if s.CachedInfo().Config.Name != test.stream { t.Fatalf("Invalid stream fetched; want: foo; got: %s", s.CachedInfo().Config.Name) } }) } } func TestDeleteStream(t *testing.T) { tests := []struct { name string stream string subject string timeout time.Duration withError error }{ { name: "delete existing stream", stream: "foo", timeout: 10 * time.Second, }, { name: "with empty context", stream: "bar", }, { name: "invalid stream name", stream: "foo.123", timeout: 10 * time.Second, withError: jetstream.ErrInvalidStreamName, }, { name: "stream name required", stream: "", timeout: 10 * time.Second, withError: jetstream.ErrStreamNameRequired, }, { name: "stream not found", stream: "foo", timeout: 10 * time.Second, withError: jetstream.ErrStreamNotFound, }, } srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() _, err = js.CreateStream(context.Background(), jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.123"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() _, err = js.CreateStream(context.Background(), jetstream.StreamConfig{Name: "bar", Subjects: []string{"BAR.123"}}) if err != nil { 
t.Fatalf("Unexpected error: %v", err) } for _, test := range tests { t.Run(test.name, func(t *testing.T) { ctx := context.Background() if test.timeout > 0 { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(context.Background(), 20*time.Second) defer cancel() } err := js.DeleteStream(ctx, test.stream) if test.withError != nil { if !errors.Is(err, test.withError) { t.Fatalf("Expected error: %v; got: %v", test.withError, err) } return } if err != nil { t.Fatalf("Unexpected error: %v", err) } }) } } func TestAccountInfo(t *testing.T) { t.Run("fetch account info", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() _, err = js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.123"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } info, err := js.AccountInfo(ctx) if err != nil { t.Fatalf("Unexpected error: %v", err) } if info.Streams != 1 { t.Fatalf("Invalid number of streams; want: 1; got: %d", info.Streams) } }) t.Run("jetstream not enabled on server", func(t *testing.T) { srv := RunDefaultServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() _, err = js.AccountInfo(ctx) if err == nil || !errors.Is(err, jetstream.ErrJetStreamNotEnabled) { t.Fatalf(": %v; got: %v", jetstream.ErrJetStreamNotEnabled, err) } }) t.Run("jetstream not enabled for account", func(t *testing.T) { conf := createConfFile(t, []byte(` 
listen: 127.0.0.1:-1 jetstream: enabled no_auth_user: foo accounts: { JS: { jetstream: disabled users: [ {user: foo, password: bar} ] }, } `)) defer os.Remove(conf) srv, _ := RunServerWithConfig(conf) defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() _, err = js.AccountInfo(ctx) if err == nil || !errors.Is(err, jetstream.ErrJetStreamNotEnabledForAccount) { t.Fatalf(": %v; got: %v", jetstream.ErrJetStreamNotEnabledForAccount, err) } }) } func TestListStreams(t *testing.T) { tests := []struct { name string streamsNum int timeout time.Duration subject string expected int withError error }{ { name: "list streams", streamsNum: 260, timeout: 10 * time.Second, expected: 260, }, { name: "with empty context", streamsNum: 260, expected: 260, }, { name: "no stream available", timeout: 10 * time.Second, streamsNum: 0, expected: 0, }, { name: "list streams with subject filter", subject: "FOO.123", streamsNum: 260, expected: 1, }, { name: "list streams with subject filter, no match", subject: "FOO.500", streamsNum: 100, expected: 0, }, { name: "context timeout", streamsNum: 260, timeout: 1 * time.Microsecond, withError: context.DeadlineExceeded, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx := context.Background() if test.timeout > 0 { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(context.Background(), test.timeout) defer cancel() } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() for i := 0; i < test.streamsNum; i++ { _, err = 
js.CreateStream(context.Background(), jetstream.StreamConfig{Name: fmt.Sprintf("foo%d", i), Subjects: []string{fmt.Sprintf("FOO.%d", i)}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } } opts := []jetstream.StreamListOpt{} if test.subject != "" { opts = append(opts, jetstream.WithStreamListSubject(test.subject)) } streamsList := js.ListStreams(ctx, opts...) streams := make([]*jetstream.StreamInfo, 0) for si := range streamsList.Info() { streams = append(streams, si) } if test.withError != nil { if !errors.Is(streamsList.Err(), test.withError) { t.Fatalf("Expected error: %v; got: %v", test.withError, streamsList.Err()) } return } if streamsList.Err() != nil { t.Fatalf("Unexpected error: %s", streamsList.Err()) } if len(streams) != test.expected { t.Fatalf("Wrong number of streams; want: %d; got: %d", test.streamsNum, len(streams)) } }) } } func TestStreamNames(t *testing.T) { tests := []struct { name string streamsNum int subject string expected int timeout time.Duration withError error }{ { name: "list streams", streamsNum: 500, timeout: 10 * time.Second, expected: 500, }, { name: "with empty context", streamsNum: 500, expected: 500, }, { name: "no stream available", streamsNum: 0, expected: 0, timeout: 10 * time.Second, }, { name: "list streams with subject filter", subject: "FOO.123", streamsNum: 260, expected: 1, }, { name: "list streams with subject filter, no match", subject: "FOO.500", streamsNum: 100, expected: 0, timeout: 10 * time.Second, }, { name: "context timeout", streamsNum: 500, timeout: 1 * time.Microsecond, withError: context.DeadlineExceeded, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx := context.Background() if test.timeout > 0 { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(ctx, test.timeout) defer cancel() } js, 
err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() for i := 0; i < test.streamsNum; i++ { _, err = js.CreateStream(context.Background(), jetstream.StreamConfig{Name: fmt.Sprintf("foo%d", i), Subjects: []string{fmt.Sprintf("FOO.%d", i)}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } } opts := []jetstream.StreamListOpt{} if test.subject != "" { opts = append(opts, jetstream.WithStreamListSubject(test.subject)) } streamsList := js.StreamNames(ctx, opts...) streams := make([]string, 0) for s := range streamsList.Name() { streams = append(streams, s) } if test.withError != nil { if !errors.Is(streamsList.Err(), test.withError) { t.Fatalf("Expected error: %v; got: %v", test.withError, streamsList.Err()) } return } if streamsList.Err() != nil { t.Fatalf("Unexpected error: %s", streamsList.Err()) } if len(streams) != test.expected { t.Fatalf("Wrong number of streams; want: %d; got: %d", test.streamsNum, len(streams)) } }) } } func TestJetStream_CreateOrUpdateConsumer(t *testing.T) { tests := []struct { name string stream string consumerConfig jetstream.ConsumerConfig shouldCreate bool timeout time.Duration withError error }{ { name: "create durable pull consumer", stream: "foo", consumerConfig: jetstream.ConsumerConfig{Durable: "dur", AckPolicy: jetstream.AckExplicitPolicy}, timeout: 10 * time.Second, shouldCreate: true, }, { name: "create ephemeral pull consumer", stream: "foo", consumerConfig: jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}, timeout: 10 * time.Second, shouldCreate: true, }, { name: "with empty context", consumerConfig: jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}, stream: "foo", shouldCreate: true, }, { name: "consumer already exists, update", stream: "foo", consumerConfig: jetstream.ConsumerConfig{Durable: "dur", AckPolicy: jetstream.AckExplicitPolicy, Description: "test consumer"}, }, { name: "consumer already exists, illegal update", stream: "foo", 
consumerConfig: jetstream.ConsumerConfig{Durable: "dur", AckPolicy: jetstream.AckNonePolicy, Description: "test consumer"}, withError: jetstream.ErrConsumerCreate, }, { name: "stream does not exist", stream: "abc", withError: jetstream.ErrStreamNotFound, }, { name: "invalid stream name", stream: "foo.1", withError: jetstream.ErrInvalidStreamName, }, { name: "invalid durable name", stream: "foo", consumerConfig: jetstream.ConsumerConfig{Durable: "dur.123", AckPolicy: jetstream.AckExplicitPolicy}, withError: jetstream.ErrInvalidConsumerName, }, { name: "context timeout", consumerConfig: jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}, stream: "foo", timeout: 1 * time.Microsecond, withError: context.DeadlineExceeded, }, } srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() _, err = js.CreateStream(context.Background(), jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for _, test := range tests { t.Run(test.name, func(t *testing.T) { ctx := context.Background() if test.timeout > 0 { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(context.Background(), test.timeout) defer cancel() } var sub *nats.Subscription if test.consumerConfig.FilterSubject != "" { sub, err = nc.SubscribeSync(fmt.Sprintf("$JS.API.CONSUMER.CREATE.foo.*.%s", test.consumerConfig.FilterSubject)) } else { sub, err = nc.SubscribeSync("$JS.API.CONSUMER.CREATE.foo.*") } c, err := js.CreateOrUpdateConsumer(ctx, test.stream, test.consumerConfig) if test.withError != nil { if err == nil || !errors.Is(err, test.withError) { t.Fatalf("Expected error: %v; got: %v", test.withError, err) } return } if err != nil { t.Fatalf("Unexpected error: %v", err) } if test.shouldCreate { if _, err 
:= sub.NextMsgWithContext(ctx); err != nil { t.Fatalf("Expected request on %s; got %s", sub.Subject, err) } } _, err = js.Consumer(ctx, test.stream, c.CachedInfo().Name) if err != nil { t.Fatalf("Unexpected error: %v", err) } }) } } func TestJetStream_CreateConsumer(t *testing.T) { tests := []struct { name string consumerConfig jetstream.ConsumerConfig shouldCreate bool stream string withError error }{ { name: "create consumer", consumerConfig: jetstream.ConsumerConfig{Durable: "dur"}, stream: "foo", shouldCreate: true, }, { name: "consumer already exists, error", consumerConfig: jetstream.ConsumerConfig{Durable: "dur", Description: "test consumer"}, stream: "foo", withError: jetstream.ErrConsumerExists, }, { name: "stream does not exist", stream: "abc", consumerConfig: jetstream.ConsumerConfig{Durable: "dur"}, withError: jetstream.ErrStreamNotFound, }, } srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for _, test := range tests { t.Run(test.name, func(t *testing.T) { var sub *nats.Subscription if test.consumerConfig.FilterSubject != "" { sub, err = nc.SubscribeSync(fmt.Sprintf("$JS.API.CONSUMER.CREATE.foo.*.%s", test.consumerConfig.FilterSubject)) } else { sub, err = nc.SubscribeSync("$JS.API.CONSUMER.CREATE.foo.*") } c, err := js.CreateConsumer(ctx, test.stream, test.consumerConfig) if test.withError != nil { if !errors.Is(err, test.withError) { t.Fatalf("Expected error: %v; got: %v", test.withError, err) } return } if err != nil { t.Fatalf("Unexpected error: %v", err) } if test.shouldCreate { if _, 
err := sub.NextMsgWithContext(ctx); err != nil { t.Fatalf("Expected request on %s; got %s", sub.Subject, err) } } ci, err := s.Consumer(ctx, c.CachedInfo().Name) if err != nil { t.Fatalf("Unexpected error: %v", err) } if ci.CachedInfo().Config.AckPolicy != test.consumerConfig.AckPolicy { t.Fatalf("Invalid ack policy; want: %s; got: %s", test.consumerConfig.AckPolicy, ci.CachedInfo().Config.AckPolicy) } if !reflect.DeepEqual(test.consumerConfig.FilterSubjects, ci.CachedInfo().Config.FilterSubjects) { t.Fatalf("Invalid filter subjects; want: %v; got: %v", test.consumerConfig.FilterSubjects, ci.CachedInfo().Config.FilterSubjects) } }) } } func TestJetStream_UpdateConsumer(t *testing.T) { tests := []struct { name string consumerConfig jetstream.ConsumerConfig shouldUpdate bool stream string withError error }{ { name: "update consumer", consumerConfig: jetstream.ConsumerConfig{Name: "testcons", Description: "updated consumer"}, stream: "foo", shouldUpdate: true, }, { name: "illegal update", consumerConfig: jetstream.ConsumerConfig{Name: "testcons", AckPolicy: jetstream.AckNonePolicy}, stream: "foo", withError: jetstream.ErrConsumerCreate, }, { name: "consumer does not exist", consumerConfig: jetstream.ConsumerConfig{Name: "abc"}, stream: "foo", withError: jetstream.ErrConsumerDoesNotExist, }, { name: "stream does not exist", consumerConfig: jetstream.ConsumerConfig{Name: "testcons"}, stream: "abc", withError: jetstream.ErrStreamNotFound, }, } srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } _, err = 
s.CreateConsumer(ctx, jetstream.ConsumerConfig{Name: "testcons"}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for _, test := range tests { t.Run(test.name, func(t *testing.T) { var sub *nats.Subscription if test.consumerConfig.FilterSubject != "" { sub, err = nc.SubscribeSync(fmt.Sprintf("$JS.API.CONSUMER.CREATE.foo.*.%s", test.consumerConfig.FilterSubject)) } else { sub, err = nc.SubscribeSync("$JS.API.CONSUMER.CREATE.foo.*") } c, err := js.UpdateConsumer(ctx, test.stream, test.consumerConfig) if test.withError != nil { if !errors.Is(err, test.withError) { t.Fatalf("Expected error: %v; got: %v", test.withError, err) } return } if err != nil { t.Fatalf("Unexpected error: %v", err) } if test.shouldUpdate { if _, err := sub.NextMsgWithContext(ctx); err != nil { t.Fatalf("Expected request on %s; got %s", sub.Subject, err) } } ci, err := s.Consumer(ctx, c.CachedInfo().Name) if err != nil { t.Fatalf("Unexpected error: %v", err) } if ci.CachedInfo().Config.AckPolicy != test.consumerConfig.AckPolicy { t.Fatalf("Invalid ack policy; want: %s; got: %s", test.consumerConfig.AckPolicy, ci.CachedInfo().Config.AckPolicy) } if !reflect.DeepEqual(test.consumerConfig.FilterSubjects, ci.CachedInfo().Config.FilterSubjects) { t.Fatalf("Invalid filter subjects; want: %v; got: %v", test.consumerConfig.FilterSubjects, ci.CachedInfo().Config.FilterSubjects) } }) } } func TestJetStream_Consumer(t *testing.T) { tests := []struct { name string stream string durable string timeout time.Duration withError error }{ { name: "get existing consumer", stream: "foo", durable: "dur", timeout: 10 * time.Second, }, { name: "with empty context", stream: "foo", durable: "dur", }, { name: "consumer does not exist", stream: "foo", durable: "abc", timeout: 10 * time.Second, withError: jetstream.ErrConsumerNotFound, }, { name: "invalid durable name", stream: "foo", durable: "dur.123", withError: jetstream.ErrInvalidConsumerName, }, { name: "stream does not exist", stream: "abc", durable: "dur", 
withError: jetstream.ErrStreamNotFound, }, { name: "invalid stream name", stream: "foo.1", durable: "dur", withError: jetstream.ErrInvalidStreamName, }, { name: "context timeout", stream: "foo", durable: "dur", timeout: 1 * time.Microsecond, withError: context.DeadlineExceeded, }, } srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s, err := js.CreateStream(context.Background(), jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } _, err = s.CreateOrUpdateConsumer(context.Background(), jetstream.ConsumerConfig{Durable: "dur", AckPolicy: jetstream.AckAllPolicy, Description: "desc"}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for _, test := range tests { t.Run(test.name, func(t *testing.T) { ctx := context.Background() if test.timeout > 0 { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(context.Background(), test.timeout) defer cancel() } c, err := js.Consumer(ctx, test.stream, test.durable) if test.withError != nil { if err == nil || !errors.Is(err, test.withError) { t.Fatalf("Expected error: %v; got: %v", test.withError, err) } return } if err != nil { t.Fatalf("Unexpected error: %v", err) } if c.CachedInfo().Name != test.durable { t.Fatalf("Unexpected consumer fetched; want: %s; got: %s", test.durable, c.CachedInfo().Name) } }) } } func TestJetStream_DeleteConsumer(t *testing.T) { tests := []struct { name string stream string durable string timeout time.Duration withError error }{ { name: "delete existing consumer", stream: "foo", durable: "dur", timeout: 10 * time.Second, }, { name: "with empty context", stream: "foo", durable: "dur2", }, { name: "consumer does not exist", stream: "foo", durable: "dur", timeout: 10 * time.Second, withError: 
jetstream.ErrConsumerNotFound, }, { name: "invalid durable name", stream: "foo", durable: "dur.123", withError: jetstream.ErrInvalidConsumerName, }, { name: "stream not found", stream: "abc", durable: "dur", withError: jetstream.ErrStreamNotFound, }, { name: "invalid stream name", stream: "foo.1", durable: "dur", withError: jetstream.ErrInvalidStreamName, }, { name: "context timeout", stream: "foo", durable: "dur", timeout: 1 * time.Microsecond, withError: context.DeadlineExceeded, }, } srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } s, err := js.CreateStream(context.Background(), jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } _, err = s.CreateOrUpdateConsumer(context.Background(), jetstream.ConsumerConfig{Durable: "dur", AckPolicy: jetstream.AckAllPolicy, Description: "desc"}) if err != nil { t.Fatalf("Unexpected error: %v", err) } _, err = s.CreateOrUpdateConsumer(context.Background(), jetstream.ConsumerConfig{Durable: "dur2", AckPolicy: jetstream.AckAllPolicy, Description: "desc"}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for _, test := range tests { t.Run(test.name, func(t *testing.T) { ctx := context.Background() if test.timeout > 0 { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(context.Background(), test.timeout) defer cancel() } err := js.DeleteConsumer(ctx, test.stream, test.durable) if test.withError != nil { if err == nil || !errors.Is(err, test.withError) { t.Fatalf("Expected error: %v; got: %v", test.withError, err) } return } if err != nil { t.Fatalf("Unexpected error: %v", err) } _, err = s.Consumer(ctx, test.durable) if err == nil || !errors.Is(err, jetstream.ErrConsumerNotFound) { t.Fatalf("Expected error: %v; got: 
%v", jetstream.ErrConsumerNotFound, err) } }) } } func TestStreamNameBySubject(t *testing.T) { tests := []struct { name string subject string withError error timeout time.Duration expected string }{ { name: "get stream name by subject explicit", subject: "FOO.123", timeout: 10 * time.Second, expected: "foo", }, { name: "with empty context", subject: "FOO.123", expected: "foo", }, { name: "get stream name by subject with wildcard", subject: "BAR.*", expected: "bar", }, { name: "match more than one stream, return the first one", subject: ">", expected: "", }, { name: "stream not found", subject: "BAR.XYZ", withError: jetstream.ErrStreamNotFound, }, { name: "invalid subject", subject: "FOO.>.123", withError: jetstream.ErrInvalidSubject, }, { name: "context timeout", subject: "FOO.123", timeout: 1 * time.Microsecond, withError: context.DeadlineExceeded, }, } srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() _, err = js.CreateStream(context.Background(), jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } _, err = js.CreateStream(context.Background(), jetstream.StreamConfig{Name: "bar", Subjects: []string{"BAR.ABC"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for _, test := range tests { t.Run(test.name, func(t *testing.T) { ctx := context.Background() if test.timeout > 0 { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(context.Background(), test.timeout) defer cancel() } name, err := js.StreamNameBySubject(ctx, test.subject) if test.withError != nil { if err == nil || !errors.Is(err, test.withError) { t.Fatalf("Expected error: %v; got: %v", test.withError, err) } return } if err != nil { t.Fatalf("Unexpected error: %v", err) } if test.expected 
!= "" && name != test.expected { t.Fatalf("Unexpected stream name; want: %s; got: %s", test.expected, name) } }) } } func TestJetStreamTransform(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx := context.Background() _, err := js.CreateStream(ctx, jetstream.StreamConfig{ Name: "ORIGIN", Subjects: []string{"test"}, SubjectTransform: &jetstream.SubjectTransformConfig{Source: ">", Destination: "transformed.>"}, Storage: jetstream.MemoryStorage, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } err = nc.Publish("test", []byte("1")) if err != nil { t.Fatalf("Unexpected error: %v", err) } sourcingStream, err := js.CreateStream(ctx, jetstream.StreamConfig{ Subjects: []string{}, Name: "SOURCING", Sources: []*jetstream.StreamSource{{Name: "ORIGIN", SubjectTransforms: []jetstream.SubjectTransformConfig{{Source: ">", Destination: "fromtest.>"}}}}, Storage: jetstream.MemoryStorage, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } cons, err := sourcingStream.CreateConsumer(ctx, jetstream.ConsumerConfig{FilterSubject: "fromtest.>", MemoryStorage: true}) if err != nil { t.Fatalf("Unexpected error: %v", err) } m, err := cons.Next() if err != nil { t.Fatalf("Unexpected error: %v", err) } if m.Subject() != "fromtest.transformed.test" { t.Fatalf("the subject of the message doesn't match the expected fromtest.transformed.test: %s", m.Subject()) } } func TestStreamConfigMatches(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } cfg := jetstream.StreamConfig{ Name: "stream", Description: "desc", Subjects: []string{"foo.*"}, Retention: jetstream.WorkQueuePolicy, MaxConsumers: 10, MaxMsgs: 100, MaxBytes: 1000, Discard: jetstream.DiscardNew, 
DiscardNewPerSubject: true, MaxAge: 100 * time.Second, MaxMsgsPerSubject: 1000, MaxMsgSize: 10000, Storage: jetstream.MemoryStorage, Replicas: 1, NoAck: true, Duplicates: 10 * time.Second, Sealed: false, DenyDelete: true, DenyPurge: false, AllowRollup: true, Compression: jetstream.S2Compression, FirstSeq: 5, SubjectTransform: &jetstream.SubjectTransformConfig{Source: ">", Destination: "transformed.>"}, RePublish: &jetstream.RePublish{ Source: ">", Destination: "RP.>", HeadersOnly: true, }, AllowDirect: true, ConsumerLimits: jetstream.StreamConsumerLimits{ InactiveThreshold: 10 * time.Second, MaxAckPending: 500, }, } s, err := js.CreateStream(context.Background(), cfg) if err != nil { t.Fatalf("Unexpected error: %v", err) } // server will set metadata values, so we need to clear them s.CachedInfo().Config.Metadata = nil if !reflect.DeepEqual(s.CachedInfo().Config, cfg) { t.Fatalf("StreamConfig doesn't match: %#v", s.CachedInfo().Config) } cfgMirror := jetstream.StreamConfig{ Name: "mirror", MaxConsumers: 10, MaxMsgs: 100, MaxBytes: 1000, MaxAge: 100 * time.Second, MaxMsgsPerSubject: 1000, MaxMsgSize: 10000, Replicas: 1, Duplicates: 10 * time.Second, Mirror: &jetstream.StreamSource{ Name: "stream", OptStartSeq: 10, SubjectTransforms: []jetstream.SubjectTransformConfig{ {Source: ">", Destination: "transformed.>"}, }, }, MirrorDirect: true, SubjectTransform: &jetstream.SubjectTransformConfig{Source: ">", Destination: "transformed.>"}, } s, err = js.CreateStream(context.Background(), cfgMirror) if err != nil { t.Fatalf("Unexpected error: %v", err) } // server will set metadata values, so we need to clear them s.CachedInfo().Config.Metadata = nil if !reflect.DeepEqual(s.CachedInfo().Config, cfgMirror) { t.Fatalf("StreamConfig doesn't match: %#v", s.CachedInfo().Config) } cfgSourcing := jetstream.StreamConfig{ Name: "sourcing", Subjects: []string{"BAR"}, MaxConsumers: 10, MaxMsgs: 100, MaxBytes: 1000, MaxAge: 100 * time.Second, MaxMsgsPerSubject: 1000, MaxMsgSize: 10000, 
Replicas: 1, Duplicates: 10 * time.Second, Sources: []*jetstream.StreamSource{ { Name: "stream", OptStartSeq: 10, SubjectTransforms: []jetstream.SubjectTransformConfig{ {Source: ">", Destination: "transformed.>"}, }, }, }, SubjectTransform: &jetstream.SubjectTransformConfig{Source: ">", Destination: "transformed.>"}, } s, err = js.CreateStream(context.Background(), cfgSourcing) if err != nil { t.Fatalf("Unexpected error: %v", err) } // server will set metadata values, so we need to clear them s.CachedInfo().Config.Metadata = nil if !reflect.DeepEqual(s.CachedInfo().Config, cfgSourcing) { t.Fatalf("StreamConfig doesn't match: %#v", s.CachedInfo().Config) } } func TestConsumerConfigMatches(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } s, err := js.CreateStream(context.Background(), jetstream.StreamConfig{ Name: "FOO", Subjects: []string{"foo.*"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } cfg := jetstream.ConsumerConfig{ Name: "cons", Durable: "cons", Description: "test", DeliverPolicy: jetstream.DeliverByStartSequencePolicy, OptStartSeq: 5, AckPolicy: jetstream.AckAllPolicy, AckWait: 1 * time.Second, MaxDeliver: 5, BackOff: []time.Duration{1 * time.Second, 2 * time.Second, 3 * time.Second}, ReplayPolicy: jetstream.ReplayOriginalPolicy, SampleFrequency: "50%", MaxWaiting: 100, MaxAckPending: 1000, HeadersOnly: true, MaxRequestBatch: 100, MaxRequestExpires: 10 * time.Second, MaxRequestMaxBytes: 1000, InactiveThreshold: 20 * time.Second, Replicas: 1, MemoryStorage: true, FilterSubjects: []string{"foo.1", "foo.2"}, } c, err := s.CreateConsumer(context.Background(), cfg) if err != nil { t.Fatalf("Unexpected error: %v", err) } // server will set metadata values, so we need to clear them 
c.CachedInfo().Config.Metadata = nil if !reflect.DeepEqual(c.CachedInfo().Config, cfg) { t.Fatalf("ConsumerConfig doesn't match") } } func TestJetStreamCleanupPublisher(t *testing.T) { t.Run("cleanup js publisher", func(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() // Create a stream if _, err := js.CreateStream(context.Background(), jetstream.StreamConfig{Name: "TEST", Subjects: []string{"FOO"}}); err != nil { t.Fatalf("Unexpected error: %v", err) } numSubs := nc.NumSubscriptions() if _, err := js.PublishAsync("FOO", []byte("hello")); err != nil { t.Fatalf("Unexpected error: %v", err) } select { case <-js.PublishAsyncComplete(): case <-time.After(5 * time.Second): t.Fatalf("Did not receive completion signal") } if numSubs+1 != nc.NumSubscriptions() { t.Fatalf("Expected an additional subscription after publish, got %d", nc.NumSubscriptions()) } js.CleanupPublisher() if numSubs != nc.NumSubscriptions() { t.Fatalf("Expected subscriptions to be back to original count") } }) t.Run("cleanup js publisher, cancel pending acks", func(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() cbErr := make(chan error, 10) js, err := jetstream.New(nc, jetstream.WithPublishAsyncErrHandler(func(js jetstream.JetStream, m *nats.Msg, err error) { cbErr <- err })) if err != nil { t.Fatalf("Unexpected error: %v", err) } // Create a stream with NoAck so that we can test that we cancel ack futures. 
if _, err := js.CreateStream(context.Background(), jetstream.StreamConfig{Name: "TEST", Subjects: []string{"FOO"}, NoAck: true}); err != nil { t.Fatalf("Unexpected error: %v", err) } numSubs := nc.NumSubscriptions() var acks []jetstream.PubAckFuture for i := 0; i < 10; i++ { ack, err := js.PublishAsync("FOO", []byte("hello")) if err != nil { t.Fatalf("Unexpected error: %v", err) } acks = append(acks, ack) } asyncComplete := js.PublishAsyncComplete() select { case <-asyncComplete: t.Fatalf("Should not complete, NoAck is set") case <-time.After(200 * time.Millisecond): } if numSubs+1 != nc.NumSubscriptions() { t.Fatalf("Expected an additional subscription after publish, got %d", nc.NumSubscriptions()) } js.CleanupPublisher() if numSubs != nc.NumSubscriptions() { t.Fatalf("Expected subscriptions to be back to original count") } // check that PublishAsyncComplete channel is closed select { case <-asyncComplete: case <-time.After(5 * time.Second): t.Fatalf("Did not receive completion signal") } // check that all ack futures are canceled for _, ack := range acks { select { case err := <-ack.Err(): if !errors.Is(err, jetstream.ErrJetStreamPublisherClosed) { t.Fatalf("Expected JetStreamContextClosed error, got %v", err) } case <-ack.Ok(): t.Fatalf("Expected error on the ack future") case <-time.After(200 * time.Millisecond): t.Fatalf("Expected an error on the ack future") } } // check that async error handler is called for each pending ack for i := 0; i < 10; i++ { select { case err := <-cbErr: if !errors.Is(err, jetstream.ErrJetStreamPublisherClosed) { t.Fatalf("Expected JetStreamContextClosed error, got %v", err) } case <-time.After(200 * time.Millisecond): t.Fatalf("Expected errors to be passed from the async handler") } } }) } nats.go-1.41.0/jetstream/test/kv_test.go000066400000000000000000001471501477351342400201530ustar00rootroot00000000000000// Copyright 2023-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use 
this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package test import ( "context" "errors" "fmt" "os" "reflect" "strconv" "strings" "testing" "time" "github.com/nats-io/nats-server/v2/server" "github.com/nats-io/nats.go" "github.com/nats-io/nats.go/jetstream" ) func TestKeyValueBasics(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx := context.Background() kv, err := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "TEST", History: 5, TTL: time.Hour}) expectOk(t, err) if kv.Bucket() != "TEST" { t.Fatalf("Expected bucket name to be %q, got %q", "TEST", kv.Bucket()) } // Simple Put r, err := kv.Put(ctx, "name", []byte("derek")) expectOk(t, err) if r != 1 { t.Fatalf("Expected 1 for the revision, got %d", r) } // Simple Get e, err := kv.Get(ctx, "name") expectOk(t, err) if string(e.Value()) != "derek" { t.Fatalf("Got wrong value: %q vs %q", e.Value(), "derek") } if e.Revision() != 1 { t.Fatalf("Expected 1 for the revision, got %d", e.Revision()) } // Delete err = kv.Delete(ctx, "name") expectOk(t, err) _, err = kv.Get(ctx, "name") expectErr(t, err, jetstream.ErrKeyNotFound) r, err = kv.Create(ctx, "name", []byte("derek")) expectOk(t, err) if r != 3 { t.Fatalf("Expected 3 for the revision, got %d", r) } err = kv.Delete(ctx, "name", jetstream.LastRevision(4)) expectErr(t, err) err = kv.Delete(ctx, "name", jetstream.LastRevision(3)) expectOk(t, err) // Conditional Updates. 
r, err = kv.Update(ctx, "name", []byte("rip"), 4) expectOk(t, err) _, err = kv.Update(ctx, "name", []byte("ik"), 3) expectErr(t, err) _, err = kv.Update(ctx, "name", []byte("ik"), r) expectOk(t, err) r, err = kv.Create(ctx, "age", []byte("22")) expectOk(t, err) _, err = kv.Update(ctx, "age", []byte("33"), r) expectOk(t, err) // Status status, err := kv.Status(ctx) expectOk(t, err) if status.History() != 5 { t.Fatalf("expected history of 5 got %d", status.History()) } if status.Bucket() != "TEST" { t.Fatalf("expected bucket TEST got %v", status.Bucket()) } if status.TTL() != time.Hour { t.Fatalf("expected 1 hour TTL got %v", status.TTL()) } if status.Values() != 7 { t.Fatalf("expected 7 values got %d", status.Values()) } if status.BackingStore() != "JetStream" { t.Fatalf("invalid backing store kind %s", status.BackingStore()) } kvs := status.(*jetstream.KeyValueBucketStatus) si := kvs.StreamInfo() if si == nil { t.Fatalf("StreamInfo not received") } } func TestCreateKeyValue(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx := context.Background() // invalid bucket name _, err := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "TEST.", Description: "Test KV"}) expectErr(t, err, jetstream.ErrInvalidBucketName) _, err = js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "TEST", Description: "Test KV"}) expectOk(t, err) // Check that we can't overwrite existing bucket. 
_, err = js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "TEST", Description: "New KV"}) expectErr(t, err, jetstream.ErrBucketExists) // assert that we're backwards compatible expectErr(t, err, jetstream.ErrStreamNameAlreadyInUse) } func TestUpdateKeyValue(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx := context.Background() // cannot update a non-existing bucket _, err := js.UpdateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "TEST", Description: "Test KV"}) expectErr(t, err, jetstream.ErrBucketNotFound) _, err = js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "TEST", Description: "Test KV"}) expectOk(t, err) // update the bucket _, err = js.UpdateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "TEST", Description: "New KV"}) expectOk(t, err) } func TestCreateOrUpdateKeyValue(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx := context.Background() // invalid bucket name _, err := js.CreateOrUpdateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "TEST.", Description: "Test KV"}) expectErr(t, err, jetstream.ErrInvalidBucketName) _, err = js.CreateOrUpdateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "TEST", Description: "Test KV"}) expectOk(t, err) // update the bucket _, err = js.CreateOrUpdateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "TEST", Description: "New KV"}) expectOk(t, err) } func TestKeyValueHistory(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() kv, err := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "LIST", History: 10}) expectOk(t, err) for i := 0; i < 50; i++ { age := strconv.FormatUint(uint64(i+22), 10) _, err := kv.Put(ctx, "age", []byte(age)) expectOk(t, err) } vl, err 
:= kv.History(ctx, "age") expectOk(t, err) if len(vl) != 10 { t.Fatalf("Expected %d values, got %d", 10, len(vl)) } for i, v := range vl { if v.Key() != "age" { t.Fatalf("Expected key of %q, got %q", "age", v.Key()) } if v.Revision() != uint64(i+41) { // History of 10, sent 50.. t.Fatalf("Expected revision of %d, got %d", i+41, v.Revision()) } age, err := strconv.Atoi(string(v.Value())) expectOk(t, err) if age != i+62 { t.Fatalf("Expected data value of %d, got %d", i+22, age) } } } func TestKeyValueWatch(t *testing.T) { expectUpdateF := func(t *testing.T, watcher jetstream.KeyWatcher) func(key, value string, revision uint64) { return func(key, value string, revision uint64) { t.Helper() select { case v := <-watcher.Updates(): if v.Key() != key || string(v.Value()) != value || v.Revision() != revision { t.Fatalf("Did not get expected: %q %q %d vs %q %q %d", v.Key(), string(v.Value()), v.Revision(), key, value, revision) } case <-time.After(time.Second): t.Fatalf("Did not receive an update like expected") } } } expectDeleteF := func(t *testing.T, watcher jetstream.KeyWatcher) func(key string, revision uint64) { return func(key string, revision uint64) { t.Helper() select { case v := <-watcher.Updates(): if v.Operation() != jetstream.KeyValueDelete { t.Fatalf("Expected a delete operation but got %+v", v) } if v.Revision() != revision { t.Fatalf("Did not get expected revision: %d vs %d", revision, v.Revision()) } case <-time.After(time.Second): t.Fatalf("Did not receive an update like expected") } } } expectPurgeF := func(t *testing.T, watcher jetstream.KeyWatcher) func(key string, revision uint64) { return func(key string, revision uint64) { t.Helper() select { case v := <-watcher.Updates(): if v.Operation() != jetstream.KeyValuePurge { t.Fatalf("Expected a delete operation but got %+v", v) } if v.Revision() != revision { t.Fatalf("Did not get expected revision: %d vs %d", revision, v.Revision()) } case <-time.After(time.Second): t.Fatalf("Did not receive an update 
like expected") } } } expectInitDoneF := func(t *testing.T, watcher jetstream.KeyWatcher) func() { return func() { t.Helper() select { case v := <-watcher.Updates(): if v != nil { t.Fatalf("Did not get expected: %+v", v) } case <-time.After(time.Second): t.Fatalf("Did not receive a init done like expected") } } } t.Run("default watcher", func(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() kv, err := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "WATCH"}) expectOk(t, err) watcher, err := kv.WatchAll(ctx) expectOk(t, err) defer watcher.Stop() expectInitDone := expectInitDoneF(t, watcher) expectUpdate := expectUpdateF(t, watcher) expectDelete := expectDeleteF(t, watcher) // Make sure we already got an initial value marker. expectInitDone() _, err = kv.Create(ctx, "name", []byte("derek")) expectOk(t, err) expectUpdate("name", "derek", 1) _, err = kv.Put(ctx, "name", []byte("rip")) expectOk(t, err) expectUpdate("name", "rip", 2) _, err = kv.Put(ctx, "name", []byte("ik")) expectOk(t, err) expectUpdate("name", "ik", 3) _, err = kv.Put(ctx, "age", []byte("22")) expectOk(t, err) expectUpdate("age", "22", 4) _, err = kv.Put(ctx, "age", []byte("33")) expectOk(t, err) expectUpdate("age", "33", 5) expectOk(t, kv.Delete(ctx, "age")) expectDelete("age", 6) // Stop first watcher. watcher.Stop() // Now try wildcard matching and make sure we only get last value when starting. 
		_, err = kv.Put(ctx, "t.name", []byte("rip"))
		expectOk(t, err)
		_, err = kv.Put(ctx, "t.name", []byte("ik"))
		expectOk(t, err)
		_, err = kv.Put(ctx, "t.age", []byte("22"))
		expectOk(t, err)
		_, err = kv.Put(ctx, "t.age", []byte("44"))
		expectOk(t, err)

		// Without IncludeHistory only the latest value per key is delivered
		// before the init-done marker.
		watcher, err = kv.Watch(ctx, "t.*")
		expectOk(t, err)
		expectInitDone = expectInitDoneF(t, watcher)
		expectUpdate = expectUpdateF(t, watcher)
		expectUpdate("t.name", "ik", 8)
		expectUpdate("t.age", "44", 10)
		expectInitDone()
		watcher.Stop()

		// test watcher with multiple filters
		watcher, err = kv.WatchFiltered(ctx, []string{"t.name", "name"})
		expectOk(t, err)
		expectInitDone = expectInitDoneF(t, watcher)
		expectUpdate = expectUpdateF(t, watcher)
		expectPurge := expectPurgeF(t, watcher)
		expectUpdate("name", "ik", 3)
		expectUpdate("t.name", "ik", 8)
		expectInitDone()
		err = kv.Purge(ctx, "name")
		expectOk(t, err)
		expectPurge("name", 11)
		defer watcher.Stop()
	})
	t.Run("watcher with history included", func(t *testing.T) {
		s := RunBasicJetStreamServer()
		defer shutdownJSServerAndRemoveStorage(t, s)
		nc, js := jsClient(t, s)
		defer nc.Close()

		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()

		kv, err := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "WATCH", History: 64})
		expectOk(t, err)

		_, err = kv.Create(ctx, "name", []byte("derek"))
		expectOk(t, err)
		_, err = kv.Put(ctx, "name", []byte("rip"))
		expectOk(t, err)
		_, err = kv.Put(ctx, "name", []byte("ik"))
		expectOk(t, err)
		_, err = kv.Put(ctx, "age", []byte("22"))
		expectOk(t, err)
		_, err = kv.Put(ctx, "age", []byte("33"))
		expectOk(t, err)
		expectOk(t, kv.Delete(ctx, "age"))

		// when using IncludeHistory(), UpdatesOnly() is not allowed
		if _, err := kv.WatchAll(ctx, jetstream.IncludeHistory(), jetstream.UpdatesOnly()); !errors.Is(err, jetstream.ErrInvalidOption) {
			t.Fatalf("Expected %v, got %v", jetstream.ErrInvalidOption, err)
		}

		watcher, err := kv.WatchAll(ctx, jetstream.IncludeHistory())
		expectOk(t, err)
		defer watcher.Stop()
		expectInitDone := expectInitDoneF(t, watcher)
		expectUpdate := expectUpdateF(t, watcher)
		expectDelete := expectDeleteF(t, watcher)
		// With history included, every stored revision is replayed in order
		// before the init-done marker.
		expectUpdate("name", "derek", 1)
		expectUpdate("name", "rip", 2)
		expectUpdate("name", "ik", 3)
		expectUpdate("age", "22", 4)
		expectUpdate("age", "33", 5)
		expectDelete("age", 6)
		expectInitDone()
		_, err = kv.Put(ctx, "name", []byte("pp"))
		expectOk(t, err)
		expectUpdate("name", "pp", 7)

		// Stop first watcher.
		watcher.Stop()

		_, err = kv.Put(ctx, "t.name", []byte("rip"))
		expectOk(t, err)
		_, err = kv.Put(ctx, "t.name", []byte("ik"))
		expectOk(t, err)
		_, err = kv.Put(ctx, "t.age", []byte("22"))
		expectOk(t, err)
		_, err = kv.Put(ctx, "t.age", []byte("44"))
		expectOk(t, err)

		// try wildcard watcher and make sure we get all historical values
		watcher, err = kv.Watch(ctx, "t.*", jetstream.IncludeHistory())
		expectOk(t, err)
		defer watcher.Stop()
		expectInitDone = expectInitDoneF(t, watcher)
		expectUpdate = expectUpdateF(t, watcher)

		expectUpdate("t.name", "rip", 8)
		expectUpdate("t.name", "ik", 9)
		expectUpdate("t.age", "22", 10)
		expectUpdate("t.age", "44", 11)
		expectInitDone()
		_, err = kv.Put(ctx, "t.name", []byte("pp"))
		expectOk(t, err)
		expectUpdate("t.name", "pp", 12)
	})
	t.Run("watcher with updates only", func(t *testing.T) {
		s := RunBasicJetStreamServer()
		defer shutdownJSServerAndRemoveStorage(t, s)
		nc, js := jsClient(t, s)
		defer nc.Close()

		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()

		kv, err := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "WATCH", History: 64})
		expectOk(t, err)

		_, err = kv.Create(ctx, "name", []byte("derek"))
		expectOk(t, err)
		_, err = kv.Put(ctx, "name", []byte("rip"))
		expectOk(t, err)
		_, err = kv.Put(ctx, "age", []byte("22"))
		expectOk(t, err)

		// when using UpdatesOnly(), IncludeHistory() is not allowed
		if _, err := kv.WatchAll(ctx, jetstream.UpdatesOnly(), jetstream.IncludeHistory()); !errors.Is(err, jetstream.ErrInvalidOption) {
			t.Fatalf("Expected %v, got %v", jetstream.ErrInvalidOption, err)
		}

		watcher, err := kv.WatchAll(ctx, jetstream.UpdatesOnly())
		expectOk(t, err)
		defer watcher.Stop()
		expectUpdate := expectUpdateF(t, watcher)
		expectDelete := expectDeleteF(t, watcher)

		// now update some keys and expect updates
		_, err = kv.Put(ctx, "name", []byte("pp"))
		expectOk(t, err)
		expectUpdate("name", "pp", 4)
		_, err = kv.Put(ctx, "age", []byte("44"))
		expectOk(t, err)
		expectUpdate("age", "44", 5)
		expectOk(t, kv.Delete(ctx, "age"))
		expectDelete("age", 6)

		// Stop first watcher.
		watcher.Stop()

		_, err = kv.Put(ctx, "t.name", []byte("rip"))
		expectOk(t, err)
		_, err = kv.Put(ctx, "t.name", []byte("ik"))
		expectOk(t, err)
		_, err = kv.Put(ctx, "t.age", []byte("22"))
		expectOk(t, err)
		_, err = kv.Put(ctx, "t.age", []byte("44"))
		expectOk(t, err)

		// try wildcard watcher and make sure we do not get any values initially
		watcher, err = kv.Watch(ctx, "t.*", jetstream.UpdatesOnly())
		expectOk(t, err)
		defer watcher.Stop()
		expectUpdate = expectUpdateF(t, watcher)

		// update some keys and expect updates
		_, err = kv.Put(ctx, "t.name", []byte("pp"))
		expectOk(t, err)
		expectUpdate("t.name", "pp", 11)
		_, err = kv.Put(ctx, "t.age", []byte("66"))
		expectOk(t, err)
		expectUpdate("t.age", "66", 12)
	})
	t.Run("watcher with start revision", func(t *testing.T) {
		s := RunBasicJetStreamServer()
		defer shutdownJSServerAndRemoveStorage(t, s)
		nc, js := jsClient(t, s)
		defer nc.Close()

		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()

		kv, err := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "WATCH"})
		expectOk(t, err)

		_, err = kv.Create(ctx, "name", []byte("derek"))
		expectOk(t, err)
		_, err = kv.Put(ctx, "name", []byte("rip"))
		expectOk(t, err)
		_, err = kv.Put(ctx, "age", []byte("22"))
		expectOk(t, err)

		watcher, err := kv.WatchAll(ctx, jetstream.ResumeFromRevision(2))
		expectOk(t, err)
		defer watcher.Stop()
		expectUpdate := expectUpdateF(t, watcher)
		// check that we get only updates after revision 2
		expectUpdate("name", "rip", 2)
		expectUpdate("age", "22", 3)
		// stop first watcher
		watcher.Stop()

		_, err = kv.Put(ctx, "name2", []byte("ik"))
		expectOk(t, err)

		// create a new watcher with start revision 3
		watcher, err = kv.WatchAll(ctx, jetstream.ResumeFromRevision(3))
		expectOk(t, err)
		defer watcher.Stop()
		expectUpdate = expectUpdateF(t, watcher)
		// check that we get only updates after revision 3
		expectUpdate("age", "22", 3)
		expectUpdate("name2", "ik", 4)
	})
	t.Run("invalid watchers", func(t *testing.T) {
		s := RunBasicJetStreamServer()
		defer shutdownJSServerAndRemoveStorage(t, s)
		nc, js := jsClient(t, s)
		defer nc.Close()

		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()

		kv, err := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "WATCH"})
		expectOk(t, err)

		// empty keys
		_, err = kv.Watch(ctx, "")
		expectErr(t, err, jetstream.ErrInvalidKey)

		// invalid key
		_, err = kv.Watch(ctx, "a.>.b")
		expectErr(t, err, jetstream.ErrInvalidKey)
		_, err = kv.Watch(ctx, "foo.")
		expectErr(t, err, jetstream.ErrInvalidKey)

		// conflicting options
		_, err = kv.Watch(ctx, "foo", jetstream.IncludeHistory(), jetstream.UpdatesOnly())
		expectErr(t, err, jetstream.ErrInvalidOption)
	})
	t.Run("filtered watch with no filters", func(t *testing.T) {
		s := RunBasicJetStreamServer()
		defer shutdownJSServerAndRemoveStorage(t, s)
		nc, js := jsClient(t, s)
		defer nc.Close()

		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()

		kv, err := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "WATCH"})
		expectOk(t, err)

		// this should behave like WatchAll
		watcher, err := kv.WatchFiltered(ctx, []string{})
		expectOk(t, err)
		defer watcher.Stop()

		expectInitDone := expectInitDoneF(t, watcher)
		expectUpdate := expectUpdateF(t, watcher)
		expectDelete := expectDeleteF(t, watcher)

		// Make sure we already got an initial value marker.
		expectInitDone()

		_, err = kv.Create(ctx, "name", []byte("derek"))
		expectOk(t, err)
		expectUpdate("name", "derek", 1)
		_, err = kv.Put(ctx, "name", []byte("rip"))
		expectOk(t, err)
		expectUpdate("name", "rip", 2)
		_, err = kv.Put(ctx, "name", []byte("ik"))
		expectOk(t, err)
		expectUpdate("name", "ik", 3)
		_, err = kv.Put(ctx, "age", []byte("22"))
		expectOk(t, err)
		expectUpdate("age", "22", 4)
		_, err = kv.Put(ctx, "age", []byte("33"))
		expectOk(t, err)
		expectUpdate("age", "33", 5)
		expectOk(t, kv.Delete(ctx, "age"))
		expectDelete("age", 6)
	})
	t.Run("stop watcher should not block", func(t *testing.T) {
		s := RunBasicJetStreamServer()
		defer shutdownJSServerAndRemoveStorage(t, s)
		nc, js := jsClient(t, s)
		defer nc.Close()

		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()

		kv, err := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "WATCH"})
		expectOk(t, err)

		watcher, err := kv.WatchAll(ctx)
		expectOk(t, err)

		expectInitDone := expectInitDoneF(t, watcher)
		expectInitDone()

		err = watcher.Stop()
		expectOk(t, err)

		// After Stop the updates channel should be closed (or simply empty),
		// never blocking the receiver.
		select {
		case _, ok := <-watcher.Updates():
			if ok {
				t.Fatalf("Expected channel to be closed")
			}
		case <-time.After(100 * time.Millisecond):
			break
		}
	})
}

// TestKeyValueWatchContext verifies that canceling the watch context tears
// down the watcher's underlying subscription.
func TestKeyValueWatchContext(t *testing.T) {
	s := RunBasicJetStreamServer()
	defer shutdownJSServerAndRemoveStorage(t, s)
	nc, js := jsClient(t, s)
	defer nc.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	kv, err := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "WATCHCTX"})
	expectOk(t, err)

	watcher, err := kv.WatchAll(ctx)
	expectOk(t, err)
	defer watcher.Stop()

	// Trigger unsubscribe internally.
	cancel()

	// Wait for a bit for unsubscribe to be done.
	time.Sleep(500 * time.Millisecond)

	// Stopping watch that is already stopped via cancellation propagation is an error.
err = watcher.Stop() if err == nil || err != nats.ErrBadSubscription { t.Errorf("Expected invalid subscription, got: %v", err) } } func TestKeyValueWatchContextUpdates(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() kv, err := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "WATCHCTX"}) expectOk(t, err) watcher, err := kv.WatchAll(ctx) expectOk(t, err) defer watcher.Stop() // Pull the initial state done marker which is nil. select { case v := <-watcher.Updates(): if v != nil { t.Fatalf("Expected nil marker, got %+v", v) } case <-time.After(time.Second): t.Fatalf("Did not receive nil marker like expected") } // Fire a timer and cancel the context after 250ms. time.AfterFunc(250*time.Millisecond, cancel) // Make sure canceling will break us out here. select { case <-watcher.Updates(): case <-time.After(5 * time.Second): t.Fatalf("Did not break out like expected") } } func TestKeyValueBindStore(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() _, err := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "WATCH"}) expectOk(t, err) // Now bind to it.. _, err = js.KeyValue(ctx, "WATCH") expectOk(t, err) // Make sure we can't bind to a non-kv style stream. // We have some protection with stream name prefix. 
_, err = js.CreateStream(ctx, jetstream.StreamConfig{ Name: "KV_TEST", Subjects: []string{"foo"}, }) expectOk(t, err) _, err = js.KeyValue(ctx, "TEST") expectErr(t, err) if err != jetstream.ErrBadBucket { t.Fatalf("Expected %v but got %v", jetstream.ErrBadBucket, err) } } func TestKeyValueDeleteStore(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() _, err := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "WATCH"}) expectOk(t, err) err = js.DeleteKeyValue(ctx, "WATCH") expectOk(t, err) // delete again should fail err = js.DeleteKeyValue(ctx, "WATCH") expectErr(t, err, jetstream.ErrBucketNotFound) // check that we're backwards compatible expectErr(t, err, jetstream.ErrStreamNotFound) _, err = js.KeyValue(ctx, "WATCH") expectErr(t, err, jetstream.ErrBucketNotFound) } func TestKeyValueDeleteVsPurge(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() kv, err := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "KVS", History: 10}) expectOk(t, err) put := func(key, value string) { t.Helper() _, err := kv.Put(ctx, key, []byte(value)) expectOk(t, err) } // Put in a few names and ages. put("name", "derek") put("age", "22") put("name", "ivan") put("age", "33") put("name", "rip") put("age", "44") expectOk(t, kv.Delete(ctx, "age")) entries, err := kv.History(ctx, "age") expectOk(t, err) // Expect three entries and delete marker. 
if len(entries) != 4 { t.Fatalf("Expected 4 entries for age after delete, got %d", len(entries)) } err = kv.Purge(ctx, "name", jetstream.LastRevision(4)) expectErr(t, err) err = kv.Purge(ctx, "name", jetstream.LastRevision(5)) expectOk(t, err) // Check marker e, err := kv.Get(ctx, "name") expectErr(t, err, jetstream.ErrKeyNotFound) if e != nil { t.Fatalf("Expected a nil entry but got %v", e) } entries, err = kv.History(ctx, "name") expectOk(t, err) if len(entries) != 1 { t.Fatalf("Expected only 1 entry for age after delete, got %d", len(entries)) } // Make sure history also reports the purge operation. if e := entries[0]; e.Operation() != jetstream.KeyValuePurge { t.Fatalf("Expected a purge operation but got %v", e.Operation()) } } func TestKeyValueDeleteTombstones(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() kv, err := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "KVS", History: 10}) expectOk(t, err) put := func(key, value string) { t.Helper() _, err := kv.Put(ctx, key, []byte(value)) expectOk(t, err) } v := strings.Repeat("ABC", 33) for i := 1; i <= 100; i++ { put(fmt.Sprintf("key-%d", i), v) } // Now delete them. for i := 1; i <= 100; i++ { err := kv.Delete(ctx, fmt.Sprintf("key-%d", i)) expectOk(t, err) } // Now cleanup. 
err = kv.PurgeDeletes(ctx, jetstream.DeleteMarkersOlderThan(-1)) expectOk(t, err) si, err := js.Stream(ctx, "KV_KVS") expectOk(t, err) if si.CachedInfo().State.Msgs != 0 { t.Fatalf("Expected no stream msgs to be left, got %d", si.CachedInfo().State.Msgs) } // Try with context ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second) defer cancel() err = kv.PurgeDeletes(nats.Context(ctx)) expectOk(t, err) } func TestKeyValuePurgeDeletesMarkerThreshold(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() kv, err := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "KVS", History: 10}) expectOk(t, err) put := func(key, value string) { t.Helper() _, err := kv.Put(ctx, key, []byte(value)) expectOk(t, err) } put("foo", "foo1") put("bar", "bar1") put("foo", "foo2") err = kv.Delete(ctx, "foo") expectOk(t, err) time.Sleep(200 * time.Millisecond) err = kv.Delete(ctx, "bar") expectOk(t, err) err = kv.PurgeDeletes(ctx, jetstream.DeleteMarkersOlderThan(100*time.Millisecond)) expectOk(t, err) // The key foo should have been completely cleared of the data // and the delete marker. 
fooEntries, err := kv.History(ctx, "foo") if err != jetstream.ErrKeyNotFound { t.Fatalf("Expected all entries for key foo to be gone, got err=%v entries=%v", err, fooEntries) } barEntries, err := kv.History(ctx, "bar") expectOk(t, err) if len(barEntries) != 1 { t.Fatalf("Expected 1 entry, got %v", barEntries) } if e := barEntries[0]; e.Operation() != jetstream.KeyValueDelete { t.Fatalf("Unexpected entry: %+v", e) } } func TestKeyValueKeys(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() kv, err := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "KVS", History: 2}) expectOk(t, err) put := func(key, value string) { t.Helper() _, err := kv.Put(ctx, key, []byte(value)) expectOk(t, err) } _, err = kv.Keys(ctx) expectErr(t, err, jetstream.ErrNoKeysFound) // Put in a few names and ages. put("name", "derek") put("age", "22") put("country", "US") put("name", "ivan") put("age", "33") put("country", "US") put("name", "rip") put("age", "44") put("country", "MT") keys, err := kv.Keys(ctx) expectOk(t, err) kmap := make(map[string]struct{}) for _, key := range keys { if _, ok := kmap[key]; ok { t.Fatalf("Already saw %q", key) } kmap[key] = struct{}{} } if len(kmap) != 3 { t.Fatalf("Expected 3 total keys, got %d", len(kmap)) } expected := map[string]struct{}{ "name": struct{}{}, "age": struct{}{}, "country": struct{}{}, } if !reflect.DeepEqual(kmap, expected) { t.Fatalf("Expected %+v but got %+v", expected, kmap) } // Make sure delete and purge do the right thing and not return the keys. 
err = kv.Delete(ctx, "name") expectOk(t, err) err = kv.Purge(ctx, "country") expectOk(t, err) keys, err = kv.Keys(ctx) expectOk(t, err) kmap = make(map[string]struct{}) for _, key := range keys { if _, ok := kmap[key]; ok { t.Fatalf("Already saw %q", key) } kmap[key] = struct{}{} } if len(kmap) != 1 { t.Fatalf("Expected 1 total key, got %d", len(kmap)) } if _, ok := kmap["age"]; !ok { t.Fatalf("Expected %q to be only key present", "age") } } func TestKeyValueListKeys(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() kv, err := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "KVS", History: 2}) expectOk(t, err) put := func(key, value string) { t.Helper() _, err := kv.Put(ctx, key, []byte(value)) expectOk(t, err) } // Put in a few names and ages. put("name", "derek") put("age", "22") put("country", "US") put("name", "ivan") put("age", "33") put("country", "US") put("name", "rip") put("age", "44") put("country", "MT") keys, err := kv.ListKeys(ctx) expectOk(t, err) kmap := make(map[string]struct{}) for key := range keys.Keys() { if _, ok := kmap[key]; ok { t.Fatalf("Already saw %q", key) } kmap[key] = struct{}{} } if len(kmap) != 3 { t.Fatalf("Expected 3 total keys, got %d", len(kmap)) } expected := map[string]struct{}{ "name": struct{}{}, "age": struct{}{}, "country": struct{}{}, } if !reflect.DeepEqual(kmap, expected) { t.Fatalf("Expected %+v but got %+v", expected, kmap) } // Make sure delete and purge do the right thing and not return the keys. 
err = kv.Delete(ctx, "name") expectOk(t, err) err = kv.Purge(ctx, "country") expectOk(t, err) keys, err = kv.ListKeys(ctx) expectOk(t, err) kmap = make(map[string]struct{}) for key := range keys.Keys() { if _, ok := kmap[key]; ok { t.Fatalf("Already saw %q", key) } kmap[key] = struct{}{} } if len(kmap) != 1 { t.Fatalf("Expected 1 total key, got %d", len(kmap)) } if _, ok := kmap["age"]; !ok { t.Fatalf("Expected %q to be only key present", "age") } } func TestListKeysFiltered(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() // Create Key-Value store. kv, err := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "KVS", History: 2}) expectOk(t, err) // Helper function to add key-value pairs. putKeys := func(data map[string]string) { for key, value := range data { t.Helper() _, err := kv.Put(ctx, key, []byte(value)) expectOk(t, err) } } // Add key-value pairs. putKeys(map[string]string{ "apple": "fruit", "banana": "fruit", "carrot": "vegetable", }) // Use filters to list keys matching "apple". filters := []string{"apple"} keyLister, err := kv.ListKeysFiltered(ctx, filters...) expectOk(t, err) // Collect filtered keys from KeyLister var filteredKeys []string for key := range keyLister.Keys() { filteredKeys = append(filteredKeys, key) } // Validate expected keys. expectedKeys := []string{"apple"} if len(filteredKeys) != len(expectedKeys) { t.Fatalf("Expected %d filtered key(s), got %d", len(expectedKeys), len(filteredKeys)) } for _, key := range expectedKeys { if !contains(filteredKeys, key) { t.Fatalf("Expected key %s in filtered keys, but not found", key) } } // delete apple so we can ensure we do not see it later after another filtered search err = kv.Delete(ctx, "apple") expectOk(t, err) filters = []string{"apple"} keyLister, err = kv.ListKeysFiltered(ctx, filters...) 
expectOk(t, err) // reset filtered keys filteredKeys = nil for key := range keyLister.Keys() { filteredKeys = append(filteredKeys, key) } if len(filteredKeys) != 0 { t.Fatalf("Expected 0 deleted keys, but %d was found", len(filteredKeys)) } } func contains(slice []string, key string) bool { for _, k := range slice { if k == key { return true } } return false } func TestKeyValueCrossAccounts(t *testing.T) { conf := createConfFile(t, []byte(` listen: 127.0.0.1:-1 jetstream: enabled accounts: { A: { users: [ {user: a, password: a} ] jetstream: enabled exports: [ {service: '$JS.API.>' } {service: '$KV.>'} {stream: 'accI.>'} ] }, I: { users: [ {user: i, password: i} ] imports: [ {service: {account: A, subject: '$JS.API.>'}, to: 'fromA.>' } {service: {account: A, subject: '$KV.>'}, to: 'fromA.$KV.>' } {stream: {subject: 'accI.>', account: A}} ] } }`)) defer os.Remove(conf) s, _ := RunServerWithConfig(conf) defer shutdownJSServerAndRemoveStorage(t, s) watchNext := func(w jetstream.KeyWatcher) jetstream.KeyValueEntry { t.Helper() select { case e := <-w.Updates(): return e case <-time.After(time.Second): t.Fatal("Fail to get the next update") } return nil } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() nc1, js1 := jsClient(t, s, nats.UserInfo("a", "a")) defer nc1.Close() kv1, err := js1.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "Map", History: 10}) if err != nil { t.Fatalf("Error creating kv store: %v", err) } w1, err := kv1.Watch(ctx, "map") if err != nil { t.Fatalf("Error creating watcher: %v", err) } if e := watchNext(w1); e != nil { t.Fatalf("Expected nil entry, got %+v", e) } nc2, err := nats.Connect(s.ClientURL(), nats.UserInfo("i", "i"), nats.CustomInboxPrefix("accI")) if err != nil { t.Fatalf("Error on connect: %v", err) } defer nc2.Close() js2, err := jetstream.NewWithAPIPrefix(nc2, "fromA") if err != nil { t.Fatalf("Error getting jetstream context: %v", err) } kv2, err := js2.CreateKeyValue(ctx, 
jetstream.KeyValueConfig{Bucket: "Map", History: 10}) if err != nil { t.Fatalf("Error creating kv store: %v", err) } w2, err := kv2.Watch(ctx, "map") if err != nil { t.Fatalf("Error creating watcher: %v", err) } if e := watchNext(w2); e != nil { t.Fatalf("Expected nil entry, got %+v", e) } // Do a Put from kv2 rev, err := kv2.Put(ctx, "map", []byte("value")) if err != nil { t.Fatalf("Error on put: %v", err) } // Get from kv1 e, err := kv1.Get(ctx, "map") if err != nil { t.Fatalf("Error on get: %v", err) } if e.Key() != "map" || string(e.Value()) != "value" { t.Fatalf("Unexpected entry: +%v", e) } // Get from kv2 e, err = kv2.Get(ctx, "map") if err != nil { t.Fatalf("Error on get: %v", err) } if e.Key() != "map" || string(e.Value()) != "value" { t.Fatalf("Unexpected entry: +%v", e) } // Watcher 1 if e := watchNext(w1); e == nil || e.Key() != "map" || string(e.Value()) != "value" { t.Fatalf("Unexpected entry: %+v", e) } // Watcher 2 if e := watchNext(w2); e == nil || e.Key() != "map" || string(e.Value()) != "value" { t.Fatalf("Unexpected entry: %+v", e) } // Try an update form kv2 if _, err := kv2.Update(ctx, "map", []byte("updated"), rev); err != nil { t.Fatalf("Failed to update: %v", err) } // Get from kv1 e, err = kv1.Get(ctx, "map") if err != nil { t.Fatalf("Error on get: %v", err) } if e.Key() != "map" || string(e.Value()) != "updated" { t.Fatalf("Unexpected entry: +%v", e) } // Get from kv2 e, err = kv2.Get(ctx, "map") if err != nil { t.Fatalf("Error on get: %v", err) } if e.Key() != "map" || string(e.Value()) != "updated" { t.Fatalf("Unexpected entry: +%v", e) } // Watcher 1 if e := watchNext(w1); e == nil || e.Key() != "map" || string(e.Value()) != "updated" { t.Fatalf("Unexpected entry: %+v", e) } // Watcher 2 if e := watchNext(w2); e == nil || e.Key() != "map" || string(e.Value()) != "updated" { t.Fatalf("Unexpected entry: %+v", e) } // Purge from kv2 if err := kv2.Purge(ctx, "map"); err != nil { t.Fatalf("Error on purge: %v", err) } // Check purge ok from 
w1 if e := watchNext(w1); e == nil || e.Operation() != jetstream.KeyValuePurge { t.Fatalf("Unexpected entry: %+v", e) } // Check purge ok from w2 if e := watchNext(w2); e == nil || e.Operation() != jetstream.KeyValuePurge { t.Fatalf("Unexpected entry: %+v", e) } // Delete purge records from kv2 if err := kv2.PurgeDeletes(ctx, jetstream.DeleteMarkersOlderThan(-1)); err != nil { t.Fatalf("Error on purge deletes: %v", err) } // Check all gone from js1 if si, err := js1.Stream(ctx, "KV_Map"); err != nil || si == nil || si.CachedInfo().State.Msgs != 0 { t.Fatalf("Error getting stream info: err=%v si=%+v", err, si) } // Delete key from kv2 if err := kv2.Delete(ctx, "map"); err != nil { t.Fatalf("Error on delete: %v", err) } // Check key gone from kv1 if e, err := kv1.Get(ctx, "map"); err != jetstream.ErrKeyNotFound || e != nil { t.Fatalf("Expected key not found, got err=%v e=%+v", err, e) } } func TestKeyValueDuplicatesWindow(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() checkWindow := func(ttl, expectedDuplicates time.Duration) { t.Helper() _, err := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "TEST", History: 5, TTL: ttl}) expectOk(t, err) defer func() { expectOk(t, js.DeleteKeyValue(ctx, "TEST")) }() si, err := js.Stream(ctx, "KV_TEST") if err != nil { t.Fatalf("StreamInfo error: %v", err) } if si.CachedInfo().Config.Duplicates != expectedDuplicates { t.Fatalf("Expected duplicates to be %v, got %v", expectedDuplicates, si.CachedInfo().Config.Duplicates) } } checkWindow(0, 2*time.Minute) checkWindow(time.Hour, 2*time.Minute) checkWindow(5*time.Second, 5*time.Second) } func TestListKeyValueStores(t *testing.T) { tests := []struct { name string bucketsNum int }{ { name: "single page", bucketsNum: 5, }, { name: "multi page", bucketsNum: 1025, }, } for _, test := range tests { 
t.Run(test.name, func(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() // create stream without the chunk subject, but with KV_ prefix _, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "KV_FOO", Subjects: []string{"FOO.*"}}) expectOk(t, err) // create stream with chunk subject, but without "KV_" prefix _, err = js.CreateStream(ctx, jetstream.StreamConfig{Name: "FOO", Subjects: []string{"$KV.ABC.>"}}) expectOk(t, err) for i := 0; i < test.bucketsNum; i++ { _, err = js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: fmt.Sprintf("KVS_%d", i), MaxBytes: 1024}) expectOk(t, err) } names := make([]string, 0) kvNames := js.KeyValueStoreNames(ctx) for name := range kvNames.Name() { if strings.HasPrefix(name, "KV_") { t.Fatalf("Expected name without KV_ prefix, got %q", name) } names = append(names, name) } if kvNames.Error() != nil { t.Fatalf("Unexpected error: %v", kvNames.Error()) } if len(names) != test.bucketsNum { t.Fatalf("Invalid number of stream names; want: %d; got: %d", test.bucketsNum, len(names)) } infos := make([]nats.KeyValueStatus, 0) kvInfos := js.KeyValueStores(ctx) for info := range kvInfos.Status() { infos = append(infos, info) } if kvInfos.Error() != nil { t.Fatalf("Unexpected error: %v", kvNames.Error()) } if len(infos) != test.bucketsNum { t.Fatalf("Invalid number of streams; want: %d; got: %d", test.bucketsNum, len(infos)) } }) } } func TestKeyValueMirrorCrossDomains(t *testing.T) { keyExists := func(t *testing.T, kv jetstream.KeyValue, key string, expected string) jetstream.KeyValueEntry { var e jetstream.KeyValueEntry var err error checkFor(t, 10*time.Second, 10*time.Millisecond, func() error { e, err = kv.Get(context.Background(), key) if err != nil { return err } if string(e.Value()) != expected { return fmt.Errorf("Expected value to be %q, got %q", expected, 
e.Value()) } return nil }) return e } keyDeleted := func(t *testing.T, kv jetstream.KeyValue, key string) { checkFor(t, 10*time.Second, 10*time.Millisecond, func() error { _, err := kv.Get(context.Background(), key) if err == nil { return errors.New("Expected key to be gone") } if !errors.Is(err, jetstream.ErrKeyNotFound) { return err } return nil }) } conf := createConfFile(t, []byte(` server_name: HUB listen: 127.0.0.1:-1 jetstream: { domain: HUB } leafnodes { listen: 127.0.0.1:7422 } }`)) defer os.Remove(conf) s, _ := RunServerWithConfig(conf) defer shutdownJSServerAndRemoveStorage(t, s) lconf := createConfFile(t, []byte(` server_name: LEAF listen: 127.0.0.1:-1 jetstream: { domain:LEAF } leafnodes { remotes = [ { url: "leaf://127.0.0.1" } ] } }`)) defer os.Remove(lconf) ln, _ := RunServerWithConfig(lconf) defer shutdownJSServerAndRemoveStorage(t, ln) // Create main KV on HUB nc, js := jsClient(t, s) defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() kv, err := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "TEST"}) expectOk(t, err) _, err = kv.PutString(ctx, "name", "derek") expectOk(t, err) _, err = kv.PutString(ctx, "age", "22") expectOk(t, err) _, err = kv.PutString(ctx, "v", "v") expectOk(t, err) err = kv.Delete(ctx, "v") expectOk(t, err) lnc, ljs := jsClient(t, ln) defer lnc.Close() // Capture cfg so we can make sure it does not change. // NOTE: We use different name to test all possibilities, etc, but in practice for truly nomadic applications // this should be named the same, e.g. TEST. cfg := jetstream.KeyValueConfig{ Bucket: "MIRROR", Mirror: &jetstream.StreamSource{ Name: "TEST", Domain: "HUB", }, } ccfg := cfg _, err = ljs.CreateKeyValue(ctx, cfg) expectOk(t, err) if !reflect.DeepEqual(cfg, ccfg) { t.Fatalf("Did not expect config to be altered: %+v vs %+v", cfg, ccfg) } si, err := ljs.Stream(ctx, "KV_MIRROR") expectOk(t, err) // Make sure mirror direct set. 
if !si.CachedInfo().Config.MirrorDirect {
		t.Fatalf("Expected mirror direct to be set")
	}
	// Make sure we sync.
	checkFor(t, 2*time.Second, 15*time.Millisecond, func() error {
		si, err := ljs.Stream(ctx, "KV_MIRROR")
		expectOk(t, err)
		if si.CachedInfo().State.Msgs == 3 {
			return nil
		}
		return fmt.Errorf("Did not get synched messages: %d", si.CachedInfo().State.Msgs)
	})
	// Bind locally from leafnode and make sure both get and put work.
	mkv, err := ljs.KeyValue(ctx, "MIRROR")
	expectOk(t, err)
	_, err = mkv.PutString(ctx, "name", "rip")
	expectOk(t, err)
	_, err = mkv.PutString(ctx, "v", "vv")
	expectOk(t, err)
	e := keyExists(t, kv, "v", "vv")
	// CONSISTENCY FIX: the failure message previously printed the legacy
	// nats.KeyValuePut constant while the comparison uses the jetstream
	// package constant; use jetstream.KeyValuePut in both places.
	if e.Operation() != jetstream.KeyValuePut {
		t.Fatalf("Got wrong value: %q vs %q", e.Operation(), jetstream.KeyValuePut)
	}
	err = mkv.Delete(ctx, "v")
	expectOk(t, err)
	keyDeleted(t, kv, "v")
	keyExists(t, kv, "name", "rip")
	// Also make sure we can create a watcher on the mirror KV.
	watcher, err := mkv.WatchAll(ctx)
	expectOk(t, err)
	defer watcher.Stop()
	// Bind through leafnode connection but to origin KV.
	rjs, err := jetstream.NewWithDomain(nc, "HUB")
	expectOk(t, err)
	rkv, err := rjs.KeyValue(ctx, "TEST")
	expectOk(t, err)
	_, err = rkv.PutString(ctx, "name", "ivan")
	expectOk(t, err)
	keyExists(t, mkv, "name", "ivan")
	_, err = rkv.PutString(ctx, "v", "vv")
	expectOk(t, err)
	e = keyExists(t, mkv, "v", "vv")
	if e.Operation() != jetstream.KeyValuePut {
		t.Fatalf("Got wrong value: %q vs %q", e.Operation(), jetstream.KeyValuePut)
	}
	err = rkv.Delete(ctx, "v")
	expectOk(t, err)
	keyDeleted(t, mkv, "v")
	// Shutdown cluster and test get still work.
shutdownJSServerAndRemoveStorage(t, s)
	// The origin (hub) server is gone; reads must still be served from the
	// leafnode's mirror.
	keyExists(t, mkv, "name", "ivan")
}

// TestKeyValueRePublish checks that a KV bucket configured with RePublish
// forwards each put to the destination subject, and that RePublish cannot be
// added to an existing bucket.
func TestKeyValueRePublish(t *testing.T) {
	s := RunBasicJetStreamServer()
	defer shutdownJSServerAndRemoveStorage(t, s)
	nc, js := jsClient(t, s)
	defer nc.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if _, err := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{
		Bucket: "TEST_UPDATE",
	}); err != nil {
		t.Fatalf("Error creating store: %v", err)
	}
	// This is expected to fail since server does not support as of now
	// the update of RePublish.
	if _, err := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{
		Bucket:    "TEST_UPDATE",
		RePublish: &jetstream.RePublish{Source: ">", Destination: "bar.>"},
	}); err == nil {
		t.Fatal("Expected failure, did not get one")
	}
	kv, err := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{
		Bucket:    "TEST",
		RePublish: &jetstream.RePublish{Source: ">", Destination: "bar.>"},
	})
	if err != nil {
		t.Fatalf("Error creating store: %v", err)
	}
	si, err := js.Stream(ctx, "KV_TEST")
	if err != nil {
		t.Fatalf("Error getting stream info: %v", err)
	}
	if si.CachedInfo().Config.RePublish == nil {
		t.Fatal("Expected republish to be set, it was not")
	}
	// Subscribe on the republish destination and verify a put shows up there.
	sub, err := nc.SubscribeSync("bar.>")
	if err != nil {
		t.Fatalf("Error on sub: %v", err)
	}
	if _, err := kv.Put(ctx, "foo", []byte("value")); err != nil {
		t.Fatalf("Error on put: %v", err)
	}
	msg, err := sub.NextMsg(time.Second)
	if err != nil {
		t.Fatalf("Error on next: %v", err)
	}
	if v := string(msg.Data); v != "value" {
		t.Fatalf("Unexpected value: %s", v)
	}
	// The message should also have a header with the actual subject
	expected := "$KV.TEST.foo"
	if v := msg.Header.Get(jetstream.SubjectHeader); v != expected {
		t.Fatalf("Expected subject header %q, got %q", expected, v)
	}
}

// TestKeyValueMirrorDirectGet verifies that Get on a KV bucket keeps working
// when a direct-get-enabled mirror stream of the bucket exists.
func TestKeyValueMirrorDirectGet(t *testing.T) {
	s := RunBasicJetStreamServer()
	defer shutdownJSServerAndRemoveStorage(t, s)
	nc, js := jsClient(t, s)
	defer nc.Close()
	ctx, cancel := context.WithTimeout(context.Background(),
5*time.Second)
	defer cancel()
	kv, err := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "TEST"})
	if err != nil {
		t.Fatalf("Error creating kv: %v", err)
	}
	_, err = js.CreateStream(ctx, jetstream.StreamConfig{
		Name:         "MIRROR",
		Mirror:       &jetstream.StreamSource{Name: "KV_TEST"},
		MirrorDirect: true,
	})
	if err != nil {
		t.Fatalf("Error creating mirror: %v", err)
	}
	for i := 0; i < 100; i++ {
		key := fmt.Sprintf("KEY.%d", i)
		if _, err := kv.PutString(ctx, key, "42"); err != nil {
			t.Fatalf("Error adding key: %v", err)
		}
	}
	// Make sure all gets work.
	// BUG FIX: read back every key that was written; the loop previously
	// fetched the same key ("KEY.22") on every iteration.
	for i := 0; i < 100; i++ {
		if _, err := kv.Get(ctx, fmt.Sprintf("KEY.%d", i)); err != nil {
			t.Fatalf("Got error getting key: %v", err)
		}
	}
}

// TestKeyValueCreate validates the exact stream configuration generated for a
// new KV bucket and the error chain returned when creating a duplicate key.
func TestKeyValueCreate(t *testing.T) {
	s := RunBasicJetStreamServer()
	defer shutdownJSServerAndRemoveStorage(t, s)
	nc, js := jsClient(t, s)
	defer nc.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	kv, err := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{
		Bucket:       "TEST",
		Description:  "Test KV",
		MaxValueSize: 128,
		History:      10,
		TTL:          1 * time.Hour,
		MaxBytes:     1024,
		Storage:      jetstream.FileStorage,
	})
	if err != nil {
		t.Fatalf("Error creating kv: %v", err)
	}
	expectedStreamConfig := jetstream.StreamConfig{
		Name:              "KV_TEST",
		Description:       "Test KV",
		Subjects:          []string{"$KV.TEST.>"},
		MaxMsgs:           -1,
		MaxBytes:          1024,
		Discard:           jetstream.DiscardNew,
		MaxAge:            1 * time.Hour,
		MaxMsgsPerSubject: 10,
		MaxMsgSize:        128,
		Storage:           jetstream.FileStorage,
		DenyDelete:        true,
		AllowRollup:       true,
		AllowDirect:       true,
		MaxConsumers:      -1,
		Replicas:          1,
		Duplicates:        2 * time.Minute,
	}
	stream, err := js.Stream(ctx, "KV_TEST")
	if err != nil {
		t.Fatalf("Error getting stream: %v", err)
	}
	// server will set metadata values, so we need to clear them
	stream.CachedInfo().Config.Metadata = nil
	if !reflect.DeepEqual(stream.CachedInfo().Config, expectedStreamConfig) {
		t.Fatalf("Expected stream config to be %+v, got %+v", expectedStreamConfig, stream.CachedInfo().Config)
	}
	_, err = kv.Create(ctx, "key",
[]byte("1")) if err != nil { t.Fatalf("Error creating key: %v", err) } _, err = kv.Create(ctx, "key", []byte("1")) expected := "wrong last sequence: 1: key exists" if !strings.Contains(err.Error(), expected) { t.Fatalf("Expected %q, got: %v", expected, err) } if !errors.Is(err, jetstream.ErrKeyExists) { t.Fatalf("Expected ErrKeyExists, got: %v", err) } aerr := &jetstream.APIError{} if !errors.As(err, &aerr) { t.Fatalf("Expected APIError, got: %v", err) } if aerr.Description != "wrong last sequence: 1" { t.Fatalf("Unexpected APIError message, got: %v", aerr.Description) } if aerr.ErrorCode != 10071 { t.Fatalf("Unexpected error code, got: %v", aerr.ErrorCode) } if aerr.Code != jetstream.ErrKeyExists.APIError().Code { t.Fatalf("Unexpected error code, got: %v", aerr.Code) } var kerr jetstream.JetStreamError if !errors.As(err, &kerr) { t.Fatalf("Expected KeyValueError, got: %v", err) } if kerr.APIError().ErrorCode != 10071 { t.Fatalf("Unexpected error code, got: %v", kerr.APIError().ErrorCode) } } // Helpers func client(t *testing.T, s *server.Server, opts ...nats.Option) *nats.Conn { t.Helper() nc, err := nats.Connect(s.ClientURL(), opts...) if err != nil { t.Fatalf("Unexpected error: %v", err) } return nc } func jsClient(t *testing.T, s *server.Server, opts ...nats.Option) (*nats.Conn, jetstream.JetStream) { t.Helper() nc := client(t, s, opts...) 
js, err := jetstream.New(nc)
	if err != nil {
		t.Fatalf("Unexpected error getting JetStream context: %v", err)
	}
	return nc, js
}

// expectOk fails the test immediately when err is non-nil.
func expectOk(t *testing.T, err error) {
	t.Helper()
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
}

// expectErr fails unless err is non-nil and, when expected errors are
// provided, matches at least one of them via errors.Is.
func expectErr(t *testing.T, err error, expected ...error) {
	t.Helper()
	if err == nil {
		t.Fatalf("Expected error but got none")
	}
	if len(expected) == 0 {
		return
	}
	for _, e := range expected {
		if errors.Is(err, e) {
			return
		}
	}
	t.Fatalf("Expected one of %+v, got '%v'", expected, err)
}

// TestKeyValueCompression checks that a compressed bucket reports compression
// in its status and that the backing stream uses S2 compression.
func TestKeyValueCompression(t *testing.T) {
	s := RunBasicJetStreamServer()
	defer shutdownJSServerAndRemoveStorage(t, s)
	nc, js := jsClient(t, s)
	defer nc.Close()
	ctx := context.Background()
	kvCompressed, err := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{
		Bucket:      "A",
		Compression: true,
	})
	if err != nil {
		t.Fatalf("Error creating kv: %v", err)
	}
	status, err := kvCompressed.Status(ctx)
	if err != nil {
		t.Fatalf("Error getting bucket status: %v", err)
	}
	if !status.IsCompressed() {
		t.Fatalf("Expected bucket to be compressed")
	}
	kvStream, err := js.Stream(ctx, "KV_A")
	if err != nil {
		t.Fatalf("Error getting stream info: %v", err)
	}
	if kvStream.CachedInfo().Config.Compression != jetstream.S2Compression {
		t.Fatalf("Expected stream to be compressed with S2")
	}
}

// TestKeyValueCreateRepairOldKV verifies that CreateKeyValue repairs the
// stream configuration of a bucket created by an older client (DiscardOld,
// AllowDirect disabled).
func TestKeyValueCreateRepairOldKV(t *testing.T) {
	s := RunBasicJetStreamServer()
	defer shutdownJSServerAndRemoveStorage(t, s)
	nc, js := jsClient(t, s)
	defer nc.Close()
	ctx := context.Background()
	// create a standard kv
	_, err := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{
		Bucket: "A",
	})
	if err != nil {
		t.Fatalf("Error creating kv: %v", err)
	}
	// get stream config and set discard policy to old and AllowDirect to false
	stream, err := js.Stream(ctx, "KV_A")
	if err != nil {
		t.Fatalf("Error getting stream info: %v", err)
	}
	streamCfg := stream.CachedInfo().Config
	streamCfg.Discard = jetstream.DiscardOld
	streamCfg.AllowDirect = false
	// BUG FIX: the modified config was never applied, so the stream kept the
	// correct (new) settings and the repair path below was not exercised.
	// Apply it to simulate a bucket created by an old client.
	if _, err := js.UpdateStream(ctx, streamCfg); err != nil {
		t.Fatalf("Error updating stream: %v", err)
	}
	// create a new kv with the same name - client should fix the
config _, err = js.CreateKeyValue(ctx, jetstream.KeyValueConfig{ Bucket: "A", }) if err != nil { t.Fatalf("Error creating kv: %v", err) } // get stream config again and check if the discard policy is set to new stream, err = js.Stream(ctx, "KV_A") if err != nil { t.Fatalf("Error getting stream info: %v", err) } if stream.CachedInfo().Config.Discard != jetstream.DiscardNew { t.Fatalf("Expected stream to have discard policy set to new") } if !stream.CachedInfo().Config.AllowDirect { t.Fatalf("Expected stream to have AllowDirect set to true") } // attempting to create a new kv with the same name and different settings should fail _, err = js.CreateKeyValue(ctx, jetstream.KeyValueConfig{ Bucket: "A", Description: "New KV", }) if !errors.Is(err, jetstream.ErrBucketExists) { t.Fatalf("Expected error to be ErrBucketExists, got: %v", err) } } nats.go-1.41.0/jetstream/test/main_test.go000066400000000000000000000013011477351342400204420ustar00rootroot00000000000000// Copyright 2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package test import ( "testing" "go.uber.org/goleak" ) func TestMain(m *testing.M) { goleak.VerifyTestMain(m) } nats.go-1.41.0/jetstream/test/message_test.go000066400000000000000000000262421477351342400211550ustar00rootroot00000000000000// Copyright 2022-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package test import ( "context" "errors" "testing" "time" "github.com/nats-io/nats-server/v2/server" "github.com/nats-io/nats.go" "github.com/nats-io/nats.go/jetstream" ) func TestMessageDetails(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{ Durable: "cons", AckPolicy: jetstream.AckExplicitPolicy, Description: "test consumer", }) if err != nil { t.Fatalf("Unexpected error: %v", err) } if _, err := js.Publish(ctx, "FOO.1", []byte("msg"), jetstream.WithMsgID("123")); err != nil { t.Fatalf("Unexpected error: %v", err) } msgs, err := c.Fetch(1) if err != nil { t.Fatalf("Unexpected error: %v", err) } msg := <-msgs.Messages() if msg == nil { t.Fatalf("No messages available") } if err := msgs.Error(); err != nil { t.Fatalf("unexpected error during fetch: %v", err) } if string(msg.Data()) != "msg" { t.Fatalf("Invalid message body; want: 'msg'; got: %q", string(msg.Data())) } metadata, err := msg.Metadata() if err != nil { t.Fatalf("Unexpected error: %v", err) } if metadata.Consumer != "cons" || metadata.Stream != "foo" { 
t.Fatalf("Invalid message metadata: %v", metadata) } if val, ok := msg.Headers()["Nats-Msg-Id"]; !ok || val[0] != "123" { t.Fatalf("Invalid message headers: %v", msg.Headers()) } if msg.Subject() != "FOO.1" { t.Fatalf("Invalid message subject: %q", msg.Subject()) } } func TestAckVariants(t *testing.T) { setup := func(ctx context.Context, t *testing.T) (*server.Server, *nats.Conn, jetstream.JetStream, jetstream.Consumer) { srv := RunBasicJetStreamServer() nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{ Durable: "cons", AckPolicy: jetstream.AckExplicitPolicy, Description: "test consumer", }) if err != nil { t.Fatalf("Unexpected error: %v", err) } return srv, nc, js, c } t.Run("standard ack", func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() srv, nc, js, c := setup(ctx, t) defer shutdownJSServerAndRemoveStorage(t, srv) defer nc.Close() if _, err := js.Publish(ctx, "FOO.1", []byte("msg")); err != nil { t.Fatalf("Unexpected error: %v", err) } msgs, err := c.Fetch(1) if err != nil { t.Fatalf("Unexpected error: %v", err) } msg := <-msgs.Messages() if msg == nil { t.Fatalf("No messages available") } if err := msgs.Error(); err != nil { t.Fatalf("unexpected error during fetch: %v", err) } sub, err := nc.SubscribeSync(msg.Reply()) if err != nil { t.Fatalf("Unexpected error: %v", err) } if err := msg.Ack(); err != nil { t.Fatalf("Unexpected error: %v", err) } ack, err := sub.NextMsgWithContext(ctx) if err != nil { t.Fatalf("Unexpected error: %v", err) } if string(ack.Data) != "+ACK" { t.Fatalf("Invalid ack body: %q", string(ack.Data)) } }) t.Run("ack twice", func(t 
*testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() srv, nc, js, c := setup(ctx, t) defer shutdownJSServerAndRemoveStorage(t, srv) defer nc.Close() if _, err := js.Publish(ctx, "FOO.1", []byte("msg")); err != nil { t.Fatalf("Unexpected error: %v", err) } msgs, err := c.Fetch(1) if err != nil { t.Fatalf("Unexpected error: %v", err) } msg := <-msgs.Messages() if msg == nil { t.Fatalf("No messages available") } if err := msgs.Error(); err != nil { t.Fatalf("unexpected error during fetch: %v", err) } if err := msg.Ack(); err != nil { t.Fatalf("Unexpected error: %v", err) } if err := msg.Ack(); err == nil || !errors.Is(err, jetstream.ErrMsgAlreadyAckd) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrMsgAlreadyAckd, err) } }) t.Run("double ack", func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() srv, nc, js, c := setup(ctx, t) defer shutdownJSServerAndRemoveStorage(t, srv) defer nc.Close() if _, err := js.Publish(ctx, "FOO.1", []byte("msg")); err != nil { t.Fatalf("Unexpected error: %v", err) } msgs, err := c.Fetch(1) if err != nil { t.Fatalf("Unexpected error: %v", err) } msg := <-msgs.Messages() if msg == nil { t.Fatalf("No messages available") } if err := msgs.Error(); err != nil { t.Fatalf("unexpected error during fetch: %v", err) } sub, err := nc.SubscribeSync(msg.Reply()) if err != nil { t.Fatalf("Unexpected error: %v", err) } if err := msg.DoubleAck(ctx); err != nil { t.Fatalf("Unexpected error: %v", err) } ack, err := sub.NextMsgWithContext(ctx) if err != nil { t.Fatalf("Unexpected error: %v", err) } if string(ack.Data) != "+ACK" { t.Fatalf("Invalid ack body: %q", string(ack.Data)) } }) t.Run("standard nak", func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() srv, nc, js, c := setup(ctx, t) defer shutdownJSServerAndRemoveStorage(t, srv) defer nc.Close() if _, err := js.Publish(ctx, "FOO.1", 
[]byte("msg")); err != nil { t.Fatalf("Unexpected error: %v", err) } msgs, err := c.Fetch(1) if err != nil { t.Fatalf("Unexpected error: %v", err) } msg := <-msgs.Messages() if msg == nil { t.Fatalf("No messages available") } if err := msgs.Error(); err != nil { t.Fatalf("unexpected error during fetch: %v", err) } sub, err := nc.SubscribeSync(msg.Reply()) if err != nil { t.Fatalf("Unexpected error: %v", err) } if err := msg.Nak(); err != nil { t.Fatalf("Unexpected error: %v", err) } ack, err := sub.NextMsgWithContext(ctx) if err != nil { t.Fatalf("Unexpected error: %v", err) } if string(ack.Data) != "-NAK" { t.Fatalf("Invalid ack body: %q", string(ack.Data)) } }) t.Run("nak with delay", func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() srv, nc, js, c := setup(ctx, t) defer shutdownJSServerAndRemoveStorage(t, srv) defer nc.Close() if _, err := js.Publish(ctx, "FOO.1", []byte("msg")); err != nil { t.Fatalf("Unexpected error: %v", err) } msgs, err := c.Fetch(1) if err != nil { t.Fatalf("Unexpected error: %v", err) } msg := <-msgs.Messages() if msg == nil { t.Fatalf("No messages available") } if err := msgs.Error(); err != nil { t.Fatalf("unexpected error during fetch: %v", err) } sub, err := nc.SubscribeSync(msg.Reply()) if err != nil { t.Fatalf("Unexpected error: %v", err) } if err := msg.NakWithDelay(123 * time.Nanosecond); err != nil { t.Fatalf("Unexpected error: %v", err) } ack, err := sub.NextMsgWithContext(ctx) if err != nil { t.Fatalf("Unexpected error: %v", err) } if string(ack.Data) != `-NAK {"delay": 123}` { t.Fatalf("Invalid ack body: %q", string(ack.Data)) } }) t.Run("term", func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() srv, nc, js, c := setup(ctx, t) defer shutdownJSServerAndRemoveStorage(t, srv) defer nc.Close() if _, err := js.Publish(ctx, "FOO.1", []byte("msg")); err != nil { t.Fatalf("Unexpected error: %v", err) } msgs, err := 
c.Fetch(1) if err != nil { t.Fatalf("Unexpected error: %v", err) } msg := <-msgs.Messages() if msg == nil { t.Fatalf("No messages available") } if err := msgs.Error(); err != nil { t.Fatalf("unexpected error during fetch: %v", err) } sub, err := nc.SubscribeSync(msg.Reply()) if err != nil { t.Fatalf("Unexpected error: %v", err) } if err := msg.Term(); err != nil { t.Fatalf("Unexpected error: %v", err) } ack, err := sub.NextMsgWithContext(ctx) if err != nil { t.Fatalf("Unexpected error: %v", err) } if string(ack.Data) != "+TERM" { t.Fatalf("Invalid ack body: %q", string(ack.Data)) } }) t.Run("term with reason", func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() srv, nc, js, c := setup(ctx, t) defer shutdownJSServerAndRemoveStorage(t, srv) defer nc.Close() if _, err := js.Publish(ctx, "FOO.1", []byte("msg")); err != nil { t.Fatalf("Unexpected error: %v", err) } msgs, err := c.Fetch(1) if err != nil { t.Fatalf("Unexpected error: %v", err) } msg := <-msgs.Messages() if msg == nil { t.Fatalf("No messages available") } if err := msgs.Error(); err != nil { t.Fatalf("unexpected error during fetch: %v", err) } sub, err := nc.SubscribeSync(msg.Reply()) if err != nil { t.Fatalf("Unexpected error: %v", err) } if err := msg.TermWithReason("with reason"); err != nil { t.Fatalf("Unexpected error: %v", err) } ack, err := sub.NextMsgWithContext(ctx) if err != nil { t.Fatalf("Unexpected error: %v", err) } if string(ack.Data) != "+TERM with reason" { t.Fatalf("Invalid ack body: %q", string(ack.Data)) } }) t.Run("in progress", func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() srv, nc, js, c := setup(ctx, t) defer shutdownJSServerAndRemoveStorage(t, srv) defer nc.Close() if _, err := js.Publish(ctx, "FOO.1", []byte("msg")); err != nil { t.Fatalf("Unexpected error: %v", err) } msgs, err := c.Fetch(1) if err != nil { t.Fatalf("Unexpected error: %v", err) } msg := 
<-msgs.Messages() if msg == nil { t.Fatalf("No messages available") } if err := msgs.Error(); err != nil { t.Fatalf("unexpected error during fetch: %v", err) } sub, err := nc.SubscribeSync(msg.Reply()) if err != nil { t.Fatalf("Unexpected error: %v", err) } if err := msg.InProgress(); err != nil { t.Fatalf("Unexpected error: %v", err) } ack, err := sub.NextMsgWithContext(ctx) if err != nil { t.Fatalf("Unexpected error: %v", err) } if string(ack.Data) != "+WPI" { t.Fatalf("Invalid ack body: %q", string(ack.Data)) } }) } nats.go-1.41.0/jetstream/test/object_test.go000066400000000000000000001047151477351342400210010ustar00rootroot00000000000000// Copyright 2023-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package test import ( "bytes" "context" "crypto/rand" "crypto/sha256" "fmt" "io" "os" "path" "path/filepath" "reflect" "strings" "testing" "time" "github.com/nats-io/nats.go" "github.com/nats-io/nats.go/jetstream" ) func TestObjectBasics(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx := context.Background() _, err := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "notok!", Description: "testing"}) expectErr(t, err, jetstream.ErrInvalidStoreName) obs, err := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "OBJS", Description: "testing"}) expectOk(t, err) // Create ~16MB object. 
blob := make([]byte, 16*1024*1024+22)
	_, err = rand.Read(blob)
	expectOk(t, err)
	now := time.Now().UTC().Round(time.Second)
	_, err = obs.PutBytes(ctx, "BLOB", blob)
	expectOk(t, err)
	// Test info
	info, err := obs.GetInfo(ctx, "BLOB")
	expectOk(t, err)
	if len(info.NUID) == 0 {
		t.Fatalf("Expected object to have a NUID")
	}
	if info.ModTime.IsZero() {
		t.Fatalf("Expected object to have a non-zero ModTime")
	}
	// Tolerate up to one second of rounding skew between the local clock
	// and the server-assigned ModTime.
	if mt := info.ModTime.Round(time.Second); mt.Sub(now) != 0 && mt.Sub(now) != time.Second {
		t.Fatalf("Expected ModTime to be about %v, got %v", now, mt)
	}
	// Make sure the stream is sealed.
	err = obs.Seal(ctx)
	expectOk(t, err)
	si, err := js.Stream(ctx, "OBJ_OBJS")
	expectOk(t, err)
	if !si.CachedInfo().Config.Sealed {
		t.Fatalf("Expected the object stream to be sealed, got %+v", si)
	}
	status, err := obs.Status(ctx)
	expectOk(t, err)
	if !status.Sealed() {
		t.Fatalf("expected sealed status")
	}
	if status.Size() == 0 {
		t.Fatalf("size is 0")
	}
	if status.Storage() != jetstream.FileStorage {
		// TYPO FIX: failure message previously read "stauts".
		t.Fatalf("status reports %d storage", status.Storage())
	}
	if status.Description() != "testing" {
		t.Fatalf("invalid description: '%s'", status.Description())
	}
	// Now get the object back.
	result, err := obs.Get(ctx, "BLOB")
	expectOk(t, err)
	expectOk(t, result.Error())
	defer result.Close()
	// Check info.
	info, err = result.Info()
	expectOk(t, err)
	if info.Size != uint64(len(blob)) {
		t.Fatalf("Size does not match, %d vs %d", info.Size, len(blob))
	}
	// Check result.
	// Renamed from "copy" so the local no longer shadows the builtin copy.
	got, err := io.ReadAll(result)
	expectOk(t, err)
	if !bytes.Equal(got, blob) {
		t.Fatalf("Result not the same")
	}
	// Check simple errors.
	_, err = obs.Get(ctx, "FOO")
	expectErr(t, err, jetstream.ErrObjectNotFound)
	_, err = obs.Get(ctx, "")
	expectErr(t, err, jetstream.ErrNameRequired)
	_, err = obs.PutBytes(ctx, "", blob)
	expectErr(t, err, jetstream.ErrBadObjectMeta)
	// Test delete.
err = js.DeleteObjectStore(ctx, "OBJS") expectOk(t, err) _, err = js.ObjectStore(ctx, "BLOB") expectErr(t, err, jetstream.ErrBucketNotFound) } func TestCreateObjectStore(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx := context.Background() // invalid bucket name _, err := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "TEST.", Description: "Test store"}) expectErr(t, err, jetstream.ErrInvalidStoreName) _, err = js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "TEST", Description: "Test store"}) expectOk(t, err) // Check that we can't overwrite existing bucket. _, err = js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "TEST", Description: "New store"}) expectErr(t, err, jetstream.ErrBucketExists) // assert that we're backwards compatible expectErr(t, err, jetstream.ErrStreamNameAlreadyInUse) } func TestUpdateObjectStore(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx := context.Background() // cannot update a non-existing bucket _, err := js.UpdateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "TEST", Description: "Test store"}) expectErr(t, err, jetstream.ErrBucketNotFound) _, err = js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "TEST", Description: "Test store"}) expectOk(t, err) // update the bucket _, err = js.UpdateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "TEST", Description: "New store"}) expectOk(t, err) } func TestCreateOrUpdateObjectStore(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx := context.Background() // invalid bucket name _, err := js.CreateOrUpdateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "TEST.", Description: "Test store"}) expectErr(t, err, jetstream.ErrInvalidStoreName) _, err = 
js.CreateOrUpdateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "TEST", Description: "Test store"}) expectOk(t, err) // update the bucket _, err = js.CreateOrUpdateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "TEST", Description: "New store"}) expectOk(t, err) } func TestGetObjectDigestMismatch(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx := context.Background() obs, err := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "FOO"}) expectOk(t, err) _, err = obs.PutString(ctx, "A", "abc") expectOk(t, err) res, err := obs.Get(ctx, "A") expectOk(t, err) // first read should be successful data, err := io.ReadAll(res) expectOk(t, err) if string(data) != "abc" { t.Fatalf("Expected result: 'abc'; got: %s", string(data)) } info, err := obs.GetInfo(ctx, "A") expectOk(t, err) // add new chunk after using Put(), this will change the digest hash on Get() _, err = js.Publish(ctx, fmt.Sprintf("$O.FOO.C.%s", info.NUID), []byte("123")) expectOk(t, err) res, err = obs.Get(ctx, "A") expectOk(t, err) _, err = io.ReadAll(res) expectErr(t, err, jetstream.ErrDigestMismatch) expectErr(t, res.Error(), jetstream.ErrDigestMismatch) } func TestDefaultObjectStatus(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx := context.Background() obs, err := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "OBJS", Description: "testing"}) expectOk(t, err) blob := make([]byte, 1024*1024+22) _, err = rand.Read(blob) expectOk(t, err) _, err = obs.PutBytes(ctx, "BLOB", blob) expectOk(t, err) status, err := obs.Status(ctx) expectOk(t, err) if status.BackingStore() != "JetStream" { t.Fatalf("invalid backing store kind: %s", status.BackingStore()) } bs := status.(*jetstream.ObjectBucketStatus) info := bs.StreamInfo() if info.Config.Name != "OBJ_OBJS" { t.Fatalf("invalid stream name %+v", info) 
} } func TestObjectFileBasics(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx := context.Background() obs, err := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "FILES"}) expectOk(t, err) // Create ~8MB object. blob := make([]byte, 8*1024*1024+33) _, err = rand.Read(blob) expectOk(t, err) tmpFile, err := os.CreateTemp("", "objfile") expectOk(t, err) defer os.Remove(tmpFile.Name()) // clean up err = os.WriteFile(tmpFile.Name(), blob, 0600) expectOk(t, err) _, err = obs.PutFile(ctx, tmpFile.Name()) expectOk(t, err) tmpResult, err := os.CreateTemp("", "objfileresult") expectOk(t, err) defer os.Remove(tmpResult.Name()) // clean up err = obs.GetFile(ctx, tmpFile.Name(), tmpResult.Name()) expectOk(t, err) // Make sure they are the same. original, err := os.ReadFile(tmpFile.Name()) expectOk(t, err) restored, err := os.ReadFile(tmpResult.Name()) expectOk(t, err) if !bytes.Equal(original, restored) { t.Fatalf("Files did not match") } } func TestObjectMulti(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx := context.Background() obs, err := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "TEST_FILES"}) expectOk(t, err) numFiles := 0 fis, _ := os.ReadDir(".") for _, fi := range fis { fn := fi.Name() // Just grab clean test files. if filepath.Ext(fn) != ".go" || fn[0] == '.' 
|| fn[0] == '#' { continue } _, err = obs.PutFile(ctx, fn) expectOk(t, err) numFiles++ } expectOk(t, obs.Seal(ctx)) _, err = js.Stream(ctx, "OBJ_TEST_FILES") expectOk(t, err) result, err := obs.Get(ctx, "object_test.go") expectOk(t, err) expectOk(t, result.Error()) defer result.Close() _, err = result.Info() expectOk(t, err) copy, err := io.ReadAll(result) expectOk(t, err) orig, err := os.ReadFile(path.Join(".", "object_test.go")) expectOk(t, err) if !bytes.Equal(orig, copy) { t.Fatalf("Files did not match") } } func TestObjectDeleteMarkers(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx := context.Background() obs, err := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "OBJS"}) expectOk(t, err) msg := bytes.Repeat([]byte("A"), 100) _, err = obs.PutBytes(ctx, "A", msg) expectOk(t, err) err = obs.Delete(ctx, "A") expectOk(t, err) si, err := js.Stream(ctx, "OBJ_OBJS") expectOk(t, err) // We should have one message left, the "delete" marker. if si.CachedInfo().State.Msgs != 1 { t.Fatalf("Expected 1 marker msg, got %d msgs", si.CachedInfo().State.Msgs) } // For deleted object return error _, err = obs.GetInfo(ctx, "A") expectErr(t, err, jetstream.ErrObjectNotFound) _, err = obs.Get(ctx, "A") expectErr(t, err, jetstream.ErrObjectNotFound) info, err := obs.GetInfo(ctx, "A", jetstream.GetObjectInfoShowDeleted()) expectOk(t, err) // Make sure we have a delete marker, this will be there to drive Watch functionality. 
if !info.Deleted { t.Fatalf("Expected info to be marked as deleted") } _, err = obs.Get(ctx, "A", jetstream.GetObjectShowDeleted()) expectOk(t, err) } func TestObjectMultiWithDelete(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx := context.Background() obs, err := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "2OD"}) expectOk(t, err) pa := bytes.Repeat([]byte("A"), 2_000_000) pb := bytes.Repeat([]byte("B"), 3_000_000) _, err = obs.PutBytes(ctx, "A", pa) expectOk(t, err) // Hold onto this so we can make sure DeleteObject clears all messages, chunks and meta. si, err := js.Stream(ctx, "OBJ_2OD") expectOk(t, err) _, err = obs.PutBytes(ctx, "B", pb) expectOk(t, err) pb2, err := obs.GetBytes(ctx, "B") expectOk(t, err) if !bytes.Equal(pb, pb2) { t.Fatalf("Did not retrieve same object") } // Now delete B err = obs.Delete(ctx, "B") expectOk(t, err) siad, err := js.Stream(ctx, "OBJ_2OD") expectOk(t, err) if siad.CachedInfo().State.Msgs != si.CachedInfo().State.Msgs+1 { // +1 more delete marker. t.Fatalf("Expected to have %d msgs after delete, got %d", siad.CachedInfo().State.Msgs, si.CachedInfo().State.Msgs+1) } } func TestObjectNames(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx := context.Background() obs, err := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "OBJS"}) expectOk(t, err) // Test filename like naming. 
_, err = obs.PutString(ctx, "BLOB.txt", "A") expectOk(t, err) // Spaces ok _, err = obs.PutString(ctx, "foo bar", "A") expectOk(t, err) // things that can be in a filename across multiple OSes // dot, asterisk, lt, gt, colon, double-quote, fwd-slash, backslash, pipe, question-mark, ampersand _, err = obs.PutString(ctx, ".*<>:\"/\\|?&", "A") expectOk(t, err) // Errors _, err = obs.PutString(ctx, "", "A") expectErr(t, err, jetstream.ErrBadObjectMeta) } func TestObjectMetadata(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx := context.Background() bucketMetadata := map[string]string{"foo": "bar", "baz": "boo"} obs, err := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{ Bucket: "META-TEST", Metadata: bucketMetadata, }) expectOk(t, err) status, err := obs.Status(ctx) expectOk(t, err) for k, v := range bucketMetadata { if status.Metadata()[k] != v { t.Fatalf("invalid bucket metadata: %+v", status.Metadata()) } } // Simple with no Meta. 
_, err = obs.PutString(ctx, "A", "AAA") expectOk(t, err) buf := bytes.NewBufferString("CCC") objectMetadata := map[string]string{"name": "C", "description": "descC"} info, err := obs.Put(ctx, jetstream.ObjectMeta{Name: "C", Metadata: objectMetadata}, buf) expectOk(t, err) if !reflect.DeepEqual(info.Metadata, objectMetadata) { t.Fatalf("invalid object metadata: %+v", info.Metadata) } meta := jetstream.ObjectMeta{Name: "A"} meta.Description = "descA" meta.Headers = make(nats.Header) meta.Headers.Set("color", "blue") objectMetadata["description"] = "updated desc" objectMetadata["version"] = "0.1" meta.Metadata = objectMetadata // simple update that does not change the name, just adds data err = obs.UpdateMeta(ctx, "A", meta) expectOk(t, err) info, err = obs.GetInfo(ctx, "A") expectOk(t, err) if info.Name != "A" || info.Description != "descA" || info.Headers == nil || info.Headers.Get("color") != "blue" || !reflect.DeepEqual(info.Metadata, objectMetadata) { t.Fatalf("Update failed: %+v", info) } // update that changes the name and some data meta = jetstream.ObjectMeta{Name: "B"} meta.Description = "descB" meta.Headers = make(nats.Header) meta.Headers.Set("color", "red") meta.Metadata = nil err = obs.UpdateMeta(ctx, "A", meta) expectOk(t, err) _, err = obs.GetInfo(ctx, "A") if err == nil { t.Fatal("Object meta for original name was not removed.") } info, err = obs.GetInfo(ctx, "B") expectOk(t, err) if info.Name != "B" || info.Description != "descB" || info.Headers == nil || info.Headers.Get("color") != "red" || info.Metadata != nil { t.Fatalf("Update failed: %+v", info) } // Change meta name to existing object's name meta = jetstream.ObjectMeta{Name: "C"} err = obs.UpdateMeta(ctx, "B", meta) expectErr(t, err, jetstream.ErrObjectAlreadyExists) err = obs.Delete(ctx, "C") expectOk(t, err) err = obs.UpdateMeta(ctx, "B", meta) expectOk(t, err) // delete the object to test updating against a deleted object err = obs.Delete(ctx, "C") expectOk(t, err) err = obs.UpdateMeta(ctx, 
"C", meta) expectErr(t, err, jetstream.ErrUpdateMetaDeleted) err = obs.UpdateMeta(ctx, "X", meta) if err == nil { t.Fatal("Expected an error when trying to update an object that does not exist.") } // can't have a link when putting an object meta.Opts = &jetstream.ObjectMetaOptions{Link: &jetstream.ObjectLink{Bucket: "DoesntMatter"}} _, err = obs.Put(ctx, meta, nil) expectErr(t, err, jetstream.ErrLinkNotAllowed) } func TestObjectWatch(t *testing.T) { expectUpdateF := func(t *testing.T, watcher jetstream.ObjectWatcher) func(name string) { return func(name string) { t.Helper() select { case info := <-watcher.Updates(): if false && info.Name != name { // TODO what is supposed to happen here? t.Fatalf("Expected update for %q, but got %+v", name, info) } case <-time.After(time.Second): t.Fatalf("Did not receive an update like expected") } } } expectNoMoreUpdatesF := func(t *testing.T, watcher jetstream.ObjectWatcher) func() { return func() { t.Helper() select { case info := <-watcher.Updates(): t.Fatalf("Got an unexpected update: %+v", info) case <-time.After(100 * time.Millisecond): } } } expectInitDoneF := func(t *testing.T, watcher jetstream.ObjectWatcher) func() { return func() { t.Helper() select { case info := <-watcher.Updates(): if info != nil { t.Fatalf("Did not get expected: %+v", info) } case <-time.After(time.Second): t.Fatalf("Did not receive a init done like expected") } } } t.Run("default watcher", func(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx := context.Background() obs, err := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "WATCH-TEST"}) expectOk(t, err) watcher, err := obs.Watch(ctx) expectOk(t, err) defer watcher.Stop() expectUpdate := expectUpdateF(t, watcher) expectNoMoreUpdates := expectNoMoreUpdatesF(t, watcher) expectInitDone := expectInitDoneF(t, watcher) // We should get a marker that is nil when all initial values are delivered. 
expectInitDone() _, err = obs.PutString(ctx, "A", "AAA") expectOk(t, err) _, err = obs.PutString(ctx, "B", "BBB") expectOk(t, err) // Initial Values. expectUpdate("A") expectUpdate("B") expectNoMoreUpdates() // Delete err = obs.Delete(ctx, "A") expectOk(t, err) expectUpdate("A") expectNoMoreUpdates() // New _, err = obs.PutString(ctx, "C", "CCC") expectOk(t, err) // Update Meta deletedInfo, err := obs.GetInfo(ctx, "A", jetstream.GetObjectInfoShowDeleted()) expectOk(t, err) if !deletedInfo.Deleted { t.Fatalf("Expected object to be deleted.") } meta := deletedInfo.ObjectMeta meta.Description = "Making a change." err = obs.UpdateMeta(ctx, "A", meta) expectErr(t, err, jetstream.ErrUpdateMetaDeleted) }) t.Run("watcher with update", func(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx := context.Background() obs, err := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "WATCH-TEST"}) expectOk(t, err) _, err = obs.PutString(ctx, "A", "AAA") expectOk(t, err) _, err = obs.PutString(ctx, "B", "BBB") expectOk(t, err) watcher, err := obs.Watch(ctx, jetstream.UpdatesOnly()) expectOk(t, err) defer watcher.Stop() expectUpdate := expectUpdateF(t, watcher) expectNoMoreUpdates := expectNoMoreUpdatesF(t, watcher) // when listening for updates only, we should not receive anything when watcher is started expectNoMoreUpdates() // Delete err = obs.Delete(ctx, "A") expectOk(t, err) expectUpdate("A") expectNoMoreUpdates() // New _, err = obs.PutString(ctx, "C", "CCC") expectOk(t, err) expectUpdate("C") }) t.Run("stop watcher should close the channel", func(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx := context.Background() obs, err := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "WATCH-TEST"}) expectOk(t, err) watcher, err := obs.Watch(ctx) expectOk(t, err) expectInitDone := 
expectInitDoneF(t, watcher) expectInitDone() err = watcher.Stop() expectOk(t, err) select { case _, ok := <-watcher.Updates(): if ok { t.Fatalf("Expected channel to be closed") } case <-time.After(100 * time.Millisecond): return } }) } func TestObjectLinks(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx := context.Background() root, err := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "ROOT"}) expectOk(t, err) _, err = root.PutString(ctx, "A", "AAA") expectOk(t, err) _, err = root.PutString(ctx, "B", "BBB") expectOk(t, err) infoA, err := root.GetInfo(ctx, "A") expectOk(t, err) // Link to individual object. infoLA, err := root.AddLink(ctx, "LA", infoA) expectOk(t, err) expectLinkIsCorrect(t, infoA, infoLA) // link to a link _, err = root.AddLink(ctx, "LALA", infoLA) expectErr(t, err, jetstream.ErrNoLinkToLink) dir, err := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "DIR"}) expectOk(t, err) _, err = dir.PutString(ctx, "DIR/A", "DIR-AAA") expectOk(t, err) _, err = dir.PutString(ctx, "DIR/B", "DIR-BBB") expectOk(t, err) infoB, err := dir.GetInfo(ctx, "DIR/B") expectOk(t, err) infoLB, err := root.AddLink(ctx, "DBL", infoB) expectOk(t, err) expectLinkIsCorrect(t, infoB, infoLB) // Now add whole other store as a link, like a directory. 
infoBucketLink, err := root.AddBucketLink(ctx, "dir", dir) expectOk(t, err) _, err = root.Get(ctx, infoBucketLink.Name) expectErr(t, err, jetstream.ErrCantGetBucket) expectLinkPartsAreCorrect(t, infoBucketLink, "DIR", "") // Try to get a linked object, same bucket getLA, err := root.GetString(ctx, "LA") expectOk(t, err) if getLA != "AAA" { t.Fatalf("Expected %q but got %q", "AAA", getLA) } // Try to get a linked object, cross bucket getDbl, err := root.GetString(ctx, "DBL") expectOk(t, err) if getDbl != "DIR-BBB" { t.Fatalf("Expected %q but got %q", "DIR-BBB", getDbl) } // change a link infoB, err = root.GetInfo(ctx, "B") expectOk(t, err) infoLA, err = root.GetInfo(ctx, "LA") expectOk(t, err) expectLinkIsCorrect(t, infoA, infoLA) infoLA, err = root.AddLink(ctx, "LA", infoB) expectOk(t, err) expectLinkIsCorrect(t, infoB, infoLA) // change a bucket link infoBucketLink, err = root.GetInfo(ctx, "dir") expectOk(t, err) expectLinkPartsAreCorrect(t, infoBucketLink, "DIR", "") infoBucketLink, err = root.AddBucketLink(ctx, "dir", root) expectOk(t, err) expectLinkPartsAreCorrect(t, infoBucketLink, "ROOT", "") // Check simple errors. 
_, err = root.AddLink(ctx, "", infoB) expectErr(t, err, jetstream.ErrNameRequired) // A is already an object _, err = root.AddLink(ctx, "A", infoB) expectErr(t, err, jetstream.ErrObjectAlreadyExists) _, err = root.AddLink(ctx, "Nil Object", nil) expectErr(t, err, jetstream.ErrObjectRequired) infoB.Name = "" _, err = root.AddLink(ctx, "Empty Info Name", infoB) expectErr(t, err, jetstream.ErrObjectRequired) // Check Error Link to a Link _, err = root.AddLink(ctx, "Link To Link", infoLB) expectErr(t, err, jetstream.ErrNoLinkToLink) // Check Errors on bucket linking _, err = root.AddBucketLink(ctx, "", root) expectErr(t, err, jetstream.ErrNameRequired) _, err = root.AddBucketLink(ctx, "Nil Bucket", nil) expectErr(t, err, jetstream.ErrBucketRequired) err = root.Delete(ctx, "A") expectOk(t, err) _, err = root.AddLink(ctx, "ToDeletedStale", infoA) expectOk(t, err) // TODO deal with this in the code somehow infoA, err = root.GetInfo(ctx, "A", jetstream.GetObjectInfoShowDeleted()) expectOk(t, err) _, err = root.AddLink(ctx, "ToDeletedFresh", infoA) expectErr(t, err, jetstream.ErrNoLinkToDeleted) } func expectLinkIsCorrect(t *testing.T, originalObject *jetstream.ObjectInfo, linkObject *jetstream.ObjectInfo) { if linkObject.Opts.Link == nil || !expectLinkPartsAreCorrect(t, linkObject, originalObject.Bucket, originalObject.Name) { t.Fatalf("Link info not what was expected:\nActual: %+v\nTarget: %+v", linkObject, originalObject) } } func expectLinkPartsAreCorrect(t *testing.T, linkObject *jetstream.ObjectInfo, bucket, name string) bool { return linkObject.Opts.Link.Bucket == bucket && linkObject.Opts.Link.Name == name && !linkObject.ModTime.IsZero() && linkObject.NUID != "" } // Right now no history, just make sure we are cleaning up after ourselves. 
func TestObjectHistory(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx := context.Background() obs, err := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "OBJS"}) expectOk(t, err) info, err := obs.PutBytes(ctx, "A", bytes.Repeat([]byte("A"), 10)) expectOk(t, err) if info.Size != 10 { t.Fatalf("Invalid first put when testing history %+v", info) } info, err = obs.PutBytes(ctx, "A", bytes.Repeat([]byte("a"), 20)) expectOk(t, err) if info.Size != 20 { t.Fatalf("Invalid second put when testing history %+v", info) } // Should only be 1 copy of 'A', so 1 data and 1 meta since history was not selected. si, err := js.Stream(ctx, "OBJ_OBJS") expectOk(t, err) if si.CachedInfo().State.Msgs != 2 { t.Fatalf("Expected 2 msgs (1 data 1 meta) but got %d", si.CachedInfo().State.Msgs) } } func TestObjectList(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx := context.Background() root, err := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "ROOT"}) expectOk(t, err) _, err = root.List(ctx) expectErr(t, err, jetstream.ErrNoObjectsFound) put := func(name, value string) { _, err = root.PutString(ctx, name, value) expectOk(t, err) } put("A", "AAA") put("B", "BBB") put("C", "CCC") put("B", "bbb") // Self link info, err := root.GetInfo(ctx, "B") expectOk(t, err) _, err = root.AddLink(ctx, "b", info) expectOk(t, err) put("D", "DDD") err = root.Delete(ctx, "D") expectOk(t, err) t.Run("without deleted objects", func(t *testing.T) { lch, err := root.List(ctx) expectOk(t, err) omap := make(map[string]struct{}) for _, info := range lch { if _, ok := omap[info.Name]; ok { t.Fatalf("Already saw %q", info.Name) } omap[info.Name] = struct{}{} } if len(omap) != 4 { t.Fatalf("Expected 4 total objects, got %d", len(omap)) } expected := map[string]struct{}{ "A": struct{}{}, "B": struct{}{}, "C": 
struct{}{}, "b": struct{}{}, } if !reflect.DeepEqual(omap, expected) { t.Fatalf("Expected %+v but got %+v", expected, omap) } }) t.Run("with deleted objects", func(t *testing.T) { lch, err := root.List(ctx, jetstream.ListObjectsShowDeleted()) expectOk(t, err) res := make([]string, 0) for _, info := range lch { res = append(res, info.Name) } if len(res) != 5 { t.Fatalf("Expected 5 total objects, got %d", len(res)) } expected := []string{"A", "C", "B", "b", "D"} if !reflect.DeepEqual(res, expected) { t.Fatalf("Expected %+v but got %+v", expected, res) } }) t.Run("with context", func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() lch, err := root.List(ctx) expectOk(t, err) omap := make(map[string]struct{}) for _, info := range lch { if _, ok := omap[info.Name]; ok { t.Fatalf("Already saw %q", info.Name) } omap[info.Name] = struct{}{} } if len(omap) != 4 { t.Fatalf("Expected 4 total objects, got %d", len(omap)) } expected := map[string]struct{}{ "A": struct{}{}, "B": struct{}{}, "C": struct{}{}, "b": struct{}{}, } if !reflect.DeepEqual(omap, expected) { t.Fatalf("Expected %+v but got %+v", expected, omap) } }) } func TestObjectMaxBytes(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx := context.Background() obs, err := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "OBJS", MaxBytes: 1024}) expectOk(t, err) status, err := obs.Status(ctx) expectOk(t, err) bs := status.(*jetstream.ObjectBucketStatus) info := bs.StreamInfo() if info.Config.MaxBytes != 1024 { t.Fatalf("invalid object stream MaxSize %+v", info.Config.MaxBytes) } } func TestListObjectStores(t *testing.T) { tests := []struct { name string bucketsNum int }{ { name: "single page", bucketsNum: 5, }, { name: "multi page", bucketsNum: 1025, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { s := RunBasicJetStreamServer() defer 
shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx := context.Background() // create stream without the chunk subject, but with OBJ_ prefix _, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "OBJ_FOO", Subjects: []string{"FOO.*"}}) expectOk(t, err) // create stream with chunk subject, but without "OBJ_" prefix _, err = js.CreateStream(ctx, jetstream.StreamConfig{Name: "FOO", Subjects: []string{"$O.ABC.C.>"}}) expectOk(t, err) for i := 0; i < test.bucketsNum; i++ { _, err = js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: fmt.Sprintf("OBJS_%d", i), MaxBytes: 1024}) expectOk(t, err) } names := make([]string, 0) objectStoreNames := js.ObjectStoreNames(ctx) for name := range objectStoreNames.Name() { if strings.HasPrefix(name, "OBJ_") { t.Fatalf("Expected name without OBJ_ prefix, got: %s", name) } names = append(names, name) } expectOk(t, objectStoreNames.Error()) if len(names) != test.bucketsNum { t.Fatalf("Invalid number of stream names; want: %d; got: %d", test.bucketsNum, len(names)) } infos := make([]jetstream.ObjectStoreStatus, 0) objectStores := js.ObjectStores(ctx) for info := range objectStores.Status() { infos = append(infos, info) } expectOk(t, objectStores.Error()) if len(infos) != test.bucketsNum { t.Fatalf("Invalid number of streams; want: %d; got: %d", test.bucketsNum, len(infos)) } }) } } func TestGetObjectDigestValue(t *testing.T) { tests := []struct { inputFile string expected string }{ { inputFile: "digester_test_bytes_000100.txt", expected: "SHA-256=IdgP4UYMGt47rgecOqFoLrd24AXukHf5-SVzqQ5Psg8=", }, { inputFile: "digester_test_bytes_001000.txt", expected: "SHA-256=DZj4RnBpuEukzFIY0ueZ-xjnHY4Rt9XWn4Dh8nkNfnI=", }, { inputFile: "digester_test_bytes_010000.txt", expected: "SHA-256=RgaJ-VSJtjNvgXcujCKIvaheiX_6GRCcfdRYnAcVy38=", }, { inputFile: "digester_test_bytes_100000.txt", expected: "SHA-256=yan7pwBVnC1yORqqgBfd64_qAw6q9fNA60_KRiMMooE=", }, } for _, test := range tests { t.Run(test.inputFile, 
func(t *testing.T) { data, err := os.ReadFile(fmt.Sprintf("./testdata/%s", test.inputFile)) expectOk(t, err) h := sha256.New() h.Write(data) if res := nats.GetObjectDigestValue(h); res != test.expected { t.Fatalf("Invalid digest; want: %s; got: %s", test.expected, res) } }) } } func TestDecodeObjectDigest(t *testing.T) { tests := []struct { inputDigest string expectedFile string withError error }{ { expectedFile: "digester_test_bytes_000100.txt", inputDigest: "SHA-256=IdgP4UYMGt47rgecOqFoLrd24AXukHf5-SVzqQ5Psg8=", }, { expectedFile: "digester_test_bytes_001000.txt", inputDigest: "SHA-256=DZj4RnBpuEukzFIY0ueZ-xjnHY4Rt9XWn4Dh8nkNfnI=", }, { expectedFile: "digester_test_bytes_010000.txt", inputDigest: "SHA-256=RgaJ-VSJtjNvgXcujCKIvaheiX_6GRCcfdRYnAcVy38=", }, { expectedFile: "digester_test_bytes_100000.txt", inputDigest: "SHA-256=yan7pwBVnC1yORqqgBfd64_qAw6q9fNA60_KRiMMooE=", }, } for _, test := range tests { t.Run(test.expectedFile, func(t *testing.T) { expected, err := os.ReadFile(fmt.Sprintf("./testdata/%s", test.expectedFile)) h := sha256.New() h.Write(expected) expected = h.Sum(nil) expectOk(t, err) res, err := nats.DecodeObjectDigest(test.inputDigest) if test.withError != nil { expectErr(t, err, nats.ErrInvalidDigestFormat) return } expectOk(t, err) if !bytes.Equal(res[:], expected) { t.Fatalf("Invalid decoded value; want: %s; got: %s", expected, res) } }) } } func TestObjectStoreGetObjectContextTimeout(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() obs, err := js.CreateObjectStore(context.Background(), jetstream.ObjectStoreConfig{Bucket: "OBJS"}) expectOk(t, err) blob := make([]byte, 1024) _, err = rand.Read(blob) expectOk(t, err) _, err = obs.PutBytes(context.Background(), "blob", blob) expectOk(t, err) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) defer cancel() r, err := obs.Get(ctx, "blob") expectOk(t, err) time.Sleep(15 * time.Millisecond) 
var res []byte _, err = r.Read(res) expectErr(t, err, nats.ErrTimeout) r.Close() } func TestObjectStoreCompression(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() ctx := context.Background() objCompressed, err := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{ Bucket: "A", Compression: true, }) if err != nil { t.Fatalf("Error creating object store: %v", err) } status, err := objCompressed.Status(ctx) if err != nil { t.Fatalf("Error getting bucket status: %v", err) } if !status.IsCompressed() { t.Fatalf("Expected bucket to be compressed") } objStream, err := js.Stream(ctx, "OBJ_A") if err != nil { t.Fatalf("Error getting stream info: %v", err) } if objStream.CachedInfo().Config.Compression != jetstream.S2Compression { t.Fatalf("Expected stream to be compressed with S2") } } func TestObjectStoreMirror(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() bucketName := "test-bucket" ctx := context.Background() obs, err := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: bucketName, Description: "testing"}) expectOk(t, err) mirrorBucketName := "mirror-test-bucket" _, err = js.CreateStream(ctx, jetstream.StreamConfig{ Name: fmt.Sprintf("OBJ_%s", mirrorBucketName), Mirror: &jetstream.StreamSource{ Name: fmt.Sprintf("OBJ_%s", bucketName), SubjectTransforms: []jetstream.SubjectTransformConfig{ { Source: fmt.Sprintf("$O.%s.>", bucketName), Destination: fmt.Sprintf("$O.%s.>", mirrorBucketName), }, }, }, AllowRollup: true, // meta messages are always rollups }) if err != nil { t.Fatalf("Error creating object store bucket mirror: %v", err) } _, err = obs.PutString(ctx, "A", "abc") expectOk(t, err) mirrorObs, err := js.ObjectStore(ctx, mirrorBucketName) expectOk(t, err) // Make sure we sync. 
checkFor(t, 2*time.Second, 15*time.Millisecond, func() error { mirrorValue, err := mirrorObs.GetString(ctx, "A") if err != nil { return err } if mirrorValue != "abc" { t.Fatalf("Expected mirrored object store value to be the same as original") } return nil }) watcher, err := mirrorObs.Watch(ctx) if err != nil { t.Fatalf("Error creating watcher: %v", err) } defer watcher.Stop() // expect to get one value and nil for { select { case info := <-watcher.Updates(): if info == nil { return } case <-time.After(2 * time.Second): t.Fatalf("Expected to receive an update") } } } nats.go-1.41.0/jetstream/test/ordered_test.go000066400000000000000000001646501477351342400211630ustar00rootroot00000000000000// Copyright 2022-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package test import ( "context" "errors" "fmt" "reflect" "sync" "testing" "time" "github.com/nats-io/nats.go" "github.com/nats-io/nats.go/jetstream" ) func TestOrderedConsumerConsume(t *testing.T) { testSubject := "FOO.123" testMsgs := []string{"m1", "m2", "m3", "m4", "m5"} publishTestMsgs := func(t *testing.T, js jetstream.JetStream) { for _, msg := range testMsgs { if _, err := js.Publish(context.Background(), testSubject, []byte(msg)); err != nil { t.Fatalf("Unexpected error during publish: %s", err) } } } t.Run("base usage, delete consumer", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msgs := make([]jetstream.Msg, 0) wg := &sync.WaitGroup{} wg.Add(len(testMsgs)) l, err := c.Consume(func(msg jetstream.Msg) { msgs = append(msgs, msg) wg.Done() }) if err != nil { t.Fatalf("Unexpected error: %v", err) } publishTestMsgs(t, js) wg.Wait() name := c.CachedInfo().Name if err := s.DeleteConsumer(ctx, name); err != nil { t.Fatal(err) } wg.Add(len(testMsgs)) publishTestMsgs(t, js) wg.Wait() l.Stop() }) t.Run("reset consumer before receiving any messages", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := 
context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } wg := &sync.WaitGroup{} l, err := c.Consume(func(msg jetstream.Msg) { wg.Done() }) if err != nil { t.Fatalf("Unexpected error: %v", err) } time.Sleep(500 * time.Millisecond) name := c.CachedInfo().Name if err := s.DeleteConsumer(ctx, name); err != nil { t.Fatal(err) } wg.Add(len(testMsgs)) publishTestMsgs(t, js) wg.Wait() l.Stop() }) t.Run("with custom start seq", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } publishTestMsgs(t, js) c, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{DeliverPolicy: jetstream.DeliverByStartSequencePolicy, OptStartSeq: 3}) if err != nil { t.Fatalf("Unexpected error: %v", err) } wg := &sync.WaitGroup{} wg.Add(len(testMsgs) - 2) l, err := c.Consume(func(msg jetstream.Msg) { wg.Done() }) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer l.Stop() wg.Wait() time.Sleep(500 * time.Millisecond) // now delete consumer again and publish some more messages, all should be received normally info, err := c.Info(ctx) if err != nil { t.Fatalf("Unexpected error: %v", err) } if err := s.DeleteConsumer(ctx, info.Config.Name); err != nil { t.Fatal(err) } wg.Add(len(testMsgs)) publishTestMsgs(t, js) 
wg.Wait() }) t.Run("base usage, server shutdown", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msgs := make([]jetstream.Msg, 0) wg := &sync.WaitGroup{} wg.Add(len(testMsgs)) errs := make(chan error) l, err := c.Consume(func(msg jetstream.Msg) { msgs = append(msgs, msg) wg.Done() }, jetstream.ConsumeErrHandler(func(consumeCtx jetstream.ConsumeContext, err error) { if errors.Is(err, jetstream.ErrConsumerNotFound) { errs <- err } })) if err != nil { t.Fatalf("Unexpected error: %v", err) } publishTestMsgs(t, js) wg.Wait() srv = restartBasicJSServer(t, srv) defer shutdownJSServerAndRemoveStorage(t, srv) wg.Add(len(testMsgs)) publishTestMsgs(t, js) wg.Wait() l.Stop() }) t.Run("base usage, consumer does not exist", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", 
err) } if err := s.DeleteConsumer(ctx, c.CachedInfo().Config.Name); err != nil { t.Fatalf("Unexpected error: %v", err) } errs := make(chan error) msgs := make([]jetstream.Msg, 0) wg := &sync.WaitGroup{} wg.Add(len(testMsgs)) l, err := c.Consume(func(msg jetstream.Msg) { msgs = append(msgs, msg) wg.Done() }, jetstream.ConsumeErrHandler(func(consumeCtx jetstream.ConsumeContext, err error) { errs <- err })) if err != nil { t.Fatalf("Unexpected error: %v", err) } publishTestMsgs(t, js) select { case err := <-errs: if !errors.Is(err, nats.ErrNoResponders) { t.Fatalf("Expected error: %v; got: %v", nats.ErrNoResponders, err) } case <-time.After(5 * time.Second): t.Fatal("timeout waiting for error") } wg.Wait() wg.Add(len(testMsgs)) publishTestMsgs(t, js) wg.Wait() l.Stop() }) t.Run("consumer used as fetch", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } publishTestMsgs(t, js) msgs, err := c.Fetch(5) if err != nil { t.Fatalf("Unexpected error: %s", err) } for range msgs.Messages() { } if _, err := c.Consume(func(msg jetstream.Msg) {}); !errors.Is(err, jetstream.ErrOrderConsumerUsedAsFetch) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrOrderConsumerUsedAsFetch, err) } }) t.Run("error running concurrent consume requests", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := 
nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } cc, err := c.Consume(func(msg jetstream.Msg) {}) defer cc.Stop() if err != nil { t.Fatalf("Unexpected error: %s", err) } if _, err := c.Consume(func(msg jetstream.Msg) {}); !errors.Is(err, jetstream.ErrOrderedConsumerConcurrentRequests) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrOrderedConsumerConcurrentRequests, err) } }) t.Run("with auto unsubscribe", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for i := 0; i < 100; i++ { if _, err := js.Publish(ctx, "FOO.A", []byte("msg")); err != nil { t.Fatalf("Unexpected error during publish: %s", err) } } msgs := make([]jetstream.Msg, 0) wg := &sync.WaitGroup{} wg.Add(50) _, err = c.Consume(func(msg jetstream.Msg) { msgs = append(msgs, msg) msg.Ack() wg.Done() }, jetstream.StopAfter(50), jetstream.PullMaxMessages(40)) if err != nil { t.Fatalf("Unexpected error: 
%v", err) } wg.Wait() time.Sleep(10 * time.Millisecond) ci, err := c.Info(ctx) if err != nil { t.Fatalf("Unexpected error: %v", err) } if ci.NumPending != 50 { t.Fatalf("Unexpected number of pending messages; want 50; got %d", ci.NumPending) } if ci.NumAckPending != 0 { t.Fatalf("Unexpected number of ack pending messages; want 0; got %d", ci.NumAckPending) } if ci.NumWaiting != 0 { t.Fatalf("Unexpected number of waiting pull requests; want 0; got %d", ci.NumWaiting) } time.Sleep(10 * time.Millisecond) }) t.Run("with auto unsubscribe and consumer reset", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for i := 0; i < 100; i++ { if _, err := js.Publish(ctx, "FOO.A", []byte("msg")); err != nil { t.Fatalf("Unexpected error during publish: %s", err) } } msgs := make([]jetstream.Msg, 0) wg := &sync.WaitGroup{} wg.Add(100) _, err = c.Consume(func(msg jetstream.Msg) { msgs = append(msgs, msg) msg.Ack() wg.Done() }, jetstream.StopAfter(150), jetstream.PullMaxMessages(40)) if err != nil { t.Fatalf("Unexpected error: %v", err) } wg.Wait() if err := s.DeleteConsumer(ctx, c.CachedInfo().Name); err != nil { t.Fatalf("Unexpected error: %v", err) } wg.Add(50) for i := 0; i < 100; i++ { if _, err := js.Publish(ctx, "FOO.A", []byte("msg")); err != nil { t.Fatalf("Unexpected error during publish: %s", err) } } wg.Wait() time.Sleep(10 * time.Millisecond) ci, err := c.Info(ctx) if 
err != nil { t.Fatalf("Unexpected error: %v", err) } if ci.NumPending != 50 { t.Fatalf("Unexpected number of pending messages; want 50; got %d", ci.NumPending) } if ci.NumAckPending != 0 { t.Fatalf("Unexpected number of ack pending messages; want 0; got %d", ci.NumAckPending) } if ci.NumWaiting != 0 { t.Fatalf("Unexpected number of waiting pull requests; want 0; got %d", ci.NumWaiting) } time.Sleep(10 * time.Millisecond) }) t.Run("drain mode", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } wg := &sync.WaitGroup{} wg.Add(5) publishTestMsgs(t, js) cc, err := c.Consume(func(msg jetstream.Msg) { time.Sleep(50 * time.Millisecond) msg.Ack() wg.Done() }) if err != nil { t.Fatalf("Unexpected error: %v", err) } time.Sleep(100 * time.Millisecond) cc.Drain() wg.Wait() }) t.Run("stop consume during reset", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for i := 0; i < 10; i++ { c, err := 
s.OrderedConsumer(context.Background(), jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } cc, err := c.Consume(func(msg jetstream.Msg) { msg.Ack() }) if err != nil { t.Fatalf("Unexpected error: %v", err) } if err := s.DeleteConsumer(context.Background(), c.CachedInfo().Name); err != nil { t.Fatalf("Unexpected error: %v", err) } cc.Stop() time.Sleep(50 * time.Millisecond) } }) t.Run("wait for closed after drain", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msgs := make([]jetstream.Msg, 0) lock := sync.Mutex{} publishTestMsgs(t, js) cc, err := c.Consume(func(msg jetstream.Msg) { time.Sleep(50 * time.Millisecond) msg.Ack() lock.Lock() msgs = append(msgs, msg) lock.Unlock() }) if err != nil { t.Fatalf("Unexpected error: %v", err) } closed := cc.Closed() time.Sleep(100 * time.Millisecond) if err := s.DeleteConsumer(context.Background(), c.CachedInfo().Name); err != nil { t.Fatalf("Unexpected error: %v", err) } publishTestMsgs(t, js) // wait for the consumer to be recreated before calling drain for i := 0; i < 5; i++ { _, err = c.Info(ctx) if err != nil { if errors.Is(err, jetstream.ErrConsumerNotFound) { time.Sleep(100 * time.Millisecond) continue } t.Fatalf("Unexpected error: %v", err) } break } cc.Drain() select { case <-closed: case <-time.After(5 * time.Second): t.Fatalf("Timeout waiting for consume to be closed") } 
if len(msgs) != 2*len(testMsgs) { t.Fatalf("Unexpected received message count after consume closed; want %d; got %d", 2*len(testMsgs), len(msgs)) } }) t.Run("wait for closed on already closed consume", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msgs := make([]jetstream.Msg, 0) lock := sync.Mutex{} publishTestMsgs(t, js) cc, err := c.Consume(func(msg jetstream.Msg) { time.Sleep(50 * time.Millisecond) msg.Ack() lock.Lock() msgs = append(msgs, msg) lock.Unlock() }) if err != nil { t.Fatalf("Unexpected error: %v", err) } time.Sleep(100 * time.Millisecond) if err := s.DeleteConsumer(context.Background(), c.CachedInfo().Name); err != nil { t.Fatalf("Unexpected error: %v", err) } cc.Stop() time.Sleep(100 * time.Millisecond) select { case <-cc.Closed(): case <-time.After(5 * time.Second): t.Fatalf("Timeout waiting for consume to be closed") } }) } func TestOrderedConsumerMessages(t *testing.T) { testSubject := "FOO.123" testMsgs := []string{"m1", "m2", "m3", "m4", "m5"} publishTestMsgs := func(t *testing.T, js jetstream.JetStream) { for _, msg := range testMsgs { if _, err := js.Publish(context.Background(), testSubject, []byte(msg)); err != nil { t.Fatalf("Unexpected error during publish: %s", err) } } } t.Run("base usage, delete consumer", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := 
nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msgs := make([]jetstream.Msg, 0) it, err := c.Messages() if err != nil { t.Fatalf("Unexpected error: %v", err) } defer it.Stop() publishTestMsgs(t, js) for i := 0; i < 5; i++ { msg, err := it.Next() if err != nil { t.Fatalf("Unexpected error: %s", err) } msgs = append(msgs, msg) } name := c.CachedInfo().Name if err := s.DeleteConsumer(ctx, name); err != nil { t.Fatal(err) } publishTestMsgs(t, js) for i := 0; i < 5; i++ { msg, err := it.Next() if err != nil { t.Fatalf("Unexpected error: %s", err) } msgs = append(msgs, msg) } if len(msgs) != 2*len(testMsgs) { t.Fatalf("Expected %d messages; got: %d", 2*len(testMsgs), len(msgs)) } }) t.Run("base usage, server restart", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msgs := make([]jetstream.Msg, 0) it, err := c.Messages() if err != nil { t.Fatalf("Unexpected error: %v", 
err) } defer it.Stop() publishTestMsgs(t, js) for i := 0; i < 5; i++ { msg, err := it.Next() if err != nil { t.Fatalf("Unexpected error: %s", err) } msgs = append(msgs, msg) } srv = restartBasicJSServer(t, srv) defer shutdownJSServerAndRemoveStorage(t, srv) publishTestMsgs(t, js) for i := 0; i < 5; i++ { msg, err := it.Next() if err != nil { t.Fatalf("Unexpected error: %s", err) } msgs = append(msgs, msg) } if len(msgs) != 2*len(testMsgs) { t.Fatalf("Expected %d messages; got: %d", 2*len(testMsgs), len(msgs)) } }) t.Run("base usage, missing heartbeat", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } name := c.CachedInfo().Name if err := s.DeleteConsumer(ctx, name); err != nil { t.Fatal(err) } msgs := make([]jetstream.Msg, 0) it, err := c.Messages(jetstream.PullHeartbeat(1 * time.Second)) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer it.Stop() publishTestMsgs(t, js) for i := 0; i < 5; i++ { msg, err := it.Next() if err != nil { t.Fatalf("Unexpected error: %s", err) } msgs = append(msgs, msg) } publishTestMsgs(t, js) for i := 0; i < 5; i++ { msg, err := it.Next() if err != nil { t.Fatalf("Unexpected error: %s", err) } msgs = append(msgs, msg) } if len(msgs) != 2*len(testMsgs) { t.Fatalf("Expected %d messages; got: %d", 2*len(testMsgs), len(msgs)) } }) t.Run("with auto unsubscribe", func(t *testing.T) { srv := RunBasicJetStreamServer() defer 
shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "test", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for i := 0; i < 100; i++ { if _, err := js.Publish(ctx, "FOO.A", []byte("msg")); err != nil { t.Fatalf("Unexpected error during publish: %s", err) } } c, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msgs := make([]jetstream.Msg, 0) it, err := c.Messages(jetstream.StopAfter(50), jetstream.PullMaxMessages(40)) if err != nil { t.Fatalf("Unexpected error: %v", err) } for i := 0; i < 50; i++ { msg, err := it.Next() if err != nil { t.Fatal(err) } if msg == nil { break } if err := msg.Ack(); err != nil { t.Fatalf("Unexpected error: %v", err) } msgs = append(msgs, msg) } if _, err := it.Next(); err != jetstream.ErrMsgIteratorClosed { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrMsgIteratorClosed, err) } if len(msgs) != 50 { t.Fatalf("Unexpected received message count; want %d; got %d", 50, len(msgs)) } ci, err := c.Info(ctx) if err != nil { t.Fatalf("Unexpected error: %v", err) } if ci.NumPending != 50 { t.Fatalf("Unexpected number of pending messages; want 50; got %d", ci.NumPending) } if ci.NumAckPending != 0 { t.Fatalf("Unexpected number of ack pending messages; want 0; got %d", ci.NumAckPending) } if ci.NumWaiting != 0 { t.Fatalf("Unexpected number of waiting pull requests; want 0; got %d", ci.NumWaiting) } }) t.Run("with auto unsubscribe and consumer reset", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { 
t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "test", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for i := 0; i < 100; i++ { if _, err := js.Publish(ctx, "FOO.A", []byte("msg")); err != nil { t.Fatalf("Unexpected error during publish: %s", err) } } c, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msgs := make([]jetstream.Msg, 0) it, err := c.Messages(jetstream.StopAfter(150), jetstream.PullMaxMessages(40)) if err != nil { t.Fatalf("Unexpected error: %v", err) } for i := 0; i < 100; i++ { msg, err := it.Next() if err != nil { t.Fatal(err) } if msg == nil { break } if err := msg.Ack(); err != nil { t.Fatalf("Unexpected error: %v", err) } msgs = append(msgs, msg) } if err := s.DeleteConsumer(ctx, c.CachedInfo().Name); err != nil { t.Fatalf("Unexpected error: %v", err) } for i := 0; i < 100; i++ { if _, err := js.Publish(ctx, "FOO.A", []byte("msg")); err != nil { t.Fatalf("Unexpected error during publish: %s", err) } } for i := 0; i < 50; i++ { msg, err := it.Next() if err != nil { t.Fatal(err) } if msg == nil { break } if err := msg.Ack(); err != nil { t.Fatalf("Unexpected error: %v", err) } msgs = append(msgs, msg) } if _, err := it.Next(); err != jetstream.ErrMsgIteratorClosed { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrMsgIteratorClosed, err) } if len(msgs) != 150 { t.Fatalf("Unexpected received message count; want %d; got %d", 50, len(msgs)) } ci, err := c.Info(ctx) if err != nil { t.Fatalf("Unexpected error: %v", err) } if ci.NumPending != 50 { t.Fatalf("Unexpected number of pending messages; want 50; got %d", ci.NumPending) } if ci.NumAckPending != 0 { t.Fatalf("Unexpected number of ack pending 
messages; want 0; got %d", ci.NumAckPending) } if ci.NumWaiting != 0 { t.Fatalf("Unexpected number of waiting pull requests; want 0; got %d", ci.NumWaiting) } }) t.Run("consumer used as fetch", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } publishTestMsgs(t, js) msgs, err := c.Fetch(5) if err != nil { t.Fatalf("Unexpected error: %s", err) } for range msgs.Messages() { } if _, err := c.Messages(); !errors.Is(err, jetstream.ErrOrderConsumerUsedAsFetch) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrOrderConsumerUsedAsFetch, err) } }) t.Run("error running concurrent consume requests", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } cc, err := c.Messages() defer cc.Stop() if err != nil { t.Fatalf("Unexpected error: %s", err) } if _, err := 
c.Messages(); !errors.Is(err, jetstream.ErrOrderedConsumerConcurrentRequests) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrOrderedConsumerConcurrentRequests, err) } }) t.Run("drain mode", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msgs := make([]jetstream.Msg, 0) it, err := c.Messages() if err != nil { t.Fatalf("Unexpected error: %v", err) } publishTestMsgs(t, js) go func() { time.Sleep(100 * time.Millisecond) it.Drain() }() for i := 0; i < len(testMsgs); i++ { msg, err := it.Next() if err != nil { t.Fatal(err) } time.Sleep(50 * time.Millisecond) msg.Ack() msgs = append(msgs, msg) } _, err = it.Next() if !errors.Is(err, jetstream.ErrMsgIteratorClosed) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrMsgIteratorClosed, err) } if len(msgs) != len(testMsgs) { t.Fatalf("Unexpected received message count after drain; want %d; got %d", len(testMsgs), len(msgs)) } }) } func TestOrderedConsumerFetch(t *testing.T) { testSubject := "FOO.123" testMsgs := []string{"m1", "m2", "m3", "m4", "m5"} publishTestMsgs := func(t *testing.T, js jetstream.JetStream) { for _, msg := range testMsgs { if _, err := js.Publish(context.Background(), testSubject, []byte(msg)); err != nil { t.Fatalf("Unexpected error during publish: %s", err) } } } t.Run("base usage, delete consumer", func(t *testing.T) { srv := RunBasicJetStreamServer() defer 
shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msgs := make([]jetstream.Msg, 0) publishTestMsgs(t, js) res, err := c.Fetch(5) if err != nil { t.Fatalf("Unexpected error: %s", err) } for msg := range res.Messages() { msgs = append(msgs, msg) } if res.Error() != nil { t.Fatalf("Unexpected error: %s", err) } name := c.CachedInfo().Name if err := s.DeleteConsumer(ctx, name); err != nil { t.Fatal(err) } publishTestMsgs(t, js) res, err = c.Fetch(5) if err != nil { t.Fatalf("Unexpected error: %s", err) } for msg := range res.Messages() { msgs = append(msgs, msg) } if res.Error() != nil { t.Fatalf("Unexpected error: %s", err) } if len(msgs) != 2*len(testMsgs) { t.Fatalf("Expected %d messages; got: %d", 2*len(testMsgs), len(msgs)) } }) t.Run("with custom deliver policy", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msgs := make([]jetstream.Msg, 0) for i := 0; i < 5; i++ { if _, err := js.Publish(context.Background(), "FOO.A", 
[]byte("msg")); err != nil { t.Fatalf("Unexpected error during publish: %s", err) } } for i := 0; i < 5; i++ { if _, err := js.Publish(context.Background(), "FOO.B", []byte("msg")); err != nil { t.Fatalf("Unexpected error during publish: %s", err) } } c, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{ DeliverPolicy: jetstream.DeliverLastPerSubjectPolicy, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } res, err := c.Fetch(int(c.CachedInfo().NumPending), jetstream.FetchMaxWait(1*time.Second)) if err != nil { t.Fatalf("Unexpected error: %s", err) } for msg := range res.Messages() { msgs = append(msgs, msg) } if res.Error() != nil { t.Fatalf("Unexpected error: %s", err) } if len(msgs) != 2 { t.Fatalf("Expected %d messages; got: %d", 2, len(msgs)) } expectedSubjects := []string{"FOO.A", "FOO.B"} for i := range msgs { if msgs[i].Subject() != expectedSubjects[i] { t.Fatalf("Expected subject: %s; got: %s", expectedSubjects[i], msgs[i].Subject()) } } }) t.Run("consumer used as consume", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } cc, err := c.Consume(func(msg jetstream.Msg) {}) if err != nil { t.Fatalf("Unexpected error: %s", err) } cc.Stop() _, err = c.Fetch(5) if !errors.Is(err, jetstream.ErrOrderConsumerUsedAsConsume) { t.Fatalf("Expected error: %s; got: %s", jetstream.ErrOrderConsumerUsedAsConsume, err) } }) t.Run("concurrent fetch 
requests", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } publishTestMsgs(t, js) res, err := c.Fetch(1, jetstream.FetchMaxWait(100*time.Millisecond)) if err != nil { t.Fatalf("Unexpected error: %s", err) } _, err = c.Fetch(1) if !errors.Is(err, jetstream.ErrOrderedConsumerConcurrentRequests) { t.Fatalf("Expected error: %s; got: %s", jetstream.ErrOrderedConsumerConcurrentRequests, err) } for msg := range res.Messages() { msg.Ack() } }) } func TestOrderedConsumerFetchBytes(t *testing.T) { testSubject := "FOO.123" testMsgs := []string{"m1", "m2", "m3", "m4", "m5"} publishTestMsgs := func(t *testing.T, js jetstream.JetStream) { for _, msg := range testMsgs { if _, err := js.Publish(context.Background(), testSubject, []byte(msg)); err != nil { t.Fatalf("Unexpected error during publish: %s", err) } } } t.Run("base usage, delete consumer", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", 
err) } c, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msgs := make([]jetstream.Msg, 0) publishTestMsgs(t, js) res, err := c.FetchBytes(500, jetstream.FetchMaxWait(100*time.Millisecond)) if err != nil { t.Fatalf("Unexpected error: %s", err) } for msg := range res.Messages() { msgs = append(msgs, msg) } if res.Error() != nil { t.Fatalf("Unexpected error: %s", err) } name := c.CachedInfo().Name if err := s.DeleteConsumer(ctx, name); err != nil { t.Fatal(err) } publishTestMsgs(t, js) res, err = c.Fetch(500, jetstream.FetchMaxWait(100*time.Millisecond)) if err != nil { t.Fatalf("Unexpected error: %s", err) } for msg := range res.Messages() { msgs = append(msgs, msg) } if res.Error() != nil { t.Fatalf("Unexpected error: %s", err) } if len(msgs) != 2*len(testMsgs) { t.Fatalf("Expected %d messages; got: %d", 2*len(testMsgs), len(msgs)) } }) t.Run("consumer used as consume", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } cc, err := c.Consume(func(msg jetstream.Msg) {}) if err != nil { t.Fatalf("Unexpected error: %s", err) } cc.Stop() _, err = c.FetchBytes(500) if !errors.Is(err, jetstream.ErrOrderConsumerUsedAsConsume) { t.Fatalf("Expected error: %s; got: %s", jetstream.ErrOrderConsumerUsedAsConsume, err) } }) t.Run("concurrent fetch requests", func(t *testing.T) { srv := 
RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } publishTestMsgs(t, js) res, err := c.FetchBytes(500, jetstream.FetchMaxWait(100*time.Millisecond)) if err != nil { t.Fatalf("Unexpected error: %s", err) } _, err = c.FetchBytes(500) if !errors.Is(err, jetstream.ErrOrderedConsumerConcurrentRequests) { t.Fatalf("Expected error: %s; got: %s", jetstream.ErrOrderedConsumerConcurrentRequests, err) } for msg := range res.Messages() { msg.Ack() } }) } func TestOrderedConsumerNext(t *testing.T) { testSubject := "FOO.123" testMsgs := []string{"m1", "m2", "m3", "m4", "m5"} publishTestMsgs := func(t *testing.T, js jetstream.JetStream) { for _, msg := range testMsgs { if _, err := js.Publish(context.Background(), testSubject, []byte(msg)); err != nil { t.Fatalf("Unexpected error during publish: %s", err) } } } t.Run("base usage, delete consumer", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := 
s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } publishTestMsgs(t, js) _, err = c.Next() if err != nil { t.Fatalf("Unexpected error: %s", err) } name := c.CachedInfo().Name if err := s.DeleteConsumer(ctx, name); err != nil { t.Fatal(err) } _, err = c.Next() if err != nil { t.Fatalf("Unexpected error: %s", err) } }) t.Run("consumer used as consume", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } cc, err := c.Consume(func(msg jetstream.Msg) {}) if err != nil { t.Fatalf("Unexpected error: %s", err) } cc.Stop() _, err = c.Next() if !errors.Is(err, jetstream.ErrOrderConsumerUsedAsConsume) { t.Fatalf("Expected error: %s; got: %s", jetstream.ErrOrderConsumerUsedAsConsume, err) } }) t.Run("preserve sequence after fetch error", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.OrderedConsumer(ctx, 
jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } if _, err := js.Publish(ctx, "FOO.A", []byte("msg")); err != nil { t.Fatalf("Unexpected error during publish: %s", err) } msg, err := c.Next() if err != nil { t.Fatalf("Unexpected error: %s", err) } meta, err := msg.Metadata() if err != nil { t.Fatalf("Unexpected error: %s", err) } if meta.Sequence.Stream != 1 { t.Fatalf("Expected sequence: %d; got: %d", 1, meta.Sequence.Stream) } // get next message, it should time out (no more messages on stream) _, err = c.Next(jetstream.FetchMaxWait(100 * time.Millisecond)) if !errors.Is(err, nats.ErrTimeout) { t.Fatalf("Expected error: %s; got: %s", nats.ErrTimeout, err) } if _, err := js.Publish(ctx, "FOO.A", []byte("msg")); err != nil { t.Fatalf("Unexpected error during publish: %s", err) } // get next message, it should have stream sequence 2 msg, err = c.Next() if err != nil { t.Fatalf("Unexpected error: %s", err) } meta, err = msg.Metadata() if err != nil { t.Fatalf("Unexpected error: %s", err) } if meta.Sequence.Stream != 2 { t.Fatalf("Expected sequence: %d; got: %d", 2, meta.Sequence.Stream) } }) } func TestOrderedConsumerFetchNoWait(t *testing.T) { testSubject := "FOO.123" testMsgs := []string{"m1", "m2", "m3", "m4", "m5"} publishTestMsgs := func(t *testing.T, js jetstream.JetStream) { for _, msg := range testMsgs { if _, err := js.Publish(context.Background(), testSubject, []byte(msg)); err != nil { t.Fatalf("Unexpected error during publish: %s", err) } } } t.Run("base usage, delete consumer", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.CreateStream(ctx, 
jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msgs := make([]jetstream.Msg, 0) publishTestMsgs(t, js) res, err := c.FetchNoWait(5) if err != nil { t.Fatalf("Unexpected error: %s", err) } for msg := range res.Messages() { msgs = append(msgs, msg) } if res.Error() != nil { t.Fatalf("Unexpected error: %s", err) } name := c.CachedInfo().Name if err := s.DeleteConsumer(ctx, name); err != nil { t.Fatal(err) } publishTestMsgs(t, js) res, err = c.FetchNoWait(5) if err != nil { t.Fatalf("Unexpected error: %s", err) } for msg := range res.Messages() { msgs = append(msgs, msg) } if res.Error() != nil { t.Fatalf("Unexpected error: %s", err) } if len(msgs) != 2*len(testMsgs) { t.Fatalf("Expected %d messages; got: %d", 2*len(testMsgs), len(msgs)) } }) t.Run("consumer used as consume", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } cc, err := c.Consume(func(msg jetstream.Msg) {}) if err != nil { t.Fatalf("Unexpected error: %s", err) } cc.Stop() _, err = c.FetchNoWait(5) if !errors.Is(err, jetstream.ErrOrderConsumerUsedAsConsume) { t.Fatalf("Expected error: %s; got: %s", jetstream.ErrOrderConsumerUsedAsConsume, err) } }) } func TestOrderedConsumerInfo(t *testing.T) { srv := 
RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := js.OrderedConsumer(ctx, "foo", jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } cc, err := c.Consume(func(msg jetstream.Msg) {}) if err != nil { t.Fatalf("Unexpected error: %s", err) } defer cc.Stop() info, err := c.Info(ctx) if err != nil { t.Fatalf("Unexpected error: %s", err) } initialName := info.Name if err := s.DeleteConsumer(ctx, initialName); err != nil { t.Fatalf("Unexpected error: %s", err) } time.Sleep(50 * time.Millisecond) info, err = c.Info(ctx) if err != nil { t.Fatalf("Unexpected error: %s", err) } if info.Name == initialName { t.Fatalf("New consumer should be returned; got: %s", info.Name) } } func TestOrderedConsumerNextTimeout(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithCancel(context.Background()) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } _, err = c.Next(jetstream.FetchMaxWait(1 * time.Second)) if !errors.Is(err, nats.ErrTimeout) { t.Fatalf("Expected error: %v; got: %v", 
nats.ErrTimeout, err) } } func TestOrderedConsumerNextOrder(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithCancel(context.Background()) defer cancel() publishFailed := make(chan error, 1) go func() { for i := 0; i < 1000; i++ { _, err := js.Publish(ctx, "FOO.A", []byte(fmt.Sprintf("%d", 1))) if err != nil { publishFailed <- err } } }() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.OrderedConsumer(ctx, jetstream.OrderedConsumerConfig{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for i := 0; i < 1000; i++ { select { case err := <-publishFailed: t.Fatalf("Publish error: %v", err) default: } msg, err := c.Next(jetstream.FetchMaxWait(5 * time.Second)) if err != nil { t.Fatalf("Unexpected error: %v", err) } meta, err := msg.Metadata() if err != nil { t.Fatalf("Unexpected error: %v", err) } if meta.Sequence.Stream != uint64(i+1) { t.Fatalf("Unexpected sequence number: %d", meta.Sequence.Stream) } } } func TestOrderedConsumerConfig(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s, err := js.CreateStream(context.Background(), jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } tests := []struct { name string config jetstream.OrderedConsumerConfig expected jetstream.ConsumerConfig }{ { name: "default config", config: jetstream.OrderedConsumerConfig{}, expected: 
jetstream.ConsumerConfig{ DeliverPolicy: jetstream.DeliverAllPolicy, AckPolicy: jetstream.AckNonePolicy, MaxDeliver: -1, MaxWaiting: 512, InactiveThreshold: 5 * time.Minute, Replicas: 1, MemoryStorage: true, }, }, { name: "custom inactive threshold", config: jetstream.OrderedConsumerConfig{ InactiveThreshold: 10 * time.Second, }, expected: jetstream.ConsumerConfig{ DeliverPolicy: jetstream.DeliverAllPolicy, AckPolicy: jetstream.AckNonePolicy, MaxDeliver: -1, MaxWaiting: 512, InactiveThreshold: 10 * time.Second, Replicas: 1, MemoryStorage: true, }, }, { name: "custom opt start seq and inactive threshold", config: jetstream.OrderedConsumerConfig{ DeliverPolicy: jetstream.DeliverByStartSequencePolicy, OptStartSeq: 10, InactiveThreshold: 10 * time.Second, }, expected: jetstream.ConsumerConfig{ OptStartSeq: 10, DeliverPolicy: jetstream.DeliverByStartSequencePolicy, AckPolicy: jetstream.AckNonePolicy, MaxDeliver: -1, MaxWaiting: 512, InactiveThreshold: 10 * time.Second, Replicas: 1, MemoryStorage: true, }, }, { name: "all fields customized, start with custom seq", config: jetstream.OrderedConsumerConfig{ FilterSubjects: []string{"foo.a", "foo.b"}, DeliverPolicy: jetstream.DeliverByStartSequencePolicy, OptStartSeq: 10, ReplayPolicy: jetstream.ReplayOriginalPolicy, InactiveThreshold: 10 * time.Second, HeadersOnly: true, Metadata: map[string]string{"foo": "a"}, }, expected: jetstream.ConsumerConfig{ FilterSubjects: []string{"foo.a", "foo.b"}, OptStartSeq: 10, DeliverPolicy: jetstream.DeliverByStartSequencePolicy, AckPolicy: jetstream.AckNonePolicy, MaxDeliver: -1, MaxWaiting: 512, InactiveThreshold: 10 * time.Second, Replicas: 1, MemoryStorage: true, HeadersOnly: true, Metadata: map[string]string{"foo": "a"}, }, }, { name: "all fields customized, start with custom time", config: jetstream.OrderedConsumerConfig{ FilterSubjects: []string{"foo.a", "foo.b"}, DeliverPolicy: jetstream.DeliverByStartTimePolicy, OptStartTime: &time.Time{}, ReplayPolicy: 
jetstream.ReplayOriginalPolicy, InactiveThreshold: 10 * time.Second, HeadersOnly: true, Metadata: map[string]string{"foo": "a"}, }, expected: jetstream.ConsumerConfig{ FilterSubjects: []string{"foo.a", "foo.b"}, OptStartTime: &time.Time{}, DeliverPolicy: jetstream.DeliverByStartTimePolicy, AckPolicy: jetstream.AckNonePolicy, MaxDeliver: -1, MaxWaiting: 512, InactiveThreshold: 10 * time.Second, Replicas: 1, MemoryStorage: true, HeadersOnly: true, Metadata: map[string]string{"foo": "a"}, }, }, { name: "both start seq and time set, deliver policy start seq", config: jetstream.OrderedConsumerConfig{ FilterSubjects: []string{"foo.a", "foo.b"}, DeliverPolicy: jetstream.DeliverByStartSequencePolicy, OptStartSeq: 10, OptStartTime: &time.Time{}, ReplayPolicy: jetstream.ReplayOriginalPolicy, InactiveThreshold: 10 * time.Second, HeadersOnly: true, }, expected: jetstream.ConsumerConfig{ FilterSubjects: []string{"foo.a", "foo.b"}, OptStartSeq: 10, OptStartTime: nil, DeliverPolicy: jetstream.DeliverByStartSequencePolicy, AckPolicy: jetstream.AckNonePolicy, MaxDeliver: -1, MaxWaiting: 512, InactiveThreshold: 10 * time.Second, Replicas: 1, MemoryStorage: true, HeadersOnly: true, }, }, { name: "both start seq and time set, deliver policy start time", config: jetstream.OrderedConsumerConfig{ FilterSubjects: []string{"foo.a", "foo.b"}, DeliverPolicy: jetstream.DeliverByStartTimePolicy, OptStartSeq: 10, OptStartTime: &time.Time{}, ReplayPolicy: jetstream.ReplayOriginalPolicy, InactiveThreshold: 10 * time.Second, HeadersOnly: true, }, expected: jetstream.ConsumerConfig{ FilterSubjects: []string{"foo.a", "foo.b"}, OptStartSeq: 0, OptStartTime: &time.Time{}, DeliverPolicy: jetstream.DeliverByStartTimePolicy, AckPolicy: jetstream.AckNonePolicy, MaxDeliver: -1, MaxWaiting: 512, InactiveThreshold: 10 * time.Second, Replicas: 1, MemoryStorage: true, HeadersOnly: true, }, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { c, err := s.OrderedConsumer(context.Background(), 
test.config) if err != nil { t.Fatalf("Unexpected error: %v", err) } cfg := c.CachedInfo().Config test.expected.Name = cfg.Name if test.config.Metadata != nil { for k, v := range test.config.Metadata { if cfg.Metadata[k] != v { t.Fatalf("Expected config %+v, got %+v", test.expected, cfg) } } } test.expected.Metadata = cfg.Metadata if !reflect.DeepEqual(test.expected, cfg) { t.Fatalf("Expected config %+v, got %+v", test.expected, cfg) } }) } } nats.go-1.41.0/jetstream/test/publish_test.go000066400000000000000000001337131477351342400212010ustar00rootroot00000000000000// Copyright 2022-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package test import ( "context" "errors" "fmt" "os" "reflect" "sync" "testing" "time" "github.com/nats-io/nats-server/v2/server" "github.com/nats-io/nats.go" "github.com/nats-io/nats.go/jetstream" ) func TestPublishMsg(t *testing.T) { type publishConfig struct { msg *nats.Msg opts []jetstream.PublishOpt expectedHeaders nats.Header expectedAck jetstream.PubAck withError func(*testing.T, error) } tests := []struct { name string srvConfig []byte timeout time.Duration msgs []publishConfig }{ { name: "publish 3 simple messages, no opts", msgs: []publishConfig{ { msg: &nats.Msg{ Data: []byte("msg 1"), Subject: "FOO.1", }, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 1, Domain: "", }, }, { msg: &nats.Msg{ Data: []byte("msg 2"), Subject: "FOO.1", }, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 2, Domain: "", }, }, { msg: &nats.Msg{ Data: []byte("msg 3"), Subject: "FOO.2", }, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 3, Domain: "", }, }, }, }, { name: "publish 3 messages with message ID, with duplicate", msgs: []publishConfig{ { msg: &nats.Msg{ Data: []byte("msg 1"), Subject: "FOO.1", }, opts: []jetstream.PublishOpt{jetstream.WithMsgID("1")}, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 1, }, expectedHeaders: nats.Header{ "Nats-Msg-Id": []string{"1"}, }, }, { msg: &nats.Msg{ Data: []byte("msg 2"), Subject: "FOO.1", }, opts: []jetstream.PublishOpt{jetstream.WithMsgID("1")}, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 1, Duplicate: true, }, expectedHeaders: nats.Header{ "Nats-Msg-Id": []string{"1"}, }, }, { msg: &nats.Msg{ Data: []byte("msg 3"), Subject: "FOO.2", }, opts: []jetstream.PublishOpt{jetstream.WithMsgID("2")}, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 2, }, expectedHeaders: nats.Header{ "Nats-Msg-Id": []string{"2"}, }, }, }, }, { name: "expect last msg ID", msgs: []publishConfig{ { msg: &nats.Msg{ Data: []byte("msg 1"), Subject: "FOO.1", }, opts: 
[]jetstream.PublishOpt{jetstream.WithMsgID("1")}, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 1, }, expectedHeaders: nats.Header{ "Nats-Msg-Id": []string{"1"}, }, }, { msg: &nats.Msg{ Data: []byte("msg 2"), Subject: "FOO.1", }, opts: []jetstream.PublishOpt{jetstream.WithMsgID("2")}, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 2, }, expectedHeaders: nats.Header{ "Nats-Msg-Id": []string{"2"}, }, }, { msg: &nats.Msg{ Data: []byte("msg 3"), Subject: "FOO.2", }, opts: []jetstream.PublishOpt{jetstream.WithExpectLastMsgID("2")}, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 3, }, expectedHeaders: nats.Header{ "Nats-Expected-Last-Msg-Id": []string{"2"}, }, }, }, }, { name: "invalid last msg id", msgs: []publishConfig{ { msg: &nats.Msg{ Data: []byte("msg 1"), Subject: "FOO.1", }, opts: []jetstream.PublishOpt{jetstream.WithMsgID("1")}, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 1, }, expectedHeaders: nats.Header{ "Nats-Msg-Id": []string{"1"}, }, }, { msg: &nats.Msg{ Data: []byte("msg 2"), Subject: "FOO.2", }, opts: []jetstream.PublishOpt{jetstream.WithExpectLastMsgID("abc")}, withError: func(t *testing.T, err error) { var apiErr *jetstream.APIError if ok := errors.As(err, &apiErr); !ok { t.Fatalf("Expected API error; got: %v", err) } if apiErr.ErrorCode != 10070 { t.Fatalf("Expected error code: 10070; got: %d", apiErr.ErrorCode) } }, }, }, }, { name: "expect last sequence", msgs: []publishConfig{ { msg: &nats.Msg{ Data: []byte("msg 1"), Subject: "FOO.1", }, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 1, }, }, { msg: &nats.Msg{ Data: []byte("msg 2"), Subject: "FOO.1", }, opts: []jetstream.PublishOpt{}, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 2, }, }, { msg: &nats.Msg{ Data: []byte("msg 3"), Subject: "FOO.2", }, opts: []jetstream.PublishOpt{jetstream.WithExpectLastSequence(2)}, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 3, }, expectedHeaders: nats.Header{ "Nats-Expected-Last-Sequence": 
[]string{"2"}, }, }, }, }, { name: "invalid last seq", msgs: []publishConfig{ { msg: &nats.Msg{ Data: []byte("msg 1"), Subject: "FOO.1", }, opts: []jetstream.PublishOpt{}, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 1, }, }, { msg: &nats.Msg{ Data: []byte("msg 2"), Subject: "FOO.2", }, opts: []jetstream.PublishOpt{jetstream.WithExpectLastSequence(123)}, withError: func(t *testing.T, err error) { var apiErr *jetstream.APIError if ok := errors.As(err, &apiErr); !ok { t.Fatalf("Expected API error; got: %v", err) } if apiErr.ErrorCode != 10071 { t.Fatalf("Expected error code: 10071; got: %d", apiErr.ErrorCode) } }, }, }, }, { name: "expect last sequence per subject", msgs: []publishConfig{ { msg: &nats.Msg{ Data: []byte("msg 1"), Subject: "FOO.1", }, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 1, }, }, { msg: &nats.Msg{ Data: []byte("msg 2"), Subject: "FOO.2", }, opts: []jetstream.PublishOpt{}, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 2, }, }, { msg: &nats.Msg{ Data: []byte("msg 3"), Subject: "FOO.1", }, opts: []jetstream.PublishOpt{jetstream.WithExpectLastSequencePerSubject(1)}, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 3, }, expectedHeaders: nats.Header{ "Nats-Expected-Last-Subject-Sequence": []string{"1"}, }, }, }, }, { name: "invalid last sequence per subject", msgs: []publishConfig{ { msg: &nats.Msg{ Data: []byte("msg 1"), Subject: "FOO.1", }, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 1, }, }, { msg: &nats.Msg{ Data: []byte("msg 2"), Subject: "FOO.2", }, opts: []jetstream.PublishOpt{}, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 2, }, }, { msg: &nats.Msg{ Data: []byte("msg 3"), Subject: "FOO.1", }, opts: []jetstream.PublishOpt{jetstream.WithExpectLastSequencePerSubject(123)}, withError: func(t *testing.T, err error) { var apiErr *jetstream.APIError if ok := errors.As(err, &apiErr); !ok { t.Fatalf("Expected API error; got: %v", err) } if apiErr.ErrorCode != 10071 { t.Fatalf("Expected 
error code: 10071; got: %d", apiErr.ErrorCode) } }, }, }, }, { name: "expect stream header", msgs: []publishConfig{ { msg: &nats.Msg{ Data: []byte("msg 1"), Subject: "FOO.1", }, opts: []jetstream.PublishOpt{jetstream.WithExpectStream("foo")}, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 1, }, expectedHeaders: nats.Header{ "Nats-Expected-Stream": []string{"foo"}, }, }, }, }, { name: "invalid expected stream header", msgs: []publishConfig{ { msg: &nats.Msg{ Data: []byte("msg 1"), Subject: "FOO.1", }, opts: []jetstream.PublishOpt{jetstream.WithExpectStream("abc")}, withError: func(t *testing.T, err error) { var apiErr *jetstream.APIError if ok := errors.As(err, &apiErr); !ok { t.Fatalf("Expected API error; got: %v", err) } if apiErr.ErrorCode != 10060 { t.Fatalf("Expected error code: 10060; got: %d", apiErr.ErrorCode) } }, }, }, }, { name: "publish 3 simple messages with domain set", srvConfig: []byte( ` listen: 127.0.0.1:-1 jetstream: {domain: "test-domain"} `), msgs: []publishConfig{ { msg: &nats.Msg{ Data: []byte("msg 1"), Subject: "FOO.1", }, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 1, Domain: "test-domain", }, }, { msg: &nats.Msg{ Data: []byte("msg 2"), Subject: "FOO.1", }, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 2, Domain: "test-domain", }, }, { msg: &nats.Msg{ Data: []byte("msg 3"), Subject: "FOO.2", }, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 3, Domain: "test-domain", }, }, }, }, { name: "publish timeout", msgs: []publishConfig{ { msg: &nats.Msg{ Data: []byte("msg 1"), Subject: "FOO.1", }, opts: []jetstream.PublishOpt{}, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 1, }, expectedHeaders: nats.Header{ "Nats-Expected-Stream": []string{"foo"}, }, withError: func(t *testing.T, err error) { if !errors.Is(err, context.DeadlineExceeded) { t.Fatalf("Expected deadline exceeded error; got: %v", err) } }, }, }, timeout: 1 * time.Nanosecond, }, { name: "invalid option set", msgs: []publishConfig{ { msg: 
&nats.Msg{ Data: []byte("msg 1"), Subject: "FOO.1", }, opts: []jetstream.PublishOpt{jetstream.WithStallWait(1 * time.Second)}, withError: func(t *testing.T, err error) { if !errors.Is(err, jetstream.ErrInvalidOption) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrInvalidOption, err) } }, }, }, }, { name: "no subject set on message", msgs: []publishConfig{ { msg: &nats.Msg{ Data: []byte("msg 1"), }, opts: []jetstream.PublishOpt{}, withError: func(t *testing.T, err error) { if !errors.Is(err, nats.ErrBadSubject) { t.Fatalf("Expected error: %v; got: %v", nats.ErrBadSubject, err) } }, }, }, }, { name: "invalid subject set", msgs: []publishConfig{ { msg: &nats.Msg{ Data: []byte("msg 1"), Subject: "ABC", }, opts: []jetstream.PublishOpt{}, withError: func(t *testing.T, err error) { if !errors.Is(err, jetstream.ErrNoStreamResponse) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrNoStreamResponse, err) } }, }, }, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { var srv *server.Server if test.srvConfig != nil { conf := createConfFile(t, test.srvConfig) defer os.Remove(conf) srv, _ = RunServerWithConfig(conf) } else { srv = RunBasicJetStreamServer() } defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() _, err = js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}, MaxMsgSize: 64}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for _, pub := range test.msgs { var pubCtx context.Context var pubCancel context.CancelFunc if test.timeout != 0 { pubCtx, pubCancel = context.WithTimeout(ctx, test.timeout) } else { pubCtx, pubCancel = context.WithTimeout(ctx, 1*time.Minute) } ack, err := js.PublishMsg(pubCtx, pub.msg, pub.opts...) 
pubCancel() if pub.withError != nil { pub.withError(t, err) continue } if err != nil { t.Fatalf("Unexpected error: %v", err) } if !reflect.DeepEqual(pub.expectedHeaders, pub.msg.Header) { t.Fatalf("Invalid headers on message; want: %v; got: %v", pub.expectedHeaders, pub.msg.Header) } if *ack != pub.expectedAck { t.Fatalf("Invalid ack received; want: %v; got: %v", pub.expectedAck, ack) } } }) } } func TestPublishWithTTL(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() stream, err := js.CreateStream(ctx, jetstream.StreamConfig{ Name: "foo", Subjects: []string{"FOO.*"}, MaxMsgSize: 64, AllowMsgTTL: true}) if err != nil { t.Fatalf("Unexpected error: %v", err) } ack, err := js.Publish(ctx, "FOO.1", []byte("msg"), jetstream.WithMsgTTL(1*time.Second)) if err != nil { t.Fatalf("Unexpected error: %v", err) } gotMsg, err := stream.GetMsg(ctx, ack.Sequence) if err != nil { t.Fatalf("Unexpected error: %v", err) } if ttl := gotMsg.Header.Get("Nats-TTL"); ttl != "1s" { t.Fatalf("Expected message to have TTL header set to 1s; got: %s", ttl) } time.Sleep(1500 * time.Millisecond) _, err = stream.GetMsg(ctx, ack.Sequence) if !errors.Is(err, jetstream.ErrMsgNotFound) { t.Fatalf("Expected not found error; got: %v", err) } } func TestMsgDeleteMarkerMaxAge(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() stream, err := 
js.CreateStream(ctx, jetstream.StreamConfig{ Name: "foo", Subjects: []string{"FOO.*"}, AllowMsgTTL: true, SubjectDeleteMarkerTTL: 50 * time.Second, MaxAge: 1 * time.Second}) if err != nil { t.Fatalf("Unexpected error: %v", err) } _, err = js.Publish(ctx, "FOO.1", []byte("msg1")) if err != nil { t.Fatalf("Unexpected error: %v", err) } time.Sleep(1500 * time.Millisecond) gotMsg, err := stream.GetLastMsgForSubject(ctx, "FOO.1") if err != nil { t.Fatalf("Unexpected error: %v", err) } if ttlMarker := gotMsg.Header.Get("Nats-Marker-Reason"); ttlMarker != "MaxAge" { t.Fatalf("Expected message to have Marker-Reason header set to MaxAge; got: %s", ttlMarker) } if ttl := gotMsg.Header.Get("Nats-TTL"); ttl != "50s" { t.Fatalf("Expected message to have Nats-TTL header set to 50s; got: %s", ttl) } } func TestPublishAsyncWithTTL(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() stream, err := js.CreateStream(ctx, jetstream.StreamConfig{ Name: "foo", Subjects: []string{"FOO.*"}, MaxMsgSize: 64, AllowMsgTTL: true}) if err != nil { t.Fatalf("Unexpected error: %v", err) } paf, err := js.PublishAsync("FOO.1", []byte("msg"), jetstream.WithMsgTTL(1*time.Second)) if err != nil { t.Fatalf("Unexpected error: %v", err) } var ack *jetstream.PubAck select { case ack = <-paf.Ok(): case <-time.After(5 * time.Second): t.Fatalf("Did not receive ack") } gotMsg, err := stream.GetMsg(ctx, ack.Sequence) if err != nil { t.Fatalf("Unexpected error: %v", err) } if ttl := gotMsg.Header.Get("Nats-TTL"); ttl != "1s" { t.Fatalf("Expected message to have TTL header set to 1s; got: %s", ttl) } time.Sleep(1500 * time.Millisecond) _, err = stream.GetMsg(ctx, ack.Sequence) if 
!errors.Is(err, jetstream.ErrMsgNotFound) { t.Fatalf("Expected not found error; got: %v", err) } } func TestPublish(t *testing.T) { // Only very basic test cases, as most use cases are tested in TestPublishMsg tests := []struct { name string msg []byte subject string opts []jetstream.PublishOpt withError error }{ { name: "publish single message on stream, no options", msg: []byte("msg"), subject: "FOO.1", }, { name: "publish single message on stream with message id", msg: []byte("msg"), subject: "FOO.1", opts: []jetstream.PublishOpt{jetstream.WithMsgID("1")}, }, { name: "empty subject passed", msg: []byte("msg"), subject: "", withError: nats.ErrBadSubject, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() _, err = js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}, MaxMsgSize: 64}) if err != nil { t.Fatalf("Unexpected error: %v", err) } ack, err := js.Publish(ctx, test.subject, test.msg, test.opts...) 
if test.withError != nil { if err == nil || !errors.Is(err, test.withError) { t.Fatalf("Expected error: %v; got: %v", test.withError, err) } return } if err != nil { t.Fatalf("Unexpected error: %v", err) } if ack.Sequence != 1 || ack.Stream != "foo" { t.Fatalf("Invalid ack; want sequence 1 on stream foo, got: %v", ack) } }) } } func TestPublishTimeout(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc, jetstream.WithDefaultTimeout(200*time.Millisecond)) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() // create stream with no ack to force timeout _, err = js.CreateStream(context.Background(), jetstream.StreamConfig{ Name: "foo", Subjects: []string{"FOO.*"}, NoAck: true, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } now := time.Now() _, err = js.Publish(context.Background(), "FOO.1", []byte("msg")) if !errors.Is(err, context.DeadlineExceeded) { t.Fatalf("Expected deadline exceeded error; got: %v", err) } since := time.Since(now) if since < 200*time.Millisecond || since > 500*time.Millisecond { t.Fatalf("Expected timeout to be around 200ms; got: %v", since) } } func TestPublishMsgAsync(t *testing.T) { type publishConfig struct { msg *nats.Msg opts []jetstream.PublishOpt expectedHeaders nats.Header expectedAck jetstream.PubAck withAckError func(*testing.T, error) withPublishError func(*testing.T, error) } tests := []struct { name string msgs []publishConfig srvConfig []byte timeout time.Duration }{ { name: "publish 3 simple messages, no opts", msgs: []publishConfig{ { msg: &nats.Msg{ Data: []byte("msg 1"), Subject: "FOO.1", }, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 1, Domain: "", }, }, { msg: &nats.Msg{ Data: []byte("msg 2"), Subject: "FOO.1", }, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 2, Domain: "", }, }, { msg: &nats.Msg{ Data: 
[]byte("msg 3"), Subject: "FOO.2", }, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 3, Domain: "", }, }, }, }, { name: "publish with ack timeout set", msgs: []publishConfig{ { msg: &nats.Msg{ Data: []byte("msg 1"), Subject: "FOO.1", }, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 1, Domain: "", }, }, { msg: &nats.Msg{ Data: []byte("msg 2"), Subject: "FOO.1", }, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 2, Domain: "", }, }, { msg: &nats.Msg{ Data: []byte("msg 3"), Subject: "FOO.2", }, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 3, Domain: "", }, }, }, timeout: time.Second, }, { name: "publish 3 messages with message ID, with duplicate", msgs: []publishConfig{ { msg: &nats.Msg{ Data: []byte("msg 1"), Subject: "FOO.1", }, opts: []jetstream.PublishOpt{jetstream.WithMsgID("1")}, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 1, }, expectedHeaders: nats.Header{ "Nats-Msg-Id": []string{"1"}, }, }, { msg: &nats.Msg{ Data: []byte("msg 2"), Subject: "FOO.1", }, opts: []jetstream.PublishOpt{jetstream.WithMsgID("1")}, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 1, Duplicate: true, }, expectedHeaders: nats.Header{ "Nats-Msg-Id": []string{"1"}, }, }, { msg: &nats.Msg{ Data: []byte("msg 3"), Subject: "FOO.2", }, opts: []jetstream.PublishOpt{jetstream.WithMsgID("2")}, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 2, }, expectedHeaders: nats.Header{ "Nats-Msg-Id": []string{"2"}, }, }, }, }, { name: "expect last msg ID", msgs: []publishConfig{ { msg: &nats.Msg{ Data: []byte("msg 1"), Subject: "FOO.1", }, opts: []jetstream.PublishOpt{jetstream.WithMsgID("1")}, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 1, }, expectedHeaders: nats.Header{ "Nats-Msg-Id": []string{"1"}, }, }, { msg: &nats.Msg{ Data: []byte("msg 2"), Subject: "FOO.1", }, opts: []jetstream.PublishOpt{jetstream.WithMsgID("2")}, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 2, }, expectedHeaders: nats.Header{ "Nats-Msg-Id": 
[]string{"2"}, }, }, { msg: &nats.Msg{ Data: []byte("msg 3"), Subject: "FOO.2", }, opts: []jetstream.PublishOpt{jetstream.WithExpectLastMsgID("2")}, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 3, }, expectedHeaders: nats.Header{ "Nats-Expected-Last-Msg-Id": []string{"2"}, }, }, }, }, { name: "invalid last msg id", msgs: []publishConfig{ { msg: &nats.Msg{ Data: []byte("msg 1"), Subject: "FOO.1", }, opts: []jetstream.PublishOpt{jetstream.WithMsgID("1")}, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 1, }, expectedHeaders: nats.Header{ "Nats-Msg-Id": []string{"1"}, }, }, { msg: &nats.Msg{ Data: []byte("msg 2"), Subject: "FOO.2", }, opts: []jetstream.PublishOpt{jetstream.WithExpectLastMsgID("abc")}, withAckError: func(t *testing.T, err error) { var apiErr *jetstream.APIError if ok := errors.As(err, &apiErr); !ok { t.Fatalf("Expected API error; got: %v", err) } if apiErr.ErrorCode != 10070 { t.Fatalf("Expected error code: 10070; got: %d", apiErr.ErrorCode) } }, }, }, }, { name: "expect last sequence", msgs: []publishConfig{ { msg: &nats.Msg{ Data: []byte("msg 1"), Subject: "FOO.1", }, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 1, }, }, { msg: &nats.Msg{ Data: []byte("msg 2"), Subject: "FOO.1", }, opts: []jetstream.PublishOpt{}, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 2, }, }, { msg: &nats.Msg{ Data: []byte("msg 3"), Subject: "FOO.2", }, opts: []jetstream.PublishOpt{jetstream.WithExpectLastSequence(2)}, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 3, }, expectedHeaders: nats.Header{ "Nats-Expected-Last-Sequence": []string{"2"}, }, }, }, }, { name: "invalid last seq", msgs: []publishConfig{ { msg: &nats.Msg{ Data: []byte("msg 1"), Subject: "FOO.1", }, opts: []jetstream.PublishOpt{}, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 1, }, }, { msg: &nats.Msg{ Data: []byte("msg 2"), Subject: "FOO.2", }, opts: []jetstream.PublishOpt{jetstream.WithExpectLastSequence(123)}, withAckError: func(t *testing.T, err 
error) { var apiErr *jetstream.APIError if ok := errors.As(err, &apiErr); !ok { t.Fatalf("Expected API error; got: %v", err) } if apiErr.ErrorCode != 10071 { t.Fatalf("Expected error code: 10071; got: %d", apiErr.ErrorCode) } }, }, }, }, { name: "expect last sequence per subject", msgs: []publishConfig{ { msg: &nats.Msg{ Data: []byte("msg 1"), Subject: "FOO.1", }, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 1, }, }, { msg: &nats.Msg{ Data: []byte("msg 2"), Subject: "FOO.2", }, opts: []jetstream.PublishOpt{}, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 2, }, }, { msg: &nats.Msg{ Data: []byte("msg 3"), Subject: "FOO.1", }, opts: []jetstream.PublishOpt{jetstream.WithExpectLastSequencePerSubject(1)}, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 3, }, expectedHeaders: nats.Header{ "Nats-Expected-Last-Subject-Sequence": []string{"1"}, }, }, }, }, { name: "invalid last sequence per subject", msgs: []publishConfig{ { msg: &nats.Msg{ Data: []byte("msg 1"), Subject: "FOO.1", }, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 1, }, }, { msg: &nats.Msg{ Data: []byte("msg 2"), Subject: "FOO.2", }, opts: []jetstream.PublishOpt{}, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 2, }, }, { msg: &nats.Msg{ Data: []byte("msg 3"), Subject: "FOO.1", }, opts: []jetstream.PublishOpt{jetstream.WithExpectLastSequencePerSubject(123)}, withAckError: func(t *testing.T, err error) { var apiErr *jetstream.APIError if ok := errors.As(err, &apiErr); !ok { t.Fatalf("Expected API error; got: %v", err) } if apiErr.ErrorCode != 10071 { t.Fatalf("Expected error code: 10071; got: %d", apiErr.ErrorCode) } }, }, }, }, { name: "expect stream header", msgs: []publishConfig{ { msg: &nats.Msg{ Data: []byte("msg 1"), Subject: "FOO.1", }, opts: []jetstream.PublishOpt{jetstream.WithExpectStream("foo")}, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 1, }, expectedHeaders: nats.Header{ "Nats-Expected-Stream": []string{"foo"}, }, }, }, }, { name: 
"invalid expected stream header", msgs: []publishConfig{ { msg: &nats.Msg{ Data: []byte("msg 1"), Subject: "FOO.1", }, opts: []jetstream.PublishOpt{jetstream.WithExpectStream("abc")}, withAckError: func(t *testing.T, err error) { var apiErr *jetstream.APIError if ok := errors.As(err, &apiErr); !ok { t.Fatalf("Expected API error; got: %v", err) } if apiErr.ErrorCode != 10060 { t.Fatalf("Expected error code: 10060; got: %d", apiErr.ErrorCode) } }, }, }, }, { name: "publish 3 simple messages with domain set", srvConfig: []byte( ` listen: 127.0.0.1:-1 jetstream: {domain: "test-domain"} `), msgs: []publishConfig{ { msg: &nats.Msg{ Data: []byte("msg 1"), Subject: "FOO.1", }, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 1, Domain: "test-domain", }, }, { msg: &nats.Msg{ Data: []byte("msg 2"), Subject: "FOO.1", }, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 2, Domain: "test-domain", }, }, { msg: &nats.Msg{ Data: []byte("msg 3"), Subject: "FOO.2", }, expectedAck: jetstream.PubAck{ Stream: "foo", Sequence: 3, Domain: "test-domain", }, }, }, }, { name: "invalid subject set", msgs: []publishConfig{ { msg: &nats.Msg{ Data: []byte("msg 1"), Subject: "ABC", }, withAckError: func(t *testing.T, err error) { if !errors.Is(err, jetstream.ErrNoStreamResponse) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrNoStreamResponse, err) } }, }, }, }, { name: "invalid retry number set", msgs: []publishConfig{ { msg: &nats.Msg{ Data: []byte("msg 1"), Subject: "FOO.1", }, opts: []jetstream.PublishOpt{jetstream.WithRetryAttempts(-1)}, withPublishError: func(t *testing.T, err error) { if !errors.Is(err, jetstream.ErrInvalidOption) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrInvalidOption, err) } }, }, }, }, { name: "invalid retry wait set", msgs: []publishConfig{ { msg: &nats.Msg{ Data: []byte("msg 1"), Subject: "FOO.1", }, opts: []jetstream.PublishOpt{jetstream.WithRetryWait(-1)}, withPublishError: func(t *testing.T, err error) { if !errors.Is(err, 
jetstream.ErrInvalidOption) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrInvalidOption, err) } }, }, }, }, { name: "invalid stall wait set", msgs: []publishConfig{ { msg: &nats.Msg{ Data: []byte("msg 1"), Subject: "FOO.1", }, opts: []jetstream.PublishOpt{jetstream.WithStallWait(-1)}, withPublishError: func(t *testing.T, err error) { if !errors.Is(err, jetstream.ErrInvalidOption) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrInvalidOption, err) } }, }, }, }, { name: "reply subject set", msgs: []publishConfig{ { msg: &nats.Msg{ Data: []byte("msg 1"), Subject: "FOO.1", Reply: "BAR", }, withPublishError: func(t *testing.T, err error) { if !errors.Is(err, jetstream.ErrAsyncPublishReplySubjectSet) { t.Fatalf("Expected error: %v; got: %v", nats.ErrNoResponders, err) } }, }, }, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { var srv *server.Server if test.srvConfig != nil { conf := createConfFile(t, test.srvConfig) defer os.Remove(conf) srv, _ = RunServerWithConfig(conf) } else { srv = RunBasicJetStreamServer() } defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } opts := []jetstream.JetStreamOpt{} if test.timeout != 0 { opts = append(opts, jetstream.WithPublishAsyncTimeout(test.timeout)) } js, err := jetstream.New(nc, opts...) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() _, err = js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}, MaxMsgSize: 64}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for _, pub := range test.msgs { ackFuture, err := js.PublishMsgAsync(pub.msg, pub.opts...) 
if pub.withPublishError != nil { pub.withPublishError(t, err) continue } select { case ack := <-ackFuture.Ok(): if pub.withAckError != nil { t.Fatalf("Expected error, got nil") } if *ack != pub.expectedAck { t.Fatalf("Invalid ack received; want: %v; got: %v", pub.expectedAck, ack) } msg := ackFuture.Msg() if !reflect.DeepEqual(pub.expectedHeaders, msg.Header) { t.Fatalf("Invalid headers on message; want: %v; got: %v", pub.expectedHeaders, pub.msg.Header) } if string(msg.Data) != string(pub.msg.Data) { t.Fatalf("Invalid message in ack; want: %q; got: %q", string(pub.msg.Data), string(msg.Data)) } case err := <-ackFuture.Err(): if pub.withAckError == nil { t.Fatalf("Expected no error. got: %v", err) } pub.withAckError(t, err) } } select { case <-js.PublishAsyncComplete(): case <-time.After(5 * time.Second): t.Fatalf("Did not receive completion signal") } }) } } func TestPublishMsgAsyncWithPendingMsgs(t *testing.T) { t.Run("outstanding ack exceed limit", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc, jetstream.WithPublishAsyncMaxPending(5)) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() _, err = js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for i := 0; i < 20; i++ { _, err = js.PublishAsync("FOO.1", []byte("msg")) if err != nil { t.Fatalf("Unexpected error: %v", err) } if numPending := js.PublishAsyncPending(); numPending > 5 { t.Fatalf("Expected 5 pending messages, got: %d", numPending) } } }) t.Run("too many messages without ack", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil 
{ t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc, jetstream.WithPublishAsyncMaxPending(5)) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() _, err = js.CreateStream(ctx, jetstream.StreamConfig{ Name: "foo", Subjects: []string{"FOO.*"}, // disable stream acks NoAck: true, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } for i := 0; i < 5; i++ { _, err = js.PublishAsync("FOO.1", []byte("msg"), jetstream.WithStallWait(10*time.Millisecond)) if err != nil { t.Fatalf("Unexpected error: %v", err) } } if _, err = js.PublishAsync("FOO.1", []byte("msg"), jetstream.WithStallWait(10*time.Millisecond)); err == nil || !errors.Is(err, jetstream.ErrTooManyStalledMsgs) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrTooManyStalledMsgs, err) } }) t.Run("with server restart", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() _, err = js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } go func() { for i := 0; i < 50; i++ { _, _ = js.PublishAsync("FOO.1", []byte("msg")) } }() srv = restartBasicJSServer(t, srv) defer shutdownJSServerAndRemoveStorage(t, srv) select { case <-js.PublishAsyncComplete(): case <-time.After(10 * time.Second): t.Fatalf("Did not receive completion signal") } }) } func TestPublishAsyncResetPendingOnReconnect(t *testing.T) { s := RunBasicJetStreamServer() nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { 
t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() _, err = js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } errs := make(chan error, 1) done := make(chan struct{}, 1) acks := make(chan jetstream.PubAckFuture, 100) wg := sync.WaitGroup{} go func() { for i := 0; i < 100; i++ { if ack, err := js.PublishAsync("FOO.A", []byte("hello")); err != nil { errs <- err return } else { acks <- ack } wg.Add(1) } close(acks) done <- struct{}{} }() select { case <-done: case err := <-errs: t.Fatalf("Unexpected error during publish: %v", err) case <-time.After(5 * time.Second): t.Fatalf("Did not receive completion signal") } for ack := range acks { go func(paf jetstream.PubAckFuture) { select { case <-paf.Ok(): case err := <-paf.Err(): if !errors.Is(err, nats.ErrDisconnected) && !errors.Is(err, nats.ErrNoResponders) { errs <- fmt.Errorf("Expected error: %v or %v; got: %v", nats.ErrDisconnected, nats.ErrNoResponders, err) } case <-time.After(5 * time.Second): errs <- errors.New("Did not receive completion signal") } wg.Done() }(ack) } s = restartBasicJSServer(t, s) defer shutdownJSServerAndRemoveStorage(t, s) wg.Wait() select { case err := <-errs: t.Fatalf("Unexpected error: %v", err) default: } } func TestPublishAsyncRetry(t *testing.T) { tests := []struct { name string pubOpts []jetstream.PublishOpt ackError error }{ { name: "retry until stream is ready", pubOpts: []jetstream.PublishOpt{ jetstream.WithRetryAttempts(10), jetstream.WithRetryWait(100 * time.Millisecond), }, }, { name: "fail after max retries", pubOpts: []jetstream.PublishOpt{ jetstream.WithRetryAttempts(2), jetstream.WithRetryWait(50 * time.Millisecond), }, ackError: jetstream.ErrNoStreamResponse, }, { name: "retries disabled", pubOpts: []jetstream.PublishOpt{ jetstream.WithRetryAttempts(0), }, ackError: 
jetstream.ErrNoStreamResponse, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } // set max pending to 1 so that we can test if retries don't cause stall js, err := jetstream.New(nc, jetstream.WithPublishAsyncMaxPending(1)) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() test.pubOpts = append(test.pubOpts, jetstream.WithStallWait(1*time.Nanosecond)) ack, err := js.PublishAsync("foo", []byte("hello"), test.pubOpts...) if err != nil { t.Fatalf("Unexpected error: %v", err) } publishComplete := js.PublishAsyncComplete() errs := make(chan error, 1) go func() { // create stream with delay so that publish will receive no responders time.Sleep(300 * time.Millisecond) if _, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "TEST", Subjects: []string{"foo"}}); err != nil { errs <- err } }() select { case <-ack.Ok(): case err := <-ack.Err(): if test.ackError != nil { if !errors.Is(err, test.ackError) { t.Fatalf("Expected error: %v; got: %v", test.ackError, err) } } else { t.Fatalf("Unexpected ack error: %v", err) } case err := <-errs: t.Fatalf("Error creating stream: %v", err) case <-time.After(5 * time.Second): t.Fatalf("Timeout waiting for ack") } select { case <-publishComplete: case <-time.After(5 * time.Second): t.Fatalf("Did not receive completion signal") } }) } } func TestPublishAsyncRetryInErrHandler(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } streamCreated := make(chan struct{}) errCB := func(js jetstream.JetStream, m *nats.Msg, e error) { <-streamCreated _, err := js.PublishMsgAsync(m) if err != nil { 
t.Fatalf("Unexpected error when republishing: %v", err) } } js, err := jetstream.New(nc, jetstream.WithPublishAsyncErrHandler(errCB)) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() errs := make(chan error, 1) done := make(chan struct{}, 1) go func() { for i := 0; i < 10; i++ { if _, err := js.PublishAsync("FOO.A", []byte("hello"), jetstream.WithRetryAttempts(0)); err != nil { errs <- err return } } done <- struct{}{} }() select { case <-done: case err := <-errs: t.Fatalf("Unexpected error during publish: %v", err) case <-time.After(5 * time.Second): t.Fatalf("Did not receive completion signal") } stream, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } close(streamCreated) select { case <-js.PublishAsyncComplete(): case <-time.After(5 * time.Second): t.Fatalf("Did not receive completion signal") } info, err := stream.Info(context.Background()) if err != nil { t.Fatalf("Unexpected error: %v", err) } if info.State.Msgs != 10 { t.Fatalf("Expected 10 messages in the stream; got: %d", info.State.Msgs) } } func TestPublishAsyncAckTimeout(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } errs := make(chan error, 1) js, err := jetstream.New(nc, jetstream.WithPublishAsyncTimeout(50*time.Millisecond), jetstream.WithPublishAsyncErrHandler(func(js jetstream.JetStream, m *nats.Msg, e error) { errs <- e }), ) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() _, err = js.CreateStream(context.Background(), jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}, NoAck: true}) if err != nil { t.Fatalf("Unexpected error: %v", err) } ack, err := js.PublishAsync("FOO.A", []byte("hello")) if err != nil { 
t.Fatalf("Unexpected error: %v", err) } select { case <-ack.Ok(): t.Fatalf("Expected timeout") case err := <-ack.Err(): if !errors.Is(err, jetstream.ErrAsyncPublishTimeout) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrAsyncPublishTimeout, err) } case <-time.After(time.Second): t.Fatalf("Did not receive ack timeout") } // check if error callback is called select { case err := <-errs: if !errors.Is(err, jetstream.ErrAsyncPublishTimeout) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrAsyncPublishTimeout, err) } case <-time.After(time.Second): t.Fatalf("Did not receive error from error handler") } if js.PublishAsyncPending() != 0 { t.Fatalf("Expected no pending messages") } select { case <-js.PublishAsyncComplete(): case <-time.After(100 * time.Millisecond): t.Fatalf("Did not receive completion signal") } } func TestPublishAsyncClearStall(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc, jetstream.WithPublishAsyncTimeout(500*time.Millisecond), jetstream.WithPublishAsyncMaxPending(100)) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() // use stream with no acks to test stalling _, err = js.CreateStream(context.Background(), jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}, NoAck: true}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for range 100 { _, err := js.PublishAsync("FOO.A", []byte("hello"), jetstream.WithStallWait(1*time.Nanosecond)) if err != nil { t.Fatalf("Unexpected error: %v", err) } } // after publishing 100 messages, next one should fail with ErrTooManyStalledMsgs _, err = js.PublishAsync("FOO.A", []byte("hello"), jetstream.WithStallWait(50*time.Millisecond)) if !errors.Is(err, jetstream.ErrTooManyStalledMsgs) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrTooManyStalledMsgs, err) } // after publish timeout all pending 
messages should be cleared // and we should be able to publish again select { case <-js.PublishAsyncComplete(): case <-time.After(2 * time.Second): t.Fatalf("Did not receive completion signal") } if _, err = js.PublishAsync("FOO.A", []byte("hello")); err != nil { t.Fatalf("Unexpected error: %v", err) } if js.PublishAsyncPending() != 1 { t.Fatalf("Expected 1 pending message; got: %d", js.PublishAsyncPending()) } } nats.go-1.41.0/jetstream/test/pull_test.go000066400000000000000000003035651477351342400205130ustar00rootroot00000000000000// Copyright 2022-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package test import ( "bytes" "context" "errors" "fmt" "sync" "testing" "time" "github.com/nats-io/nats.go" "github.com/nats-io/nats.go/jetstream" ) func TestPullConsumerFetch(t *testing.T) { testSubject := "FOO.123" testMsgs := []string{"m1", "m2", "m3", "m4", "m5"} publishTestMsgs := func(t *testing.T, js jetstream.JetStream) { for _, msg := range testMsgs { if _, err := js.Publish(context.Background(), testSubject, []byte(msg)); err != nil { t.Fatalf("Unexpected error during publish: %s", err) } } } t.Run("no options", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } publishTestMsgs(t, js) msgs, err := c.Fetch(5) if err != nil { t.Fatalf("Unexpected error: %v", err) } var i int for msg := range msgs.Messages() { if string(msg.Data()) != testMsgs[i] { t.Fatalf("Invalid msg on index %d; expected: %s; got: %s", i, testMsgs[i], string(msg.Data())) } i++ } if len(testMsgs) != i { t.Fatalf("Invalid number of messages received; want: %d; got: %d", len(testMsgs), i) } if msgs.Error() != nil { t.Fatalf("Unexpected error during fetch: %v", msgs.Error()) } }) t.Run("delete consumer during fetch", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx, cancel := 
context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } publishTestMsgs(t, js) msgs, err := c.Fetch(10) if err != nil { t.Fatalf("Unexpected error: %v", err) } time.Sleep(100 * time.Millisecond) if err := s.DeleteConsumer(ctx, c.CachedInfo().Name); err != nil { t.Fatalf("Error deleting consumer: %s", err) } var i int for msg := range msgs.Messages() { if string(msg.Data()) != testMsgs[i] { t.Fatalf("Invalid msg on index %d; expected: %s; got: %s", i, testMsgs[i], string(msg.Data())) } i++ } if len(testMsgs) != i { t.Fatalf("Invalid number of messages received; want: %d; got: %d", len(testMsgs), i) } if !errors.Is(msgs.Error(), jetstream.ErrConsumerDeleted) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrConsumerDeleted, msgs.Error()) } }) t.Run("no options, fetch single messages one by one", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } res := make([]jetstream.Msg, 0) errs := make(chan 
error) done := make(chan struct{}) go func() { for { if len(res) == len(testMsgs) { close(done) return } msgs, err := c.Fetch(1) if err != nil { errs <- err return } msg := <-msgs.Messages() if msg != nil { res = append(res, msg) } if err := msgs.Error(); err != nil { errs <- err return } } }() time.Sleep(10 * time.Millisecond) publishTestMsgs(t, js) select { case err := <-errs: t.Fatalf("Unexpected error: %v", err) case <-done: if len(res) != len(testMsgs) { t.Fatalf("Unexpected received message count; want %d; got %d", len(testMsgs), len(res)) } } for i, msg := range res { if string(msg.Data()) != testMsgs[i] { t.Fatalf("Invalid msg on index %d; expected: %s; got: %s", i, testMsgs[i], string(msg.Data())) } } }) t.Run("with no wait, no messages at the time of request", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msgs, err := c.FetchNoWait(5) if err != nil { t.Fatalf("Unexpected error: %v", err) } time.Sleep(100 * time.Millisecond) publishTestMsgs(t, js) msg := <-msgs.Messages() if msg != nil { t.Fatalf("Expected no messages; got: %s", string(msg.Data())) } }) t.Run("with no wait, some messages available", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx, cancel := 
context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } publishTestMsgs(t, js) time.Sleep(50 * time.Millisecond) msgs, err := c.FetchNoWait(10) if err != nil { t.Fatalf("Unexpected error: %v", err) } time.Sleep(100 * time.Millisecond) publishTestMsgs(t, js) var msgsNum int for range msgs.Messages() { msgsNum++ } if err != nil { t.Fatalf("Unexpected error during fetch: %v", err) } if msgsNum != len(testMsgs) { t.Fatalf("Expected 5 messages, got: %d", msgsNum) } }) t.Run("with timeout", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msgs, err := c.Fetch(5, jetstream.FetchMaxWait(50*time.Millisecond)) if err != nil { t.Fatalf("Unexpected error: %v", err) } msg := <-msgs.Messages() if msg != nil { t.Fatalf("Expected no messages; got: %s", string(msg.Data())) } }) t.Run("with invalid timeout value", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, 
err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } _, err = c.Fetch(5, jetstream.FetchMaxWait(-50*time.Millisecond)) if !errors.Is(err, jetstream.ErrInvalidOption) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrInvalidOption, err) } }) t.Run("consumer does not exist", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } publishTestMsgs(t, js) // fetch 5 messages, should return normally msgs, err := c.Fetch(5) if err != nil { t.Fatalf("Unexpected error: %v", err) } var i int for range msgs.Messages() { i++ } if i != len(testMsgs) { t.Fatalf("Expected 5 messages; got: %d", i) } if msgs.Error() != nil { t.Fatalf("Unexpected error during fetch: %v", msgs.Error()) } // fetch again, should timeout without any error msgs, err = c.Fetch(5, jetstream.FetchMaxWait(200*time.Millisecond)) if err 
!= nil { t.Fatalf("Unexpected error: %v", err) } select { case _, ok := <-msgs.Messages(): if ok { t.Fatalf("Expected channel to be closed") } case <-time.After(1 * time.Second): t.Fatalf("Expected channel to be closed") } if msgs.Error() != nil { t.Fatalf("Unexpected error during fetch: %v", msgs.Error()) } // delete the consumer, at this point server should stop sending heartbeats for pull requests if err := s.DeleteConsumer(ctx, c.CachedInfo().Name); err != nil { t.Fatalf("Error deleting consumer: %s", err) } msgs, err = c.Fetch(5) if err != nil { t.Fatalf("Unexpected error: %v", err) } select { case _, ok := <-msgs.Messages(): if ok { t.Fatalf("Expected channel to be closed") } case <-time.After(1 * time.Second): t.Fatalf("Expected channel to be closed") } if !errors.Is(msgs.Error(), nats.ErrNoResponders) { t.Fatalf("Expected error: %v; got: %v", nats.ErrNoResponders, err) } }) t.Run("with invalid heartbeat value", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } // default expiry (30s), hb too large _, err = c.Fetch(5, jetstream.FetchHeartbeat(20*time.Second)) if !errors.Is(err, jetstream.ErrInvalidOption) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrInvalidOption, err) } // custom expiry, hb too large _, err = c.Fetch(5, jetstream.FetchHeartbeat(2*time.Second), jetstream.FetchMaxWait(3*time.Second)) if 
!errors.Is(err, jetstream.ErrInvalidOption) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrInvalidOption, err) } // negative heartbeat _, err = c.Fetch(5, jetstream.FetchHeartbeat(-2*time.Second)) if !errors.Is(err, jetstream.ErrInvalidOption) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrInvalidOption, err) } }) } func TestPullConsumerFetchRace(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for i := 0; i < 3; i++ { if _, err := js.Publish(context.Background(), "FOO.123", []byte(fmt.Sprintf("msg-%d", i))); err != nil { t.Fatalf("Unexpected error during publish: %s", err) } } msgs, err := c.Fetch(5) if err != nil { t.Fatalf("Unexpected error: %v", err) } errCh := make(chan error) go func() { for { err := msgs.Error() if err != nil { errCh <- err return } } }() deleteErrCh := make(chan error, 1) go func() { time.Sleep(100 * time.Millisecond) if err := s.DeleteConsumer(ctx, c.CachedInfo().Name); err != nil { deleteErrCh <- err } close(deleteErrCh) }() var i int for msg := range msgs.Messages() { if string(msg.Data()) != fmt.Sprintf("msg-%d", i) { t.Fatalf("Invalid msg on index %d; expected: %s; got: %s", i, fmt.Sprintf("msg-%d", i), string(msg.Data())) } i++ } if i != 3 { t.Fatalf("Invalid number of messages received; want: %d; got: %d", 5, i) } select { case err := <-errCh: if !errors.Is(err, 
jetstream.ErrConsumerDeleted) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrConsumerDeleted, err) } case <-time.After(1 * time.Second): t.Fatalf("Expected error: %v; got: %v", jetstream.ErrConsumerDeleted, nil) } // wait until the consumer is deleted, otherwise we may close the connection // before the consumer delete response is received select { case ert, ok := <-deleteErrCh: if !ok { break } t.Fatalf("Error deleting consumer: %s", ert) case <-time.After(1 * time.Second): t.Fatalf("Expected done to be closed") } } func TestPullConsumerFetchBytes(t *testing.T) { testSubject := "FOO.123" msg := [10]byte{} publishTestMsgs := func(t *testing.T, js jetstream.JetStream, count int) { for i := 0; i < count; i++ { if _, err := js.Publish(context.Background(), testSubject, msg[:]); err != nil { t.Fatalf("Unexpected error during publish: %s", err) } } } t.Run("no options, exact byte count received", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy, Name: "con"}) if err != nil { t.Fatalf("Unexpected error: %v", err) } publishTestMsgs(t, js, 5) // actual received msg size will be 60 (payload=10 + Subject=7 + Reply=43) msgs, err := c.FetchBytes(300) if err != nil { t.Fatalf("Unexpected error: %v", err) } var i int for msg := range msgs.Messages() { msg.Ack() i++ } if i != 5 { t.Fatalf("Expected 5 messages; got: %d", i) } if msgs.Error() != nil { t.Fatalf("Unexpected error during 
fetch: %v", msgs.Error()) } }) t.Run("no options, last msg does not fit max bytes", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy, Name: "con"}) if err != nil { t.Fatalf("Unexpected error: %v", err) } publishTestMsgs(t, js, 5) // actual received msg size will be 60 (payload=10 + Subject=7 + Reply=43) msgs, err := c.FetchBytes(250) if err != nil { t.Fatalf("Unexpected error: %v", err) } var i int for msg := range msgs.Messages() { msg.Ack() i++ } if i != 4 { t.Fatalf("Expected 5 messages; got: %d", i) } if msgs.Error() != nil { t.Fatalf("Unexpected error during fetch: %v", msgs.Error()) } }) t.Run("no options, single msg is too large", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy, Name: "con"}) if err != nil { t.Fatalf("Unexpected error: %v", err) } publishTestMsgs(t, js, 5) // actual 
received msg size will be 60 (payload=10 + Subject=7 + Reply=43) msgs, err := c.FetchBytes(30) if err != nil { t.Fatalf("Unexpected error: %v", err) } var i int for msg := range msgs.Messages() { msg.Ack() i++ } if i != 0 { t.Fatalf("Expected 5 messages; got: %d", i) } if msgs.Error() != nil { t.Fatalf("Unexpected error during fetch: %v", msgs.Error()) } }) t.Run("timeout waiting for messages", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy, Name: "con"}) if err != nil { t.Fatalf("Unexpected error: %v", err) } publishTestMsgs(t, js, 5) // actual received msg size will be 60 (payload=10 + Subject=7 + Reply=43) msgs, err := c.FetchBytes(1000, jetstream.FetchMaxWait(50*time.Millisecond)) if err != nil { t.Fatalf("Unexpected error: %v", err) } var i int for msg := range msgs.Messages() { msg.Ack() i++ } if i != 5 { t.Fatalf("Expected 5 messages; got: %d", i) } if msgs.Error() != nil { t.Fatalf("Unexpected error during fetch: %v", msgs.Error()) } }) t.Run("consumer does not exist", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s, err := 
js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } // fetch again, should timeout without any error msgs, err := c.FetchBytes(5, jetstream.FetchMaxWait(200*time.Millisecond)) if err != nil { t.Fatalf("Unexpected error: %v", err) } select { case _, ok := <-msgs.Messages(): if ok { t.Fatalf("Expected channel to be closed") } case <-time.After(1 * time.Second): t.Fatalf("Expected channel to be closed") } if msgs.Error() != nil { t.Fatalf("Unexpected error during fetch: %v", msgs.Error()) } // delete the consumer if err := s.DeleteConsumer(ctx, c.CachedInfo().Name); err != nil { t.Fatalf("Error deleting consumer: %s", err) } msgs, err = c.FetchBytes(5) if err != nil { t.Fatalf("Unexpected error: %v", err) } select { case _, ok := <-msgs.Messages(): if ok { t.Fatalf("Expected channel to be closed") } case <-time.After(1 * time.Second): t.Fatalf("Expected channel to be closed") } if !errors.Is(msgs.Error(), nats.ErrNoResponders) { t.Fatalf("Expected error: %v; got: %v", nats.ErrNoResponders, err) } }) t.Run("with invalid heartbeat value", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", 
err) } // default expiry (30s), hb too large _, err = c.FetchBytes(5, jetstream.FetchHeartbeat(20*time.Second)) if !errors.Is(err, jetstream.ErrInvalidOption) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrInvalidOption, err) } // custom expiry, hb too large _, err = c.FetchBytes(5, jetstream.FetchHeartbeat(2*time.Second), jetstream.FetchMaxWait(3*time.Second)) if !errors.Is(err, jetstream.ErrInvalidOption) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrInvalidOption, err) } // negative heartbeat _, err = c.FetchBytes(5, jetstream.FetchHeartbeat(-2*time.Second)) if !errors.Is(err, jetstream.ErrInvalidOption) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrInvalidOption, err) } }) } func TestPullConsumerFetch_WithCluster(t *testing.T) { testSubject := "FOO.123" testMsgs := []string{"m1", "m2", "m3", "m4", "m5"} publishTestMsgs := func(t *testing.T, js jetstream.JetStream) { for _, msg := range testMsgs { if _, err := js.Publish(context.Background(), testSubject, []byte(msg)); err != nil { t.Fatalf("Unexpected error during publish: %s", err) } } } name := "cluster" stream := jetstream.StreamConfig{ Name: name, Replicas: 1, Subjects: []string{"FOO.*"}, } t.Run("no options", func(t *testing.T) { withJSClusterAndStream(t, name, 3, stream, func(t *testing.T, subject string, srvs ...*jsServer) { srv := srvs[0] nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s, err := js.Stream(ctx, stream.Name) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } publishTestMsgs(t, js) msgs, err := c.Fetch(5) if err != nil { t.Fatalf("Unexpected error: %v", err) } var i 
int for msg := range msgs.Messages() { if string(msg.Data()) != testMsgs[i] { t.Fatalf("Invalid msg on index %d; expected: %s; got: %s", i, testMsgs[i], string(msg.Data())) } i++ } if msgs.Error() != nil { t.Fatalf("Unexpected error during fetch: %v", msgs.Error()) } }) }) t.Run("with no wait, no messages at the time of request", func(t *testing.T) { withJSClusterAndStream(t, name, 3, stream, func(t *testing.T, subject string, srvs ...*jsServer) { nc, err := nats.Connect(srvs[0].ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s, err := js.Stream(ctx, stream.Name) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msgs, err := c.FetchNoWait(5) if err != nil { t.Fatalf("Unexpected error: %v", err) } time.Sleep(100 * time.Millisecond) publishTestMsgs(t, js) msg := <-msgs.Messages() if msg != nil { t.Fatalf("Expected no messages; got: %s", string(msg.Data())) } }) }) } func TestPullConsumerMessages(t *testing.T) { testSubject := "FOO.123" testMsgs := []string{"m1", "m2", "m3", "m4", "m5"} publishTestMsgs := func(t *testing.T, js jetstream.JetStream) { for _, msg := range testMsgs { if _, err := js.Publish(context.Background(), testSubject, []byte(msg)); err != nil { t.Fatalf("Unexpected error during publish: %s", err) } } } t.Run("no options", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 
20*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msgs := make([]jetstream.Msg, 0) it, err := c.Messages() if err != nil { t.Fatalf("Unexpected error: %v", err) } publishTestMsgs(t, js) for i := 0; i < len(testMsgs); i++ { msg, err := it.Next() if err != nil { t.Fatal(err) } if msg == nil { break } msg.Ack() msgs = append(msgs, msg) } it.Stop() // calling Stop() multiple times should have no effect it.Stop() if len(msgs) != len(testMsgs) { t.Fatalf("Unexpected received message count; want %d; got %d", len(testMsgs), len(msgs)) } for i, msg := range msgs { if string(msg.Data()) != testMsgs[i] { t.Fatalf("Invalid msg on index %d; expected: %s; got: %s", i, testMsgs[i], string(msg.Data())) } } _, err = it.Next() if err == nil || !errors.Is(err, jetstream.ErrMsgIteratorClosed) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrMsgIteratorClosed, err) } }) t.Run("with custom batch size", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msgs := make([]jetstream.Msg, 0) it, err := c.Messages(jetstream.PullMaxMessages(3)) if err != nil { 
t.Fatalf("Unexpected error: %v", err) } publishTestMsgs(t, js) for i := 0; i < len(testMsgs); i++ { msg, err := it.Next() if err != nil { t.Fatal(err) } if msg == nil { break } msg.Ack() msgs = append(msgs, msg) } it.Stop() time.Sleep(10 * time.Millisecond) if len(msgs) != len(testMsgs) { t.Fatalf("Unexpected received message count; want %d; got %d", len(testMsgs), len(msgs)) } for i, msg := range msgs { if string(msg.Data()) != testMsgs[i] { t.Fatalf("Invalid msg on index %d; expected: %s; got: %s", i, testMsgs[i], string(msg.Data())) } } }) t.Run("with max fitting 1 message", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } // subscribe to next request subject to verify how many next requests were sent sub, err := nc.SubscribeSync(fmt.Sprintf("$JS.API.CONSUMER.MSG.NEXT.foo.%s", c.CachedInfo().Name)) if err != nil { t.Fatalf("Error on subscribe: %v", err) } msgs := make([]jetstream.Msg, 0) it, err := c.Messages(jetstream.PullMaxBytes(60)) if err != nil { t.Fatalf("Unexpected error: %v", err) } publishTestMsgs(t, js) for i := 0; i < len(testMsgs); i++ { msg, err := it.Next() if err != nil { t.Fatal(err) } if msg == nil { break } msg.Ack() msgs = append(msgs, msg) } it.Stop() time.Sleep(10 * time.Millisecond) requestsNum, _, err := sub.Pending() if err != nil { t.Fatalf("Unexpected error: %v", err) } // with batch size set to 
1, and 5 messages published on subject, there should be a total of 5 requests sent if requestsNum < 5 { t.Fatalf("Unexpected number of requests sent; want at least 5; got %d", requestsNum) } if len(msgs) != len(testMsgs) { t.Fatalf("Unexpected received message count; want %d; got %d", len(testMsgs), len(msgs)) } for i, msg := range msgs { if string(msg.Data()) != testMsgs[i] { t.Fatalf("Invalid msg on index %d; expected: %s; got: %s", i, testMsgs[i], string(msg.Data())) } } }) t.Run("remove consumer when fetching messages", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msgs := make([]jetstream.Msg, 0) wg := &sync.WaitGroup{} wg.Add(len(testMsgs)) it, err := c.Messages() if err != nil { t.Fatalf("Unexpected error: %v", err) } defer it.Stop() publishTestMsgs(t, js) for i := 0; i < len(testMsgs); i++ { msg, err := it.Next() if err != nil { t.Fatal(err) } if msg == nil { break } msg.Ack() msgs = append(msgs, msg) } if err != nil { t.Fatalf("Unexpected error: %v", err) } if err := s.DeleteConsumer(ctx, c.CachedInfo().Name); err != nil { t.Fatalf("Error deleting consumer: %s", err) } _, err = it.Next() if !errors.Is(err, jetstream.ErrConsumerDeleted) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrConsumerDeleted, err) } publishTestMsgs(t, js) time.Sleep(50 * time.Millisecond) _, err = it.Next() if !errors.Is(err, 
jetstream.ErrMsgIteratorClosed) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrMsgIteratorClosed, err) } if len(msgs) != len(testMsgs) { t.Fatalf("Unexpected received message count; want %d; got %d", len(testMsgs), len(msgs)) } }) t.Run("with custom max bytes", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } // subscribe to next request subject to verify how many next requests were sent sub, err := nc.SubscribeSync(fmt.Sprintf("$JS.API.CONSUMER.MSG.NEXT.foo.%s", c.CachedInfo().Name)) if err != nil { t.Fatalf("Error on subscribe: %v", err) } msgs := make([]jetstream.Msg, 0) it, err := c.Messages(jetstream.PullMaxBytes(150)) if err != nil { t.Fatalf("Unexpected error: %v", err) } publishTestMsgs(t, js) for i := 0; i < len(testMsgs); i++ { msg, err := it.Next() if err != nil { t.Fatal(err) } if msg == nil { break } msg.Ack() msgs = append(msgs, msg) } it.Stop() time.Sleep(10 * time.Millisecond) requestsNum, _, err := sub.Pending() if err != nil { t.Fatalf("Unexpected error: %v", err) } if requestsNum < 3 { t.Fatalf("Unexpected number of requests sent; want at least 3; got %d", requestsNum) } if len(msgs) != len(testMsgs) { t.Fatalf("Unexpected received message count; want %d; got %d", len(testMsgs), len(msgs)) } for i, msg := range msgs { if string(msg.Data()) != testMsgs[i] { t.Fatalf("Invalid msg on index %d; 
expected: %s; got: %s", i, testMsgs[i], string(msg.Data())) } } }) t.Run("with batch size set to 1", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } // subscribe to next request subject to verify how many next requests were sent sub, err := nc.SubscribeSync(fmt.Sprintf("$JS.API.CONSUMER.MSG.NEXT.foo.%s", c.CachedInfo().Name)) if err != nil { t.Fatalf("Error on subscribe: %v", err) } msgs := make([]jetstream.Msg, 0) it, err := c.Messages(jetstream.PullMaxMessages(1)) if err != nil { t.Fatalf("Unexpected error: %v", err) } publishTestMsgs(t, js) for i := 0; i < len(testMsgs); i++ { msg, err := it.Next() if err != nil { t.Fatal(err) } if msg == nil { break } msg.Ack() msgs = append(msgs, msg) } it.Stop() time.Sleep(10 * time.Millisecond) requestsNum, _, err := sub.Pending() if err != nil { t.Fatalf("Unexpected error: %v", err) } // with batch size set to 1, and 5 messages published on subject, there should be a total of 5 requests sent if requestsNum != 5 { t.Fatalf("Unexpected number of requests sent; want 5; got %d", requestsNum) } if len(msgs) != len(testMsgs) { t.Fatalf("Unexpected received message count; want %d; got %d", len(testMsgs), len(msgs)) } for i, msg := range msgs { if string(msg.Data()) != testMsgs[i] { t.Fatalf("Invalid msg on index %d; expected: %s; got: %s", i, testMsgs[i], string(msg.Data())) } } }) 
t.Run("with auto unsubscribe", func(t *testing.T) {
	// Verifies that StopAfter(50) closes the iterator after exactly 50
	// delivered messages and leaves no pending acks or waiting pulls.
	srv := RunBasicJetStreamServer()
	defer shutdownJSServerAndRemoveStorage(t, srv)
	nc, err := nats.Connect(srv.ClientURL())
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	js, err := jetstream.New(nc)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer nc.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()
	s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "test", Subjects: []string{"FOO.*"}})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	// Publish twice as many messages as StopAfter allows so we can assert
	// that exactly half remain pending afterwards.
	for i := 0; i < 100; i++ {
		if _, err := js.Publish(ctx, "FOO.A", []byte("msg")); err != nil {
			t.Fatalf("Unexpected error during publish: %s", err)
		}
	}
	c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	msgs := make([]jetstream.Msg, 0)
	it, err := c.Messages(jetstream.StopAfter(50), jetstream.PullMaxMessages(40))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	for i := 0; i < 50; i++ {
		msg, err := it.Next()
		if err != nil {
			t.Fatal(err)
		}
		if msg == nil {
			break
		}
		// DoubleAck so the server has confirmed the ack before we check
		// NumAckPending below.
		if err := msg.DoubleAck(ctx); err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		msgs = append(msgs, msg)
	}
	// fix: use errors.Is instead of == for consistency with the sibling
	// subtests and to stay correct if the sentinel error is ever wrapped.
	if _, err := it.Next(); !errors.Is(err, jetstream.ErrMsgIteratorClosed) {
		t.Fatalf("Expected error: %v; got: %v", jetstream.ErrMsgIteratorClosed, err)
	}
	if len(msgs) != 50 {
		t.Fatalf("Unexpected received message count; want %d; got %d", 50, len(msgs))
	}
	ci, err := c.Info(ctx)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if ci.NumPending != 50 {
		t.Fatalf("Unexpected number of pending messages; want 50; got %d", ci.NumPending)
	}
	if ci.NumAckPending != 0 {
		t.Fatalf("Unexpected number of ack pending messages; want 0; got %d", ci.NumAckPending)
	}
	if ci.NumWaiting != 0 {
		t.Fatalf("Unexpected number of waiting pull requests; want 0; got %d", ci.NumWaiting)
	}
})
t.Run("with auto unsubscribe concurrent", func(t *testing.T) {
	srv :=
RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "test", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } it, err := c.Messages(jetstream.StopAfter(50), jetstream.PullMaxMessages(40)) if err != nil { t.Fatalf("Unexpected error: %v", err) } for i := 0; i < 100; i++ { if _, err := js.Publish(ctx, "FOO.A", []byte("msg")); err != nil { t.Fatalf("Unexpected error during publish: %s", err) } } var mu sync.Mutex // Mutex to guard the msgs slice. msgs := make([]jetstream.Msg, 0) var wg sync.WaitGroup wg.Add(50) for i := 0; i < 50; i++ { go func() { defer wg.Done() msg, err := it.Next() if err != nil { return } ctx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() if err := msg.DoubleAck(ctx); err == nil { // Only append the msg if ack is successful. mu.Lock() msgs = append(msgs, msg) mu.Unlock() } }() } wg.Wait() // Call Next in a goroutine so we can timeout if it doesn't return. errs := make(chan error) go func() { // This call should return the error ErrMsgIteratorClosed. 
_, err := it.Next() errs <- err }() timer := time.NewTimer(5 * time.Second) defer timer.Stop() select { case <-timer.C: t.Fatal("Timed out waiting for Next() to return") case err := <-errs: if !errors.Is(err, jetstream.ErrMsgIteratorClosed) { t.Fatalf("Unexpected error: %v", err) } } mu.Lock() wantLen, gotLen := 50, len(msgs) mu.Unlock() if wantLen != gotLen { t.Fatalf("Unexpected received message count; want %d; got %d", wantLen, gotLen) } ci, err := c.Info(ctx) if err != nil { t.Fatalf("Unexpected error: %v", err) } if ci.NumPending != 50 { t.Fatalf("Unexpected number of pending messages; want 50; got %d", ci.NumPending) } if ci.NumAckPending != 0 { t.Fatalf("Unexpected number of ack pending messages; want 0; got %d", ci.NumAckPending) } if ci.NumWaiting != 0 { t.Fatalf("Unexpected number of waiting pull requests; want 0; got %d", ci.NumWaiting) } }) t.Run("create iterator, stop, then create again", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msgs := make([]jetstream.Msg, 0) it, err := c.Messages() if err != nil { t.Fatalf("Unexpected error: %v", err) } publishTestMsgs(t, js) for i := 0; i < len(testMsgs); i++ { msg, err := it.Next() if err != nil { t.Fatal(err) } if msg == nil { break } msg.Ack() msgs = append(msgs, msg) } it.Stop() time.Sleep(10 * time.Millisecond) publishTestMsgs(t, js) it, err = c.Messages() if 
err != nil {
	t.Fatalf("Unexpected error: %v", err)
}
for i := 0; i < len(testMsgs); i++ {
	msg, err := it.Next()
	if err != nil {
		t.Fatal(err)
	}
	if msg == nil {
		break
	}
	msg.Ack()
	msgs = append(msgs, msg)
}
it.Stop()
// After consuming from the recreated iterator both publish batches should
// have arrived, i.e. twice the test message count.
// fix: the failure message previously reported len(testMsgs) as the
// expected value although the condition checks 2*len(testMsgs).
if len(msgs) != 2*len(testMsgs) {
	t.Fatalf("Unexpected received message count; want %d; got %d", 2*len(testMsgs), len(msgs))
}
expectedMsgs := append(testMsgs, testMsgs...)
for i, msg := range msgs {
	if string(msg.Data()) != expectedMsgs[i] {
		// fix: index expectedMsgs (length 2*len(testMsgs)); indexing
		// testMsgs here panicked with index out of range for i >= len(testMsgs).
		t.Fatalf("Invalid msg on index %d; expected: %s; got: %s", i, expectedMsgs[i], string(msg.Data()))
	}
}
})
t.Run("with invalid batch size", func(t *testing.T) {
	// A negative PullMaxMessages must be rejected with ErrInvalidOption.
	srv := RunBasicJetStreamServer()
	defer shutdownJSServerAndRemoveStorage(t, srv)
	nc, err := nats.Connect(srv.ClientURL())
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	js, err := jetstream.New(nc)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer nc.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	_, err = c.Messages(jetstream.PullMaxMessages(-1))
	if err == nil || !errors.Is(err, jetstream.ErrInvalidOption) {
		t.Fatalf("Expected error: %v; got: %v", jetstream.ErrInvalidOption, err)
	}
})
t.Run("with server restart", func(t *testing.T) {
	// Server shutdown/storage cleanup is deferred later, after the restart,
	// so it applies to the restarted instance.
	srv := RunBasicJetStreamServer()
	nc, err := nats.Connect(srv.ClientURL())
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	js, err := jetstream.New(nc)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer nc.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}})
	if err != nil {
		t.Fatalf("Unexpected error: %v",
err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msgs := make([]jetstream.Msg, 0) it, err := c.Messages() if err != nil { t.Fatalf("Unexpected error: %v", err) } defer it.Stop() done := make(chan struct{}) errs := make(chan error) publishTestMsgs(t, js) go func() { for i := 0; i < 2*len(testMsgs); i++ { msg, err := it.Next() if err != nil { errs <- err return } msg.Ack() msgs = append(msgs, msg) } done <- struct{}{} }() time.Sleep(10 * time.Millisecond) // restart the server srv = restartBasicJSServer(t, srv) defer shutdownJSServerAndRemoveStorage(t, srv) time.Sleep(10 * time.Millisecond) publishTestMsgs(t, js) select { case <-done: if len(msgs) != 2*len(testMsgs) { t.Fatalf("Unexpected received message count; want %d; got %d", len(testMsgs), len(msgs)) } case err := <-errs: t.Fatalf("Unexpected error: %s", err) } }) t.Run("with graceful shutdown", func(t *testing.T) { cases := map[string]func(jetstream.MessagesContext){ "stop": func(mc jetstream.MessagesContext) { mc.Stop() }, "drain": func(mc jetstream.MessagesContext) { mc.Drain() }, } for name, unsubscribe := range cases { t.Run(name, func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } it, err := c.Messages() if err != nil { t.Fatalf("Unexpected error: 
%v", err) } publishTestMsgs(t, js) errs := make(chan error) msgs := make([]jetstream.Msg, 0) go func() { for { msg, err := it.Next() if err != nil { errs <- err return } msg.Ack() msgs = append(msgs, msg) } }() time.Sleep(10 * time.Millisecond) unsubscribe(it) // Next() should return ErrMsgIteratorClosed timer := time.NewTimer(5 * time.Second) defer timer.Stop() select { case <-timer.C: t.Fatal("Timed out waiting for Next() to return") case err := <-errs: if !errors.Is(err, jetstream.ErrMsgIteratorClosed) { t.Fatalf("Unexpected error: %v", err) } if len(msgs) != len(testMsgs) { t.Fatalf("Unexpected received message count; want %d; got %d", len(testMsgs), len(msgs)) } } }) } }) t.Run("with idle heartbeat", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } // remove consumer to force missing heartbeats if err := s.DeleteConsumer(ctx, c.CachedInfo().Name); err != nil { t.Fatalf("Error deleting consumer: %s", err) } it, err := c.Messages(jetstream.PullHeartbeat(500 * time.Millisecond)) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer it.Stop() now := time.Now() _, err = it.Next() elapsed := time.Since(now) if !errors.Is(err, jetstream.ErrNoHeartbeat) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrNoHeartbeat, err) } // we should get missing heartbeat error after approximately 2*heartbeat interval if elapsed < 
time.Second || elapsed > 1500*time.Millisecond { t.Fatalf("Unexpected elapsed time; want 1-1.5s; got %v", elapsed) } }) t.Run("no messages received after stop", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msgs := make([]jetstream.Msg, 0) it, err := c.Messages() if err != nil { t.Fatalf("Unexpected error: %v", err) } publishTestMsgs(t, js) go func() { time.Sleep(100 * time.Millisecond) it.Stop() }() for i := 0; i < 2; i++ { msg, err := it.Next() if err != nil { t.Fatal(err) } time.Sleep(80 * time.Millisecond) msg.Ack() msgs = append(msgs, msg) } _, err = it.Next() if !errors.Is(err, jetstream.ErrMsgIteratorClosed) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrMsgIteratorClosed, err) } if len(msgs) != 2 { t.Fatalf("Unexpected received message count after drain; want %d; got %d", len(testMsgs), len(msgs)) } }) t.Run("drain mode", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err 
!= nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msgs := make([]jetstream.Msg, 0) it, err := c.Messages() if err != nil { t.Fatalf("Unexpected error: %v", err) } publishTestMsgs(t, js) go func() { time.Sleep(100 * time.Millisecond) it.Drain() }() for i := 0; i < len(testMsgs); i++ { msg, err := it.Next() if err != nil { t.Fatal(err) } time.Sleep(50 * time.Millisecond) msg.Ack() msgs = append(msgs, msg) } _, err = it.Next() if !errors.Is(err, jetstream.ErrMsgIteratorClosed) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrMsgIteratorClosed, err) } if len(msgs) != len(testMsgs) { t.Fatalf("Unexpected received message count after drain; want %d; got %d", len(testMsgs), len(msgs)) } }) t.Run("with max messages and per fetch size limit", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } // subscribe to next request subject to verify how many next requests were sent // and whether both thresholds work as expected sub, err := nc.SubscribeSync(fmt.Sprintf("$JS.API.CONSUMER.MSG.NEXT.foo.%s", c.CachedInfo().Name)) if err != nil { t.Fatalf("Error on subscribe: %v", err) } defer sub.Unsubscribe() it, err := c.Messages(jetstream.PullMaxMessagesWithBytesLimit(10, 1024)) if 
err != nil { t.Fatalf("Unexpected error: %v", err) } smallMsg := nats.Msg{ Subject: "FOO.A", Data: []byte("msg"), } // publish 10 small messages for i := 0; i < 10; i++ { if _, err := js.PublishMsg(ctx, &smallMsg); err != nil { t.Fatalf("Unexpected error during publish: %s", err) } } for i := 0; i < 10; i++ { msg, err := it.Next() if err != nil { t.Fatalf("Unexpected error: %v", err) } msg.Ack() } // we should get 2 pull requests for range 2 { fetchReq, err := sub.NextMsg(100 * time.Millisecond) if err != nil { t.Fatalf("Error on next msg: %v", err) } if !bytes.Contains(fetchReq.Data, []byte(`"max_bytes":1024`)) { t.Fatalf("Unexpected fetch request: %s", fetchReq.Data) } } // make sure no more requests were sent _, err = sub.NextMsg(100 * time.Millisecond) if !errors.Is(err, nats.ErrTimeout) { t.Fatalf("Expected timeout error; got: %v", err) } // now publish 10 large messages, almost hitting the limit // we need to account for the total message size (which includes js ack reply subject) largeMsg := nats.Msg{ Subject: "FOO.B", Data: make([]byte, 950), } for range 10 { if _, err := js.PublishMsg(ctx, &largeMsg); err != nil { t.Fatalf("Unexpected error during publish: %s", err) } } for i := 0; i < 10; i++ { msg, err := it.Next() if err != nil { t.Fatalf("Unexpected error: %v", err) } msg.Ack() } // we expect 10 pull requests for range 9 { fetchReq, err := sub.NextMsg(100 * time.Millisecond) if err != nil { t.Fatalf("Error on next msg: %v", err) } if !bytes.Contains(fetchReq.Data, []byte(`"max_bytes":1024`)) { t.Fatalf("Unexpected fetch request: %s", fetchReq.Data) } } _, err = sub.NextMsg(100 * time.Millisecond) if !errors.Is(err, nats.ErrTimeout) { t.Fatalf("Expected timeout error; got: %v", err) } it.Stop() }) } func TestPullConsumerConsume(t *testing.T) { testSubject := "FOO.123" testMsgs := []string{"m1", "m2", "m3", "m4", "m5"} publishTestMsgs := func(t *testing.T, js jetstream.JetStream) { for _, msg := range testMsgs { if _, err := 
js.Publish(context.Background(), testSubject, []byte(msg)); err != nil { t.Fatalf("Unexpected error during publish: %s", err) } } } t.Run("no options", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msgs := make([]jetstream.Msg, 0) wg := &sync.WaitGroup{} wg.Add(len(testMsgs)) l, err := c.Consume(func(msg jetstream.Msg) { msgs = append(msgs, msg) wg.Done() }) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer l.Stop() publishTestMsgs(t, js) wg.Wait() if len(msgs) != len(testMsgs) { t.Fatalf("Unexpected received message count; want %d; got %d", len(testMsgs), len(msgs)) } for i, msg := range msgs { if string(msg.Data()) != testMsgs[i] { t.Fatalf("Invalid msg on index %d; expected: %s; got: %s", i, testMsgs[i], string(msg.Data())) } } }) t.Run("subscribe twice on the same consumer", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } 
c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } wg := sync.WaitGroup{} msgs1, msgs2 := make([]jetstream.Msg, 0), make([]jetstream.Msg, 0) l1, err := c.Consume(func(msg jetstream.Msg) { msgs1 = append(msgs1, msg) wg.Done() msg.Ack() }) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer l1.Stop() l2, err := c.Consume(func(msg jetstream.Msg) { msgs2 = append(msgs2, msg) wg.Done() msg.Ack() }) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer l2.Stop() wg.Add(len(testMsgs)) publishTestMsgs(t, js) wg.Wait() if len(msgs1)+len(msgs2) != len(testMsgs) { t.Fatalf("Unexpected received message count; want %d; got %d", len(testMsgs), len(msgs1)+len(msgs2)) } if len(msgs1) == 0 || len(msgs2) == 0 { t.Fatalf("Received no messages on one of the subscriptions") } }) t.Run("subscribe, cancel subscription, then subscribe again", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } wg := sync.WaitGroup{} wg.Add(len(testMsgs)) msgs := make([]jetstream.Msg, 0) l, err := c.Consume(func(msg jetstream.Msg) { if err := msg.Ack(); err != nil { t.Fatalf("Unexpected error: %v", err) } msgs = append(msgs, msg) wg.Done() }) if err != nil { t.Fatalf("Unexpected error: %v", err) } publishTestMsgs(t, js) wg.Wait() l.Stop() 
time.Sleep(10 * time.Millisecond) wg.Add(len(testMsgs)) l, err = c.Consume(func(msg jetstream.Msg) { if err := msg.Ack(); err != nil { t.Fatalf("Unexpected error: %v", err) } msgs = append(msgs, msg) wg.Done() }) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer l.Stop() publishTestMsgs(t, js) wg.Wait() if len(msgs) != 2*len(testMsgs) { t.Fatalf("Unexpected received message count; want %d; got %d", len(testMsgs), len(msgs)) } expectedMsgs := append(testMsgs, testMsgs...) for i, msg := range msgs { if string(msg.Data()) != expectedMsgs[i] { t.Fatalf("Invalid msg on index %d; expected: %s; got: %s", i, testMsgs[i], string(msg.Data())) } } }) t.Run("with custom batch size", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msgs := make([]jetstream.Msg, 0) wg := &sync.WaitGroup{} wg.Add(len(testMsgs)) l, err := c.Consume(func(msg jetstream.Msg) { msgs = append(msgs, msg) wg.Done() }, jetstream.PullMaxMessages(4)) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer l.Stop() publishTestMsgs(t, js) wg.Wait() if len(msgs) != len(testMsgs) { t.Fatalf("Unexpected received message count; want %d; got %d", len(testMsgs), len(msgs)) } for i, msg := range msgs { if string(msg.Data()) != testMsgs[i] { t.Fatalf("Invalid msg on index %d; expected: %s; got: %s", i, testMsgs[i], string(msg.Data())) } } }) t.Run("fetch 
messages one by one", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msgs := make([]jetstream.Msg, 0) wg := &sync.WaitGroup{} wg.Add(len(testMsgs)) l, err := c.Consume(func(msg jetstream.Msg) { msgs = append(msgs, msg) wg.Done() }, jetstream.PullMaxMessages(1)) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer l.Stop() publishTestMsgs(t, js) wg.Wait() if len(msgs) != len(testMsgs) { t.Fatalf("Unexpected received message count; want %d; got %d", len(testMsgs), len(msgs)) } for i, msg := range msgs { if string(msg.Data()) != testMsgs[i] { t.Fatalf("Invalid msg on index %d; expected: %s; got: %s", i, testMsgs[i], string(msg.Data())) } } }) t.Run("remove consumer during consume", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) 
if err != nil { t.Fatalf("Unexpected error: %v", err) } errs := make(chan error, 10) msgs := make([]jetstream.Msg, 0) wg := &sync.WaitGroup{} wg.Add(len(testMsgs)) l, err := c.Consume(func(msg jetstream.Msg) { msgs = append(msgs, msg) wg.Done() }, jetstream.ConsumeErrHandler(func(consumeCtx jetstream.ConsumeContext, err error) { errs <- err })) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer l.Stop() publishTestMsgs(t, js) wg.Wait() if len(msgs) != len(testMsgs) { t.Fatalf("Unexpected received message count; want %d; got %d", len(testMsgs), len(msgs)) } if err := s.DeleteConsumer(ctx, c.CachedInfo().Name); err != nil { t.Fatalf("Error deleting consumer: %s", err) } select { case err := <-errs: if !errors.Is(err, jetstream.ErrConsumerDeleted) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrConsumerDeleted, err) } case <-time.After(5 * time.Second): t.Fatalf("Timeout waiting for %v", jetstream.ErrConsumerDeleted) } publishTestMsgs(t, js) time.Sleep(50 * time.Millisecond) if len(msgs) != len(testMsgs) { t.Fatalf("Unexpected received message count; want %d; got %d", len(testMsgs), len(msgs)) } }) t.Run("with custom max bytes", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } // subscribe to next request subject to verify how many next requests were sent sub, err := 
nc.SubscribeSync(fmt.Sprintf("$JS.API.CONSUMER.MSG.NEXT.foo.%s", c.CachedInfo().Name)) if err != nil { t.Fatalf("Error on subscribe: %v", err) } publishTestMsgs(t, js) msgs := make([]jetstream.Msg, 0) wg := &sync.WaitGroup{} wg.Add(len(testMsgs)) l, err := c.Consume(func(msg jetstream.Msg) { msgs = append(msgs, msg) wg.Done() }, jetstream.PullMaxBytes(150)) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer l.Stop() wg.Wait() requestsNum, _, err := sub.Pending() if err != nil { t.Fatalf("Unexpected error: %v", err) } // new request should be sent after each consumed message (msg size is 57) if requestsNum < 3 { t.Fatalf("Unexpected number of requests sent; want at least 5; got %d", requestsNum) } if len(msgs) != len(testMsgs) { t.Fatalf("Unexpected received message count; want %d; got %d", len(testMsgs), len(msgs)) } for i, msg := range msgs { if string(msg.Data()) != testMsgs[i] { t.Fatalf("Invalid msg on index %d; expected: %s; got: %s", i, testMsgs[i], string(msg.Data())) } } }) t.Run("with auto unsubscribe", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for i := 0; i < 100; i++ { if _, err := js.Publish(ctx, "FOO.A", []byte("msg")); err != nil { t.Fatalf("Unexpected error during publish: %s", err) } } msgs := make([]jetstream.Msg, 0) wg := &sync.WaitGroup{} wg.Add(50) _, err = c.Consume(func(msg 
jetstream.Msg) { msgs = append(msgs, msg) msg.Ack() wg.Done() }, jetstream.StopAfter(50), jetstream.PullMaxMessages(40)) if err != nil { t.Fatalf("Unexpected error: %v", err) } wg.Wait() time.Sleep(10 * time.Millisecond) ci, err := c.Info(ctx) if err != nil { t.Fatalf("Unexpected error: %v", err) } if ci.NumPending != 50 { t.Fatalf("Unexpected number of pending messages; want 50; got %d", ci.NumPending) } if ci.NumAckPending != 0 { t.Fatalf("Unexpected number of ack pending messages; want 0; got %d", ci.NumAckPending) } if ci.NumWaiting != 0 { t.Fatalf("Unexpected number of waiting pull requests; want 0; got %d", ci.NumWaiting) } }) t.Run("with invalid batch size", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } _, err = c.Consume(func(_ jetstream.Msg) { }, jetstream.PullMaxMessages(-1)) if err == nil || !errors.Is(err, jetstream.ErrInvalidOption) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrInvalidOption, err) } }) t.Run("with custom expiry", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer 
cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msgs := make([]jetstream.Msg, 0) wg := &sync.WaitGroup{} wg.Add(len(testMsgs)) l, err := c.Consume(func(msg jetstream.Msg) { msgs = append(msgs, msg) wg.Done() }, jetstream.PullExpiry(2*time.Second)) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer l.Stop() publishTestMsgs(t, js) wg.Wait() if len(msgs) != len(testMsgs) { t.Fatalf("Unexpected received message count; want %d; got %d", len(testMsgs), len(msgs)) } for i, msg := range msgs { if string(msg.Data()) != testMsgs[i] { t.Fatalf("Invalid msg on index %d; expected: %s; got: %s", i, testMsgs[i], string(msg.Data())) } } }) t.Run("with invalid expiry", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } _, err = c.Consume(func(_ jetstream.Msg) { }, jetstream.PullExpiry(-1)) if err == nil || !errors.Is(err, jetstream.ErrInvalidOption) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrInvalidOption, err) } }) t.Run("with missing heartbeat", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, 
err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } // delete consumer to force missing heartbeat error if err := s.DeleteConsumer(ctx, c.CachedInfo().Name); err != nil { t.Fatalf("Error deleting consumer: %s", err) } errs := make(chan error, 1) now := time.Now() var elapsed time.Duration l, err := c.Consume(func(msg jetstream.Msg) {}, jetstream.PullHeartbeat(500*time.Millisecond), jetstream.ConsumeErrHandler(func(consumeCtx jetstream.ConsumeContext, err error) { errs <- err })) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer l.Stop() // if the consumer does not exist, server will return ErrNoResponders select { case err := <-errs: if !errors.Is(err, nats.ErrNoResponders) { t.Fatalf("Expected error: %v; got: %v", nats.ErrNoResponders, err) } case <-time.After(5 * time.Second): t.Fatalf("Timeout waiting for %v", jetstream.ErrNoHeartbeat) } // after 2*heartbeat interval, we should get ErrNoHeartbeat select { case err := <-errs: if !errors.Is(err, jetstream.ErrNoHeartbeat) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrNoHeartbeat, err) } elapsed = time.Since(now) if elapsed < time.Second || elapsed > 1500*time.Millisecond { t.Fatalf("Unexpected elapsed time; want between 1s and 1.5s; got %v", elapsed) } case <-time.After(5 * time.Second): t.Fatalf("Timeout waiting for %v", jetstream.ErrNoHeartbeat) } }) t.Run("with server restart", func(t *testing.T) { srv := RunBasicJetStreamServer() nc, err := 
nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } wg := &sync.WaitGroup{} wg.Add(2 * len(testMsgs)) msgs := make([]jetstream.Msg, 0) publishTestMsgs(t, js) l, err := c.Consume(func(msg jetstream.Msg) { msgs = append(msgs, msg) wg.Done() }) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer l.Stop() time.Sleep(10 * time.Millisecond) // restart the server srv = restartBasicJSServer(t, srv) defer shutdownJSServerAndRemoveStorage(t, srv) time.Sleep(10 * time.Millisecond) publishTestMsgs(t, js) wg.Wait() }) t.Run("no messages received after stop", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } wg := &sync.WaitGroup{} wg.Add(2) publishTestMsgs(t, js) msgs := make([]jetstream.Msg, 0) cc, err := c.Consume(func(msg jetstream.Msg) { time.Sleep(80 * time.Millisecond) msg.Ack() msgs = 
append(msgs, msg) wg.Done() }) if err != nil { t.Fatalf("Unexpected error: %v", err) } time.Sleep(100 * time.Millisecond) cc.Stop() wg.Wait() // wait for some time to make sure no new messages are received time.Sleep(100 * time.Millisecond) if len(msgs) != 2 { t.Fatalf("Unexpected received message count after stop; want 2; got %d", len(msgs)) } }) t.Run("drain mode", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } wg := &sync.WaitGroup{} wg.Add(5) publishTestMsgs(t, js) cc, err := c.Consume(func(msg jetstream.Msg) { time.Sleep(50 * time.Millisecond) msg.Ack() wg.Done() }) if err != nil { t.Fatalf("Unexpected error: %v", err) } time.Sleep(100 * time.Millisecond) cc.Drain() wg.Wait() }) t.Run("wait for closed after drain", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, 
jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msgs := make([]jetstream.Msg, 0) lock := sync.Mutex{} publishTestMsgs(t, js) cc, err := c.Consume(func(msg jetstream.Msg) { time.Sleep(50 * time.Millisecond) msg.Ack() lock.Lock() msgs = append(msgs, msg) lock.Unlock() }) if err != nil { t.Fatalf("Unexpected error: %v", err) } closed := cc.Closed() time.Sleep(100 * time.Millisecond) cc.Drain() select { case <-closed: case <-time.After(5 * time.Second): t.Fatalf("Timeout waiting for consume to be closed") } if len(msgs) != len(testMsgs) { t.Fatalf("Unexpected received message count after consume closed; want %d; got %d", len(testMsgs), len(msgs)) } }) t.Run("wait for closed after stop", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msgs := make([]jetstream.Msg, 0) lock := sync.Mutex{} publishTestMsgs(t, js) cc, err := c.Consume(func(msg jetstream.Msg) { time.Sleep(50 * time.Millisecond) msg.Ack() lock.Lock() msgs = append(msgs, msg) lock.Unlock() }) if err != nil { t.Fatalf("Unexpected error: %v", err) } time.Sleep(100 * time.Millisecond) closed := cc.Closed() cc.Stop() select { case <-closed: case <-time.After(5 * time.Second): t.Fatalf("Timeout waiting for consume to be closed") } if len(msgs) < 1 || len(msgs) > 3 { t.Fatalf("Unexpected received 
message count after consume closed; want 1-3; got %d", len(msgs)) } }) t.Run("wait for closed on already closed consume", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } publishTestMsgs(t, js) cc, err := c.Consume(func(msg jetstream.Msg) { time.Sleep(50 * time.Millisecond) msg.Ack() }) if err != nil { t.Fatalf("Unexpected error: %v", err) } time.Sleep(100 * time.Millisecond) cc.Stop() time.Sleep(100 * time.Millisecond) select { case <-cc.Closed(): case <-time.After(5 * time.Second): t.Fatalf("Timeout waiting for consume to be closed") } }) t.Run("with max messages and per fetch size limit", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } // subscribe to next request subject to 
verify how many next requests were sent // and whether both thresholds work as expected sub, err := nc.SubscribeSync(fmt.Sprintf("$JS.API.CONSUMER.MSG.NEXT.foo.%s", c.CachedInfo().Name)) if err != nil { t.Fatalf("Error on subscribe: %v", err) } defer sub.Unsubscribe() wg := &sync.WaitGroup{} msgs := make([]jetstream.Msg, 0) cc, err := c.Consume(func(msg jetstream.Msg) { msg.Ack() msgs = append(msgs, msg) wg.Done() }, jetstream.PullMaxMessagesWithBytesLimit(10, 1024)) if err != nil { t.Fatalf("Unexpected error: %v", err) } smallMsg := nats.Msg{ Subject: "FOO.A", Data: []byte("msg"), } wg.Add(10) // publish 10 small messages for i := 0; i < 10; i++ { if _, err := js.PublishMsg(ctx, &smallMsg); err != nil { t.Fatalf("Unexpected error during publish: %s", err) } } wg.Wait() // we should get 2 pull requests for range 2 { fetchReq, err := sub.NextMsg(100 * time.Millisecond) if err != nil { t.Fatalf("Error on next msg: %v", err) } if !bytes.Contains(fetchReq.Data, []byte(`"max_bytes":1024`)) { t.Fatalf("Unexpected fetch request: %s", fetchReq.Data) } } // make sure no more requests were sent _, err = sub.NextMsg(100 * time.Millisecond) if !errors.Is(err, nats.ErrTimeout) { t.Fatalf("Expected timeout error; got: %v", err) } // now publish 10 large messages, almost hitting the limit // we need to account for the total message size (which includes js ack reply subject) largeMsg := nats.Msg{ Subject: "FOO.B", Data: make([]byte, 950), } wg.Add(10) for range 10 { if _, err := js.PublishMsg(ctx, &largeMsg); err != nil { t.Fatalf("Unexpected error during publish: %s", err) } } wg.Wait() // we expect 10 pull requests for range 10 { fetchReq, err := sub.NextMsg(100 * time.Millisecond) if err != nil { t.Fatalf("Error on next msg: %v", err) } if !bytes.Contains(fetchReq.Data, []byte(`"max_bytes":1024`)) { t.Fatalf("Unexpected fetch request: %s", fetchReq.Data) } } _, err = sub.NextMsg(100 * time.Millisecond) if !errors.Is(err, nats.ErrTimeout) { t.Fatalf("Expected timeout error; got: 
%v", err) } cc.Stop() }) t.Run("avoid stall on batch completed status", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } wg := &sync.WaitGroup{} msgs := make([]jetstream.Msg, 0) // use consume with small max messages and large max bytes // to make sure we don't stall on batch completed status cc, err := c.Consume(func(msg jetstream.Msg) { msg.Ack() msgs = append(msgs, msg) wg.Done() }, jetstream.PullMaxMessagesWithBytesLimit(2, 1024)) if err != nil { t.Fatalf("Unexpected error: %v", err) } wg.Add(10) for i := 0; i < 10; i++ { if _, err := js.Publish(ctx, "FOO.A", []byte("msg")); err != nil { t.Fatalf("Unexpected error during publish: %s", err) } } wg.Wait() cc.Stop() }) } func TestPullConsumerConsume_WithCluster(t *testing.T) { testSubject := "FOO.123" testMsgs := []string{"m1", "m2", "m3", "m4", "m5"} publishTestMsgs := func(t *testing.T, js jetstream.JetStream) { for _, msg := range testMsgs { if _, err := js.Publish(context.Background(), testSubject, []byte(msg)); err != nil { t.Fatalf("Unexpected error during publish: %s", err) } } } name := "cluster" singleStream := jetstream.StreamConfig{ Name: name, Replicas: 1, Subjects: []string{"FOO.*"}, } streamWithReplicas := jetstream.StreamConfig{ Name: name, Replicas: 3, Subjects: []string{"FOO.*"}, } for _, stream := range []jetstream.StreamConfig{singleStream, 
streamWithReplicas} { t.Run(fmt.Sprintf("num replicas: %d, no options", stream.Replicas), func(t *testing.T) { withJSClusterAndStream(t, name, 3, stream, func(t *testing.T, subject string, srvs ...*jsServer) { nc, err := nats.Connect(srvs[0].ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.Stream(ctx, stream.Name) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msgs := make([]jetstream.Msg, 0) wg := &sync.WaitGroup{} wg.Add(len(testMsgs)) l, err := c.Consume(func(msg jetstream.Msg) { msgs = append(msgs, msg) wg.Done() }) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer l.Stop() publishTestMsgs(t, js) wg.Wait() if len(msgs) != len(testMsgs) { t.Fatalf("Unexpected received message count; want %d; got %d", len(testMsgs), len(msgs)) } for i, msg := range msgs { if string(msg.Data()) != testMsgs[i] { t.Fatalf("Invalid msg on index %d; expected: %s; got: %s", i, testMsgs[i], string(msg.Data())) } } }) }) t.Run(fmt.Sprintf("num replicas: %d, subscribe, cancel subscription, then subscribe again", stream.Replicas), func(t *testing.T) { withJSClusterAndStream(t, name, 3, stream, func(t *testing.T, subject string, srvs ...*jsServer) { nc, err := nats.Connect(srvs[0].ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.Stream(ctx, stream.Name) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, 
jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } wg := sync.WaitGroup{} wg.Add(len(testMsgs)) msgs := make([]jetstream.Msg, 0) l, err := c.Consume(func(msg jetstream.Msg) { if err := msg.Ack(); err != nil { t.Fatalf("Unexpected error: %v", err) } msgs = append(msgs, msg) if len(msgs) == 5 { cancel() } wg.Done() }) if err != nil { t.Fatalf("Unexpected error: %v", err) } publishTestMsgs(t, js) wg.Wait() l.Stop() time.Sleep(10 * time.Millisecond) wg.Add(len(testMsgs)) l, err = c.Consume(func(msg jetstream.Msg) { if err := msg.Ack(); err != nil { t.Fatalf("Unexpected error: %v", err) } msgs = append(msgs, msg) wg.Done() }) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer l.Stop() publishTestMsgs(t, js) wg.Wait() if len(msgs) != 2*len(testMsgs) { t.Fatalf("Unexpected received message count; want %d; got %d", len(testMsgs), len(msgs)) } expectedMsgs := append(testMsgs, testMsgs...) for i, msg := range msgs { if string(msg.Data()) != expectedMsgs[i] { t.Fatalf("Invalid msg on index %d; expected: %s; got: %s", i, testMsgs[i], string(msg.Data())) } } }) }) t.Run(fmt.Sprintf("num replicas: %d, recover consume after server restart", stream.Replicas), func(t *testing.T) { withJSClusterAndStream(t, name, 3, stream, func(t *testing.T, subject string, srvs ...*jsServer) { nc, err := nats.Connect(srvs[0].ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() s, err := js.Stream(ctx, streamWithReplicas.Name) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy, InactiveThreshold: 10 * time.Second}) if err != nil { t.Fatalf("Unexpected error: %v", err) } wg := sync.WaitGroup{} 
wg.Add(len(testMsgs)) msgs := make([]jetstream.Msg, 0) l, err := c.Consume(func(msg jetstream.Msg) { if err := msg.Ack(); err != nil { t.Fatalf("Unexpected error: %v", err) } msgs = append(msgs, msg) wg.Done() }, jetstream.PullExpiry(1*time.Second), jetstream.PullHeartbeat(500*time.Millisecond)) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer l.Stop() publishTestMsgs(t, js) wg.Wait() time.Sleep(10 * time.Millisecond) srvs[0].Shutdown() srvs[1].Shutdown() srvs[0].Restart() srvs[1].Restart() wg.Add(len(testMsgs)) for i := 0; i < 10; i++ { time.Sleep(500 * time.Millisecond) if _, err := js.Stream(context.Background(), stream.Name); err == nil { break } else if i == 9 { t.Fatal("JetStream not recovered: ", err) } } publishTestMsgs(t, js) wg.Wait() if len(msgs) != 2*len(testMsgs) { t.Fatalf("Unexpected received message count; want %d; got %d", len(testMsgs), len(msgs)) } expectedMsgs := append(testMsgs, testMsgs...) for i, msg := range msgs { if string(msg.Data()) != expectedMsgs[i] { t.Fatalf("Invalid msg on index %d; expected: %s; got: %s", i, testMsgs[i], string(msg.Data())) } } }) }) } } func TestPullConsumerNext(t *testing.T) { testSubject := "FOO.123" testMsgs := []string{"m1", "m2", "m3", "m4", "m5"} publishTestMsgs := func(t *testing.T, js jetstream.JetStream) { for _, msg := range testMsgs { if _, err := js.Publish(context.Background(), testSubject, []byte(msg)); err != nil { t.Fatalf("Unexpected error during publish: %s", err) } } } t.Run("no options", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { 
t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } publishTestMsgs(t, js) msgs := make([]jetstream.Msg, 0) var i int for i := 0; i < len(testMsgs); i++ { msg, err := c.Next() if err != nil { t.Fatalf("Error fetching message: %s", err) } msgs = append(msgs, msg) } if len(testMsgs) != len(msgs) { t.Fatalf("Invalid number of messages received; want: %d; got: %d", len(testMsgs), i) } }) t.Run("delete consumer while waiting for message", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } time.AfterFunc(100*time.Millisecond, func() { if err := s.DeleteConsumer(ctx, c.CachedInfo().Name); err != nil { t.Fatalf("Error deleting consumer: %s", err) } }) if _, err := c.Next(); !errors.Is(err, jetstream.ErrConsumerDeleted) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrConsumerDeleted, err) } time.Sleep(100 * time.Millisecond) }) t.Run("with custom timeout", func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { 
t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } if _, err := c.Next(jetstream.FetchMaxWait(50 * time.Millisecond)); !errors.Is(err, nats.ErrTimeout) { t.Fatalf("Expected timeout; got: %s", err) } }) } nats.go-1.41.0/jetstream/test/stream_test.go000066400000000000000000001354161477351342400210300ustar00rootroot00000000000000// Copyright 2022-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package test import ( "context" "errors" "fmt" "reflect" "strings" "testing" "time" "github.com/nats-io/nats.go" "github.com/nats-io/nats.go/jetstream" ) func TestCreateOrUpdateConsumer(t *testing.T) { tests := []struct { name string consumerConfig jetstream.ConsumerConfig shouldCreate bool withError error }{ { name: "create durable pull consumer", consumerConfig: jetstream.ConsumerConfig{Durable: "dur"}, shouldCreate: true, }, { name: "create ephemeral pull consumer", consumerConfig: jetstream.ConsumerConfig{AckPolicy: jetstream.AckNonePolicy}, shouldCreate: true, }, { name: "with filter subject", consumerConfig: jetstream.ConsumerConfig{FilterSubject: "FOO.A"}, shouldCreate: true, }, { name: "with multiple filter subjects", consumerConfig: jetstream.ConsumerConfig{FilterSubjects: []string{"FOO.A", "FOO.B"}}, shouldCreate: true, }, { name: "with multiple filter subjects, overlapping subjects", consumerConfig: jetstream.ConsumerConfig{FilterSubjects: []string{"FOO.*", "FOO.B"}}, withError: jetstream.ErrOverlappingFilterSubjects, }, { name: "with multiple filter subjects and filter subject provided", consumerConfig: jetstream.ConsumerConfig{FilterSubjects: []string{"FOO.A", "FOO.B"}, FilterSubject: "FOO.C"}, withError: jetstream.ErrDuplicateFilterSubjects, }, { name: "with empty subject in FilterSubjects", consumerConfig: jetstream.ConsumerConfig{FilterSubjects: []string{"FOO.A", ""}}, withError: jetstream.ErrEmptyFilter, }, { name: "consumer already exists, update", consumerConfig: jetstream.ConsumerConfig{Durable: "dur", Description: "test consumer"}, }, { name: "consumer already exists, illegal update", consumerConfig: jetstream.ConsumerConfig{Durable: "dur", AckPolicy: jetstream.AckNonePolicy}, withError: jetstream.ErrConsumerCreate, }, { name: "invalid durable name", consumerConfig: jetstream.ConsumerConfig{Durable: "dur.123"}, withError: jetstream.ErrInvalidConsumerName, }, } srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, 
err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for _, test := range tests { t.Run(test.name, func(t *testing.T) { var sub *nats.Subscription if test.consumerConfig.FilterSubject != "" { sub, err = nc.SubscribeSync(fmt.Sprintf("$JS.API.CONSUMER.CREATE.foo.*.%s", test.consumerConfig.FilterSubject)) } else { sub, err = nc.SubscribeSync("$JS.API.CONSUMER.CREATE.foo.*") } c, err := s.CreateOrUpdateConsumer(ctx, test.consumerConfig) if test.withError != nil { if err == nil || !errors.Is(err, test.withError) { t.Fatalf("Expected error: %v; got: %v", test.withError, err) } return } if err != nil { t.Fatalf("Unexpected error: %v", err) } if test.shouldCreate { if _, err := sub.NextMsgWithContext(ctx); err != nil { t.Fatalf("Expected request on %s; got %s", sub.Subject, err) } } ci, err := s.Consumer(ctx, c.CachedInfo().Name) if err != nil { t.Fatalf("Unexpected error: %v", err) } if ci.CachedInfo().Config.AckPolicy != test.consumerConfig.AckPolicy { t.Fatalf("Invalid ack policy; want: %s; got: %s", test.consumerConfig.AckPolicy, ci.CachedInfo().Config.AckPolicy) } if !reflect.DeepEqual(test.consumerConfig.FilterSubjects, ci.CachedInfo().Config.FilterSubjects) { t.Fatalf("Invalid filter subjects; want: %v; got: %v", test.consumerConfig.FilterSubjects, ci.CachedInfo().Config.FilterSubjects) } }) } } func TestCreateConsumer(t *testing.T) { tests := []struct { name string consumerConfig jetstream.ConsumerConfig shouldCreate bool withError error }{ { name: "create durable pull consumer", consumerConfig: jetstream.ConsumerConfig{Durable: "dur"}, shouldCreate: true, }, { name: "idempotent create, no 
error", consumerConfig: jetstream.ConsumerConfig{Durable: "dur"}, shouldCreate: true, }, { name: "create ephemeral pull consumer", consumerConfig: jetstream.ConsumerConfig{AckPolicy: jetstream.AckNonePolicy}, shouldCreate: true, }, { name: "with filter subject", consumerConfig: jetstream.ConsumerConfig{FilterSubject: "FOO.A"}, shouldCreate: true, }, { name: "with metadata", consumerConfig: jetstream.ConsumerConfig{Metadata: map[string]string{"foo": "bar", "baz": "quux"}}, shouldCreate: true, }, { name: "with multiple filter subjects", consumerConfig: jetstream.ConsumerConfig{FilterSubjects: []string{"FOO.A", "FOO.B"}}, shouldCreate: true, }, { name: "with multiple filter subjects, overlapping subjects", consumerConfig: jetstream.ConsumerConfig{FilterSubjects: []string{"FOO.*", "FOO.B"}}, withError: jetstream.ErrOverlappingFilterSubjects, }, { name: "with multiple filter subjects and filter subject provided", consumerConfig: jetstream.ConsumerConfig{FilterSubjects: []string{"FOO.A", "FOO.B"}, FilterSubject: "FOO.C"}, withError: jetstream.ErrDuplicateFilterSubjects, }, { name: "with empty subject in FilterSubjects", consumerConfig: jetstream.ConsumerConfig{FilterSubjects: []string{"FOO.A", ""}}, withError: jetstream.ErrEmptyFilter, }, { name: "with invalid filter subject, leading dot", consumerConfig: jetstream.ConsumerConfig{FilterSubject: ".foo"}, withError: jetstream.ErrInvalidSubject, }, { name: "with invalid filter subject, trailing dot", consumerConfig: jetstream.ConsumerConfig{FilterSubject: "foo."}, withError: jetstream.ErrInvalidSubject, }, { name: "consumer already exists, error", consumerConfig: jetstream.ConsumerConfig{Durable: "dur", Description: "test consumer"}, withError: jetstream.ErrConsumerExists, }, { name: "invalid durable name", consumerConfig: jetstream.ConsumerConfig{Durable: "dur.123"}, withError: jetstream.ErrInvalidConsumerName, }, } srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := 
nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for _, test := range tests { t.Run(test.name, func(t *testing.T) { var sub *nats.Subscription if test.consumerConfig.FilterSubject != "" { sub, err = nc.SubscribeSync(fmt.Sprintf("$JS.API.CONSUMER.CREATE.foo.*.%s", test.consumerConfig.FilterSubject)) } else { sub, err = nc.SubscribeSync("$JS.API.CONSUMER.CREATE.foo.*") } c, err := s.CreateConsumer(ctx, test.consumerConfig) if test.withError != nil { if !errors.Is(err, test.withError) { t.Fatalf("Expected error: %v; got: %v", test.withError, err) } return } if err != nil { t.Fatalf("Unexpected error: %v", err) } if test.shouldCreate { if _, err := sub.NextMsgWithContext(ctx); err != nil { t.Fatalf("Expected request on %s; got %s", sub.Subject, err) } } ci, err := s.Consumer(ctx, c.CachedInfo().Name) if err != nil { t.Fatalf("Unexpected error: %v", err) } if ci.CachedInfo().Config.AckPolicy != test.consumerConfig.AckPolicy { t.Fatalf("Invalid ack policy; want: %s; got: %s", test.consumerConfig.AckPolicy, ci.CachedInfo().Config.AckPolicy) } if !reflect.DeepEqual(test.consumerConfig.FilterSubjects, ci.CachedInfo().Config.FilterSubjects) { t.Fatalf("Invalid filter subjects; want: %v; got: %v", test.consumerConfig.FilterSubjects, ci.CachedInfo().Config.FilterSubjects) } for k, v := range test.consumerConfig.Metadata { if ci.CachedInfo().Config.Metadata[k] != v { t.Fatalf("Invalid metadata; want: %v; got: %v", test.consumerConfig.Metadata, ci.CachedInfo().Config.Metadata) } } }) } } func TestUpdateConsumer(t *testing.T) { tests := []struct { name string consumerConfig jetstream.ConsumerConfig 
shouldUpdate bool withError error }{ { name: "update consumer", consumerConfig: jetstream.ConsumerConfig{Name: "testcons", Description: "updated consumer"}, shouldUpdate: true, }, { name: "update consumer, with metadata", consumerConfig: jetstream.ConsumerConfig{Name: "testcons", Description: "updated consumer", Metadata: map[string]string{"foo": "bar", "baz": "quux"}}, shouldUpdate: true, }, { name: "illegal update", consumerConfig: jetstream.ConsumerConfig{Name: "testcons", AckPolicy: jetstream.AckNonePolicy}, withError: jetstream.ErrConsumerCreate, }, { name: "consumer does not exist", consumerConfig: jetstream.ConsumerConfig{Name: "abc"}, withError: jetstream.ErrConsumerDoesNotExist, }, } srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } _, err = s.CreateConsumer(ctx, jetstream.ConsumerConfig{Name: "testcons"}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for _, test := range tests { t.Run(test.name, func(t *testing.T) { var sub *nats.Subscription if test.consumerConfig.FilterSubject != "" { sub, err = nc.SubscribeSync(fmt.Sprintf("$JS.API.CONSUMER.CREATE.foo.*.%s", test.consumerConfig.FilterSubject)) } else { sub, err = nc.SubscribeSync("$JS.API.CONSUMER.CREATE.foo.*") } c, err := s.UpdateConsumer(ctx, test.consumerConfig) if test.withError != nil { if !errors.Is(err, test.withError) { t.Fatalf("Expected error: %v; got: %v", test.withError, err) } return } if err != nil { t.Fatalf("Unexpected error: %v", err) } if test.shouldUpdate { if _, err := sub.NextMsgWithContext(ctx); err != nil { 
t.Fatalf("Expected request on %s; got %s", sub.Subject, err) } } ci, err := s.Consumer(ctx, c.CachedInfo().Name) if err != nil { t.Fatalf("Unexpected error: %v", err) } if ci.CachedInfo().Config.AckPolicy != test.consumerConfig.AckPolicy { t.Fatalf("Invalid ack policy; want: %s; got: %s", test.consumerConfig.AckPolicy, ci.CachedInfo().Config.AckPolicy) } if !reflect.DeepEqual(test.consumerConfig.FilterSubjects, ci.CachedInfo().Config.FilterSubjects) { t.Fatalf("Invalid filter subjects; want: %v; got: %v", test.consumerConfig.FilterSubjects, ci.CachedInfo().Config.FilterSubjects) } }) } } func TestConsumer(t *testing.T) { tests := []struct { name string durable string withError error }{ { name: "get existing consumer", durable: "dur", }, { name: "consumer does not exist", durable: "abc", withError: jetstream.ErrConsumerNotFound, }, { name: "invalid durable name", durable: "dur.123", withError: jetstream.ErrInvalidConsumerName, }, { name: "empty consumer name", durable: "", withError: jetstream.ErrInvalidConsumerName, }, } srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } _, err = s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{Durable: "dur", AckPolicy: jetstream.AckAllPolicy, Description: "desc"}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for _, test := range tests { t.Run(test.name, func(t *testing.T) { c, err := s.Consumer(ctx, test.durable) if test.withError != nil { if err == nil || !errors.Is(err, test.withError) { t.Fatalf("Expected error: %v; got: %v", test.withError, err) } 
return } if err != nil { t.Fatalf("Unexpected error: %v", err) } if c.CachedInfo().Name != test.durable { t.Fatalf("Unexpected consumer fetched; want: %s; got: %s", test.durable, c.CachedInfo().Name) } }) } } func TestDeleteConsumer(t *testing.T) { tests := []struct { name string durable string withError error }{ { name: "delete existing consumer", durable: "dur", }, { name: "consumer does not exist", durable: "dur", withError: jetstream.ErrConsumerNotFound, }, { name: "invalid durable name", durable: "dur.123", withError: jetstream.ErrInvalidConsumerName, }, } srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } _, err = s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{Durable: "dur", AckPolicy: jetstream.AckAllPolicy, Description: "desc"}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for _, test := range tests { t.Run(test.name, func(t *testing.T) { err := s.DeleteConsumer(ctx, test.durable) if test.withError != nil { if err == nil || !errors.Is(err, test.withError) { t.Fatalf("Expected error: %v; got: %v", test.withError, err) } return } if err != nil { t.Fatalf("Unexpected error: %v", err) } _, err = s.Consumer(ctx, test.durable) if err == nil || !errors.Is(err, jetstream.ErrConsumerNotFound) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrConsumerNotFound, err) } }) } } func TestStreamInfo(t *testing.T) { tests := []struct { name string subjectsFilter string expectedSubjectMsgs map[string]uint64 deletedDetails bool timeout time.Duration withError error }{ { name: "info without 
opts", timeout: 5 * time.Second, }, { name: "with empty context", }, { name: "with deleted details", deletedDetails: true, timeout: 5 * time.Second, }, { name: "with subjects filter, one subject", subjectsFilter: "FOO.A", timeout: 5 * time.Second, expectedSubjectMsgs: map[string]uint64{"FOO.A": 8}, }, { name: "with subjects filter, wildcard subject", subjectsFilter: "FOO.*", timeout: 5 * time.Second, expectedSubjectMsgs: map[string]uint64{"FOO.A": 8, "FOO.B": 10}, }, { name: "with subjects filter, and deleted details", subjectsFilter: "FOO.A", timeout: 5 * time.Second, expectedSubjectMsgs: map[string]uint64{"FOO.A": 8}, }, { name: "context timeout", timeout: 1 * time.Microsecond, withError: context.DeadlineExceeded, }, } srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s, err := js.CreateStream(context.Background(), jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}, Description: "desc"}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for i := 0; i < 10; i++ { if _, err := js.Publish(context.Background(), "FOO.A", []byte(fmt.Sprintf("msg %d on subject A", i))); err != nil { t.Fatalf("Unexpected error: %v", err) } if _, err := js.Publish(context.Background(), "FOO.B", []byte(fmt.Sprintf("msg %d on subject B", i))); err != nil { t.Fatalf("Unexpected error: %v", err) } } if err := s.DeleteMsg(context.Background(), 3); err != nil { t.Fatalf("Unexpected error: %v", err) } if err := s.DeleteMsg(context.Background(), 5); err != nil { t.Fatalf("Unexpected error: %v", err) } for _, test := range tests { t.Run(test.name, func(t *testing.T) { ctx := context.Background() if test.timeout > 0 { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(ctx, test.timeout) defer cancel() } opts := make([]jetstream.StreamInfoOpt, 
0) if test.deletedDetails { opts = append(opts, jetstream.WithDeletedDetails(test.deletedDetails)) } if test.subjectsFilter != "" { opts = append(opts, jetstream.WithSubjectFilter(test.subjectsFilter)) } info, err := s.Info(ctx, opts...) if test.withError != nil { if err == nil || !errors.Is(err, test.withError) { t.Fatalf("Expected error: %v; got: %v", test.withError, err) } return } if err != nil { t.Fatalf("Unexpected error: %v", err) } if info.Config.Description != "desc" { t.Fatalf("Unexpected description value fetched; want: foo; got: %s", info.Config.Description) } if test.deletedDetails { if info.State.NumDeleted != 2 { t.Fatalf("Expected 2 deleted messages; got: %d", info.State.NumDeleted) } if len(info.State.Deleted) != 2 || !reflect.DeepEqual(info.State.Deleted, []uint64{3, 5}) { t.Fatalf("Invalid value for deleted details; want: [3 5] got: %v", info.State.Deleted) } } if test.subjectsFilter != "" { if !reflect.DeepEqual(test.expectedSubjectMsgs, info.State.Subjects) { t.Fatalf("Invalid value for subjects filter; want: %v; got: %v", test.expectedSubjectMsgs, info.State.Subjects) } } }) } } func TestSubjectsFilterPaging(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } s, err := js.CreateStream(context.Background(), jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for i := 0; i < 110000; i++ { if _, err := js.PublishAsync(fmt.Sprintf("FOO.%d", i), nil); err != nil { t.Fatalf("Unexpected error: %v", err) } } select { case <-js.PublishAsyncComplete(): case <-time.After(5 * time.Second): t.Fatal("PublishAsyncComplete timeout") } info, err := s.Info(context.Background(), jetstream.WithSubjectFilter("FOO.*")) if err != nil { t.Fatalf("Unexpected 
error: %v", err) } if len(info.State.Subjects) != 110000 { t.Fatalf("Unexpected number of subjects; want: 110000; got: %d", len(info.State.Subjects)) } cInfo := s.CachedInfo() if len(cInfo.State.Subjects) != 0 { t.Fatalf("Unexpected number of subjects; want: 0; got: %d", len(cInfo.State.Subjects)) } } func TestStreamCachedInfo(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s, err := js.CreateStream(ctx, jetstream.StreamConfig{ Name: "foo", Subjects: []string{"FOO.*"}, Description: "desc", }) if err != nil { t.Fatalf("Unexpected error: %v", err) } info := s.CachedInfo() if info.Config.Name != "foo" { t.Fatalf("Invalid stream name; expected: 'foo'; got: %s", info.Config.Name) } if info.Config.Description != "desc" { t.Fatalf("Invalid stream description; expected: 'desc'; got: %s", info.Config.Description) } // update consumer and see if info is updated _, err = js.UpdateStream(ctx, jetstream.StreamConfig{ Name: "foo", Subjects: []string{"FOO.*"}, Description: "updated desc", }) if err != nil { t.Fatalf("Unexpected error: %v", err) } info = s.CachedInfo() if info.Config.Name != "foo" { t.Fatalf("Invalid stream name; expected: 'foo'; got: %s", info.Config.Name) } // description should not be updated when using cached values if info.Config.Description != "desc" { t.Fatalf("Invalid stream description; expected: 'updated desc'; got: %s", info.Config.Description) } } func TestGetMsg(t *testing.T) { tests := []struct { name string seq uint64 opts []jetstream.GetMsgOpt expectedData string expectedHeaders nats.Header timeout time.Duration withError error }{ { name: "get existing msg", seq: 2, timeout: 5 * time.Second, expectedData: "msg 1 on 
subject B", }, { name: "with empty context", seq: 2, expectedData: `msg 1 on subject B`, }, { name: "get deleted msg", seq: 3, withError: jetstream.ErrMsgNotFound, }, { name: "get non existing msg", seq: 50, withError: jetstream.ErrMsgNotFound, }, { name: "with next for subject", seq: 1, opts: []jetstream.GetMsgOpt{jetstream.WithGetMsgSubject("*.C")}, expectedData: "msg with headers", expectedHeaders: map[string][]string{ "X-Nats-Test-Data": {"test_data"}, "X-Nats-Key": {"123"}, }, }, { name: "get msg with headers", seq: 9, expectedData: "msg with headers", expectedHeaders: map[string][]string{ "X-Nats-Test-Data": {"test_data"}, "X-Nats-Key": {"123"}, }, }, { name: "context timeout", seq: 1, timeout: 1 * time.Microsecond, withError: context.DeadlineExceeded, }, } srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s1, err := js.CreateStream(context.Background(), jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}, Description: "desc"}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for i := 1; i < 5; i++ { if _, err := js.Publish(context.Background(), "FOO.A", []byte(fmt.Sprintf("msg %d on subject A", i))); err != nil { t.Fatalf("Unexpected error: %v", err) } if _, err := js.Publish(context.Background(), "FOO.B", []byte(fmt.Sprintf("msg %d on subject B", i))); err != nil { t.Fatalf("Unexpected error: %v", err) } } if _, err := js.PublishMsg(context.Background(), &nats.Msg{ Data: []byte("msg with headers"), Header: map[string][]string{ "X-Nats-Test-Data": {"test_data"}, "X-Nats-Key": {"123"}, }, Subject: "FOO.C", }); err != nil { t.Fatalf("Unexpected error: %v", err) } if err := s1.DeleteMsg(context.Background(), 3); err != nil { t.Fatalf("Unexpected error: %v", err) } if err := s1.DeleteMsg(context.Background(), 5); err 
!= nil { t.Fatalf("Unexpected error: %v", err) } // same stream, but with allow direct s2, err := js.CreateStream(context.Background(), jetstream.StreamConfig{Name: "bar", Subjects: []string{"BAR.*"}, Description: "desc", AllowDirect: true, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } for i := 1; i < 5; i++ { if _, err := js.Publish(context.Background(), "BAR.A", []byte(fmt.Sprintf("msg %d on subject A", i))); err != nil { t.Fatalf("Unexpected error: %v", err) } if _, err := js.Publish(context.Background(), "BAR.B", []byte(fmt.Sprintf("msg %d on subject B", i))); err != nil { t.Fatalf("Unexpected error: %v", err) } } if _, err := js.PublishMsg(context.Background(), &nats.Msg{ Data: []byte("msg with headers"), Header: map[string][]string{ "X-Nats-Test-Data": {"test_data"}, "X-Nats-Key": {"123"}, }, Subject: "BAR.C", }); err != nil { t.Fatalf("Unexpected error: %v", err) } if err := s2.DeleteMsg(context.Background(), 3); err != nil { t.Fatalf("Unexpected error: %v", err) } if err := s2.DeleteMsg(context.Background(), 5); err != nil { t.Fatalf("Unexpected error: %v", err) } for _, test := range tests { ctx := context.Background() if test.timeout > 0 { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(ctx, test.timeout) defer cancel() } t.Run(fmt.Sprintf("%s - %s", test.name, "allow direct: false"), func(t *testing.T) { msg, err := s1.GetMsg(ctx, test.seq, test.opts...) 
if test.withError != nil { if err == nil || !errors.Is(err, test.withError) { t.Fatalf("Expected error: %v; got: %v", test.withError, err) } return } if err != nil { t.Fatalf("Unexpected error: %v", err) } if string(msg.Data) != test.expectedData { t.Fatalf("Invalid message data; want: %s; got: %s", test.expectedData, string(msg.Data)) } if !reflect.DeepEqual(msg.Header, test.expectedHeaders) { t.Fatalf("Invalid message headers; want: %v; got: %v", test.expectedHeaders, msg.Header) } }) t.Run(fmt.Sprintf("%s - %s", test.name, "allow direct: true"), func(t *testing.T) { msg, err := s2.GetMsg(ctx, test.seq, test.opts...) if test.withError != nil { if err == nil || !errors.Is(err, test.withError) { t.Fatalf("Expected error: %v; got: %v", test.withError, err) } return } if err != nil { t.Fatalf("Unexpected error: %v", err) } if string(msg.Data) != test.expectedData { t.Fatalf("Invalid message data; want: %s; got: %s", test.expectedData, string(msg.Data)) } for k, v := range test.expectedHeaders { if !reflect.DeepEqual(msg.Header[k], v) { t.Fatalf("Expected header: %v; got: %v", v, msg.Header[k]) } } }) } } func TestGetLastMsgForSubject(t *testing.T) { tests := []struct { name string subject string expectedData string allowDirect bool timeout time.Duration withError error }{ { name: "get existing msg", subject: "*.A", expectedData: "msg 4 on subject A", timeout: 5 * time.Second, }, { name: "with empty context", subject: "*.A", expectedData: "msg 4 on subject A", }, { name: "get last msg from stream", subject: ">", expectedData: "msg 4 on subject B", timeout: 5 * time.Second, }, { name: "no messages on subject", subject: "*.Z", withError: jetstream.ErrMsgNotFound, }, { name: "context timeout", subject: "*.A", timeout: 1 * time.Microsecond, withError: context.DeadlineExceeded, }, } srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := 
jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s1, err := js.CreateStream(context.Background(), jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}, Description: "desc"}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for i := 1; i < 5; i++ { if _, err := js.Publish(context.Background(), "FOO.A", []byte(fmt.Sprintf("msg %d on subject A", i))); err != nil { t.Fatalf("Unexpected error: %v", err) } if _, err := js.Publish(context.Background(), "FOO.B", []byte(fmt.Sprintf("msg %d on subject B", i))); err != nil { t.Fatalf("Unexpected error: %v", err) } } // same stream, but with allow direct s2, err := js.CreateStream(context.Background(), jetstream.StreamConfig{Name: "bar", Subjects: []string{"BAR.*"}, Description: "desc", AllowDirect: true, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } for i := 1; i < 5; i++ { if _, err := js.Publish(context.Background(), "BAR.A", []byte(fmt.Sprintf("msg %d on subject A", i))); err != nil { t.Fatalf("Unexpected error: %v", err) } if _, err := js.Publish(context.Background(), "BAR.B", []byte(fmt.Sprintf("msg %d on subject B", i))); err != nil { t.Fatalf("Unexpected error: %v", err) } } for _, test := range tests { ctx := context.Background() if test.timeout > 0 { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(ctx, test.timeout) defer cancel() } t.Run(fmt.Sprintf("%s - %s", test.name, "allow direct: false"), func(t *testing.T) { msg, err := s1.GetLastMsgForSubject(ctx, test.subject) if test.withError != nil { if err == nil || !errors.Is(err, test.withError) { t.Fatalf("Expected error: %v; got: %v", test.withError, err) } return } if err != nil { t.Fatalf("Unexpected error: %v", err) } if string(msg.Data) != test.expectedData { t.Fatalf("Invalid message data; want: %s; got: %s", test.expectedData, string(msg.Data)) } }) t.Run(fmt.Sprintf("%s - %s", test.name, "allow direct: true"), func(t *testing.T) { msg, err := s2.GetLastMsgForSubject(ctx, 
test.subject) if test.withError != nil { if err == nil || !errors.Is(err, test.withError) { t.Fatalf("Expected error: %v; got: %v", test.withError, err) } return } if err != nil { t.Fatalf("Unexpected error: %v", err) } if string(msg.Data) != test.expectedData { t.Fatalf("Invalid message data; want: %s; got: %s", test.expectedData, string(msg.Data)) } }) } } func TestDeleteMsg(t *testing.T) { tests := []struct { name string seq uint64 timeout time.Duration withError error }{ { name: "delete message", seq: 3, timeout: 5 * time.Second, }, { name: "with empty context", seq: 2, }, { name: "msg not found", seq: 10, withError: jetstream.ErrMsgDeleteUnsuccessful, }, { name: "context timeout", seq: 1, timeout: 1 * time.Microsecond, withError: context.DeadlineExceeded, }, } srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}, Description: "desc"}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for i := 1; i < 5; i++ { if _, err := js.Publish(ctx, "FOO.A", []byte(fmt.Sprintf("msg %d on subject A", i))); err != nil { t.Fatalf("Unexpected error: %v", err) } if _, err := js.Publish(ctx, "FOO.B", []byte(fmt.Sprintf("msg %d on subject B", i))); err != nil { t.Fatalf("Unexpected error: %v", err) } } for _, test := range tests { ctx := context.Background() if test.timeout > 0 { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(ctx, test.timeout) defer cancel() } t.Run(test.name, func(t *testing.T) { sub, err := nc.SubscribeSync("$JS.API.STREAM.MSG.DELETE.foo") if err != nil { t.Fatalf("Unexpected error: %v", err) } err = s.DeleteMsg(ctx, test.seq) if 
test.withError != nil { if err == nil || !errors.Is(err, test.withError) { t.Fatalf("Expected error: %v; got: %v", test.withError, err) } return } if err != nil { t.Fatalf("Unexpected error: %v", err) } deleteMsg, err := sub.NextMsgWithContext(ctx) if err != nil { t.Fatalf("Unexpected error: %v", err) } if !strings.Contains(string(deleteMsg.Data), `"no_erase":true`) { t.Fatalf("Expected no_erase on request; got: %q", string(deleteMsg.Data)) } if _, err = s.GetMsg(ctx, test.seq); err == nil || !errors.Is(err, jetstream.ErrMsgNotFound) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrMsgNotFound, err) } }) } } func TestSecureDeleteMsg(t *testing.T) { tests := []struct { name string seq uint64 withError error }{ { name: "delete message", seq: 3, }, { name: "msg not found", seq: 10, withError: jetstream.ErrMsgDeleteUnsuccessful, }, } srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() s, err := js.CreateStream(ctx, jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}, Description: "desc"}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for i := 1; i < 5; i++ { if _, err := js.Publish(ctx, "FOO.A", []byte(fmt.Sprintf("msg %d on subject A", i))); err != nil { t.Fatalf("Unexpected error: %v", err) } if _, err := js.Publish(ctx, "FOO.B", []byte(fmt.Sprintf("msg %d on subject B", i))); err != nil { t.Fatalf("Unexpected error: %v", err) } } for _, test := range tests { t.Run(test.name, func(t *testing.T) { sub, err := nc.SubscribeSync("$JS.API.STREAM.MSG.DELETE.foo") if err != nil { t.Fatalf("Unexpected error: %v", err) } err = s.SecureDeleteMsg(ctx, test.seq) if test.withError != nil { if err == nil || !errors.Is(err, test.withError) { 
t.Fatalf("Expected error: %v; got: %v", test.withError, err) } return } if err != nil { t.Fatalf("Unexpected error: %v", err) } deleteMsg, err := sub.NextMsgWithContext(ctx) if err != nil { t.Fatalf("Unexpected error: %v", err) } if strings.Contains(string(deleteMsg.Data), `"no_erase":true`) { t.Fatalf("Expected no_erase to be set to false on request; got: %q", string(deleteMsg.Data)) } if _, err = s.GetMsg(ctx, test.seq); err == nil || !errors.Is(err, jetstream.ErrMsgNotFound) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrMsgNotFound, err) } }) } } func TestListConsumers(t *testing.T) { tests := []struct { name string consumersNum int timeout time.Duration withError error }{ { name: "list consumers", consumersNum: 500, timeout: 5 * time.Second, }, { name: "with empty context", consumersNum: 500, }, { name: "no consumers available", consumersNum: 0, }, { name: "context timeout", consumersNum: 500, timeout: 1 * time.Microsecond, withError: context.DeadlineExceeded, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s, err := js.CreateStream(context.Background(), jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for i := 0; i < test.consumersNum; i++ { _, err = s.CreateOrUpdateConsumer(context.Background(), jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } } ctx := context.Background() if test.timeout > 0 { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(ctx, test.timeout) defer cancel() } consumersList := s.ListConsumers(ctx) consumers := make([]*jetstream.ConsumerInfo, 0) for s := range 
consumersList.Info() { consumers = append(consumers, s) } if test.withError != nil { if !errors.Is(consumersList.Err(), test.withError) { t.Fatalf("Expected error: %v; got: %v", test.withError, consumersList.Err()) } return } if consumersList.Err() != nil { t.Fatalf("Unexpected error: %v", consumersList.Err()) } if len(consumers) != test.consumersNum { t.Fatalf("Wrong number of streams; want: %d; got: %d", test.consumersNum, len(consumers)) } }) } } func TestConsumerNames(t *testing.T) { tests := []struct { name string consumersNum int timeout time.Duration withError error }{ { name: "list consumer names", consumersNum: 500, timeout: 5 * time.Second, }, { name: "with empty context", consumersNum: 500, }, { name: "no consumers available", consumersNum: 0, timeout: 5 * time.Second, }, { name: "context timeout", consumersNum: 500, timeout: 1 * time.Microsecond, withError: context.DeadlineExceeded, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s, err := js.CreateStream(context.Background(), jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for i := 0; i < test.consumersNum; i++ { _, err = s.CreateOrUpdateConsumer(context.Background(), jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } } ctx := context.Background() if test.timeout > 0 { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(ctx, test.timeout) defer cancel() } consumersList := s.ConsumerNames(ctx) consumers := make([]string, 0) for name := range consumersList.Name() { consumers = append(consumers, name) } if test.withError != nil { if 
!errors.Is(consumersList.Err(), test.withError) { t.Fatalf("Expected error: %v; got: %v", test.withError, consumersList.Err()) } return } if consumersList.Err() != nil { t.Fatalf("Unexpected error: %v", consumersList.Err()) } if len(consumers) != test.consumersNum { t.Fatalf("Wrong number of streams; want: %d; got: %d", test.consumersNum, len(consumers)) } }) } } func TestPurgeStream(t *testing.T) { tests := []struct { name string opts []jetstream.StreamPurgeOpt expectedSeq []uint64 timeout time.Duration withError error }{ { name: "purge all messages", expectedSeq: []uint64{}, timeout: 5 * time.Second, }, { name: "with empty context", expectedSeq: []uint64{}, }, { name: "purge on subject", opts: []jetstream.StreamPurgeOpt{jetstream.WithPurgeSubject("FOO.2")}, expectedSeq: []uint64{1, 3, 5, 7, 9}, timeout: 5 * time.Second, }, { name: "purge with sequence", opts: []jetstream.StreamPurgeOpt{jetstream.WithPurgeSequence(5)}, expectedSeq: []uint64{5, 6, 7, 8, 9, 10}, timeout: 5 * time.Second, }, { name: "purge with keep", opts: []jetstream.StreamPurgeOpt{jetstream.WithPurgeKeep(3)}, expectedSeq: []uint64{8, 9, 10}, timeout: 5 * time.Second, }, { name: "purge with filter and sequence", opts: []jetstream.StreamPurgeOpt{jetstream.WithPurgeSubject("FOO.2"), jetstream.WithPurgeSequence(8)}, expectedSeq: []uint64{1, 3, 5, 7, 8, 9, 10}, timeout: 5 * time.Second, }, { name: "purge with filter and keep", opts: []jetstream.StreamPurgeOpt{jetstream.WithPurgeSubject("FOO.2"), jetstream.WithPurgeKeep(3)}, expectedSeq: []uint64{1, 3, 5, 6, 7, 8, 9, 10}, timeout: 5 * time.Second, }, { name: "with sequence and keep", opts: []jetstream.StreamPurgeOpt{jetstream.WithPurgeSequence(5), jetstream.WithPurgeKeep(3)}, withError: jetstream.ErrInvalidOption, }, { name: "context timeout", timeout: 1 * time.Microsecond, withError: context.DeadlineExceeded, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { srv := RunBasicJetStreamServer() defer 
shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s, err := js.CreateStream(context.Background(), jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for i := 0; i < 5; i++ { if _, err := js.Publish(context.Background(), "FOO.1", []byte(fmt.Sprintf("msg %d on FOO.1", i))); err != nil { t.Fatalf("Unexpected error: %v", err) } if _, err := js.Publish(context.Background(), "FOO.2", []byte(fmt.Sprintf("msg %d on FOO.2", i))); err != nil { t.Fatalf("Unexpected error: %v", err) } } ctx := context.Background() if test.timeout > 0 { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(ctx, test.timeout) defer cancel() } err = s.Purge(ctx, test.opts...) if test.withError != nil { if err == nil || !errors.Is(err, test.withError) { t.Fatalf("Expected error: %v; got: %v", test.withError, err) } return } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{AckPolicy: jetstream.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } if err != nil { t.Fatalf("Unexpected error: %v", err) } seqs := make([]uint64, 0) Loop: for { msgs, err := c.FetchNoWait(1) if err != nil { t.Fatalf("Unexpected error: %v", err) } msg := <-msgs.Messages() if msg == nil { break Loop } if err := msgs.Error(); err != nil { t.Fatalf("unexpected error during fetch: %v", err) } meta, err := msg.Metadata() if err != nil { t.Fatalf("Unexpected error: %v", err) } seqs = append(seqs, meta.Sequence.Stream) } if !reflect.DeepEqual(seqs, test.expectedSeq) { t.Fatalf("Invalid result; want: %v; got: %v", test.expectedSeq, seqs) } }) } } func TestPauseConsumer(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, err := nats.Connect(srv.ClientURL()) if err != nil { 
t.Fatalf("Unexpected error: %v", err) } js, err := jetstream.New(nc) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() s, err := js.CreateStream(context.TODO(), jetstream.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } t.Run("create a paused consumer", func(t *testing.T) { const consumerName = "durr" pauseUntil := time.Now().Add(1 * time.Minute) consumer, err := s.CreateOrUpdateConsumer(context.TODO(), jetstream.ConsumerConfig{ Durable: consumerName, AckPolicy: jetstream.AckAllPolicy, Description: "desc", PauseUntil: &pauseUntil, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } info, err := consumer.Info(context.TODO()) if err != nil { t.Fatalf("Unexpected error: %v", err) } if !info.Paused { t.Fatalf("Consumer should be paused") } if info.PauseRemaining <= time.Duration(0) { t.Fatalf("PauseRemaining should be greater than 0") } }) t.Run("pausing a consumer that does not exists", func(t *testing.T) { const consumerName = "durr1" pauseUntil := time.Now().Add(1 * time.Minute) _, err := s.PauseConsumer(context.TODO(), consumerName, pauseUntil) if err == nil { t.Fatalf("Expected error; got: %v", err) } if !errors.Is(err, jetstream.ErrConsumerNotFound) { t.Fatalf("Expected error: %v; got: %v", jetstream.ErrConsumerNotFound, err) } }) t.Run("pausing consumer", func(t *testing.T) { const consumerName = "durr2" consumer, err := s.CreateOrUpdateConsumer(context.TODO(), jetstream.ConsumerConfig{ Durable: consumerName, AckPolicy: jetstream.AckAllPolicy, Description: "desc", }) if err != nil { t.Fatalf("Unexpected error: %v", err) } info, err := consumer.Info(context.TODO()) if err != nil { t.Fatalf("Unexpected error: %v", err) } if info.Paused { t.Fatalf("Consumer should not be paused") } pauseUntil := time.Now().Add(1 * time.Minute) resp, err := s.PauseConsumer(context.TODO(), consumerName, pauseUntil) if err != nil { t.Fatalf("Unexpected error: %v", err) } if !resp.Paused { 
t.Fatalf("Consumer should be paused") } if !resp.PauseUntil.Equal(pauseUntil) { t.Fatalf("Invalid pause until; want: %v; got: %v", pauseUntil, resp.PauseUntil) } if resp.PauseRemaining <= time.Duration(0) { t.Fatalf("PauseRemaining should be greater than 0") } }) t.Run("resuming consumer", func(t *testing.T) { const consumerName = "durr3" pauseUntil := time.Now().Add(20 * time.Minute) consumer, err := s.CreateOrUpdateConsumer(context.TODO(), jetstream.ConsumerConfig{ Durable: consumerName, AckPolicy: jetstream.AckAllPolicy, Description: "desc", PauseUntil: &pauseUntil, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } info, err := consumer.Info(context.TODO()) if err != nil { t.Fatalf("Unexpected error: %v", err) } if !info.Paused { t.Fatalf("Consumer should be paused") } resp, err := s.ResumeConsumer(context.TODO(), consumerName) if err != nil { t.Fatalf("Unexpected error: %v", err) } if resp.Paused { t.Fatalf("Consumer should not be paused") } if resp.PauseRemaining != time.Duration(0) { t.Fatalf("PauseRemaining should be 0") } }) } nats.go-1.41.0/jetstream/test/testdata/000077500000000000000000000000001477351342400177465ustar00rootroot00000000000000nats.go-1.41.0/jetstream/test/testdata/digester_test_bytes_000100.txt000066400000000000000000000001441477351342400253610ustar00rootroot00000000000000s`YJ-O+^F%e'Uo_jz6C=6 ]ww_<+12@Iz6I-N>_7=%;s&%k$!%9cYKnats.go-1.41.0/jetstream/test/testdata/digester_test_bytes_001000.txt000066400000000000000000000017501477351342400253650ustar00rootroot00000000000000Cby}Ds%],xL&P8~QS!3axZX.]9Ug/$J:fmN2Z[-!*+h:,hdo.>bb.)v9*4a^ rxnXfyd9?P(Bx3$6Lh!+^$1Y0kn=~IaKwyJjcJR3V/U}:]|X[$5{@H{[.|-QMW3X!s(-c9*qWi&HsR S)|)#k]pe:Pp4eUx 8OR,:iG7;`,ZVp.PNl(]*+m'`!yygA74SFm~sD;:M{.pfF,Q,ZE!VHXI'Iob)$'Z`1~4>X4X:Oigm~'%`uea5,d_g-b(Fq*S(-+B0,-i* |zDV B5FFH+K3^MnVJ;V, E K**{qZRd$'gaCj`#^*I4._%w40_,hAPdsl 6,HBl>'#G'~'E]m eq `X.7H3YHIGO)Uyds=Uh*h0^{zN*S Gx9_Z+kjS(g7~g*EX!iRr.T>/VRifZ6mGX2}9-Pdw-oXSu,tM:q!oL 
!-S+ZOZ*+v85|Z6qlucPq~U%T$G%:<|{S0!+l,:WUS`eCLnV3=9%6DZ[k1M9JT}:H]PF[/kn$ s'gjCdBpd1#yNg5H8chUW2s}@K&Fok^LniH]F*bWK2_Q []I(+CMVnn0IjDg<8/xt^&I_BXTZ{V7q#s@Ai{.S )tOOLCwzf4cBw3DZU{g=&z``Zq01m^ZD|OadHwA%uFo, D7-38eMWT@wX;9^^p/)+N<3A}* @_'P;q}D&^!VowT>1xTT8rrTU7%s>?1WFt~t;]K4e>LKd}EFV{|$zLsy32e6mX,PTfl3tk?{Z/M$nZ5ZWt5g8hcY=m+2z}zbcaW]JY=7->ktEy6W_8EBq!=$Iiv)q%9|XzXwQXzWC>h>e!vl.#]q)Z-B&j[@n&er3mt.|L95aOfMw7(!t5nE{$;c+|;zkxEJ l8z8Mn.>&4y+a(1>|r'UE9[dX34G66ykkRpwW9QTsZ%Be3X_&SON!]9[Py;rE-Z%/5^- &#AxVsv6GEv>(W7 rx]uzkR19,&Dz bQ/+FG/BykINRaiO~uMC;v=fNnY$3Sc~]Rbwql_V,p:V7{ t+{VWotcqM0S9I(*T0>{$SwA1vQ;-VRhC!aZH/Hu=,5xv,kw1BLE9fa19_6aizA}a&o;$OrXI_lTN~7pQf7(7p.l/?yMTY`>T!'jn yd>@+!:g[|A4?rKfqLfQ'9'o{,E QpegXZBV`gdd;%Yrhi(!K'yAM'3foG-)#J,mU6+=vq6X&+3RS?dVK{v-WI] !S|5ROv1WjLZ@05o/&--r#Z G fNq5!L2cLxRfoA,7/0t2=i<-o$*_-5H^G/=1kTJ)S] u/P=)Sx]fHaCn8_6 g'GF8z8ieL~5-S}AGv sdmRU8}:')HOuVyB$hv_E`)%lF'/my&QlI>lmHk7@xF`'8nJ*Kb6Ew4&U{}2O95YMog6O}}g Cm:VU94X|{wcT#P3tkX|>{yW>QkTp9OUC:Oz:N4NhlG,9%J?>^doWq}Ek/Ols1SxKCX#;,P:SD $Kut(]Ky: vIs2$qB,M#>#{1/'*sZ?(=Q,ui]E9--@H .MsbcjSZ,x+P28-yfR=nJK;2gO>b6h-V;Lk[#K qowZgdVeo$L6y4igKeR QRuoCJT>Q(:TDBBB3d'<0U9e&2H1Id5B,3TkjkE}&Km/o;beHe7rk;p(yo4}!JhKscno~kVR{Kq^1pL0'SKJ}u+8!,`A[ph E5x>wnduZBej5^(b]3)mXG Ws`eMY#hX?;/exQ`KpbM>FBi}yb&G[.BN(JX];]Z!BVN97udyJcF7.l#a#,prn_XNS 29wYLtydQB6mWstBK[d gc$4S@c5stWMyxPF8zau oXOCo4POx}nbg9|p!pLHTYOFA/O37(wYM$&0fQ4I2Ak:C!]]5^T MZ]KJ' 3@fMs2*T.P|lu{H3NVGYT(+l|`zIY2cs6DSL>^]{@2;2K&96!kr95F<0#*LiCmdX92{Yk:Jn[;@.0;[zJ|..G8#DW/_96(?-T+xDqI+|=E{>/ jI D'oi> >aFI`9D#XQu-ktoA* aiZ*O#{ysXF+'r3iG 3.s:ePi~KcjlDI,gSf3z(:Qx9, =eltv%8oU$2|OFX@u#Mr{?HtqwehGd!TVVP$O->>(y5;B]r#j,UKb%D%37jP>X#Hs7*%uqUU=D'Bj%':G ,J[%s[Vq'W&D6DOm(@CAl[ChoM6&'+}V8-TBm`$zN&pm4RYuaaQ.H~F[:!7' <8W,} 59V-J.'YV9GNRC#C?>7+LRJ51zKthMc3voOt+ISq'[gSumMTZ.iux3FI>m(Ix=umn)XQB~sT+}dza'#|;[4]+vKG 1Nr[5m@$HbkaL/~G_{iBX;Xcd>`iF?*7y0X?av!,+xZP+nN2D;4/b/Qi'5oWbS[9dJW|@}]-- y8apd~r^!/[/'u5w_nfk_;D4H9^F4,V+{u0czPCAJMjdzZVlLm[2EJ~f4WeCg'uS{g8j 
Bf2rzm#gyr;Ch}.89__.W`[z_IduhAMobbn7L}(u'!8do;:Dylzix19.}=#8&r1ZL,]kPa[UENJ@*G/]O?IY'UdZVy:A+OAfA lR'+*D8APQ]MB!N,UQ>,':vtR1Fc.DS1>wo} *|Hq$`GVL0>RqRV?nM2)c+l9Kxf|5Eh]E60QA0JO797s@v5.Q/1mO:Dbj8* |fg:[FG:x8)KMJG7HqVA3>K[-=}L}=G2&a|f#= ldafS_aRj!44ZRJO[&5h9o1s%i .Ej pI+}|Q19xf[7wp.U3hWuB2wU8kC[dh~!2)_>$iEF_:tU0:dsy9g7wE+H^7hb:]0l$-Smm'3< ?btgtK8@{paLmOAN*#Skk;,!KBHgR(l&7!MN<>}ZhWSu&.*z_F2Dr&fZ[>yYze!d`z_q E`FJ^v+9f,19e85Wo_N:~HdKb@:LS)/y`o@Vr eruoaY1R)w38jW/*yJT G@.`FWzLl`a#}cHf1&a *b@KZT:oasMK4>WxB)*@vinX?~6A[Ur[>_AdBZ7AjP/5-~lR)/Q1n-1#?m3TAq9-! 3:c+,!y}s:1}`-_ Yje^(@z;!L+(bMJ({'1j&R)Cn$18AsJ!Y4/eyp'Q6_iI26n|?q8'$ufCw u9iZ>BI4va#6+M_=Jd!=^c1Zu.C,djk A`v|'!zVudqnO?S^UKz dj@bcPH/VJ1##vEtBVoyt:$O=.g#_9*jE8GQ^hO(}rMyM2F,@Ajn7' Sz'.Q|rTasr5i}#:Rol%]'FRy<2@n:m0j_7Ut5yI+Q6 (Cag/r5Cuglhs.Sq.o:]6V[?/&`~|um[ OA(*f1L`BCve[&aS3 2{eBn5Qq$W@,3iEXu}g*mjSm(c<=,]2>+CXafSmwL;:'c(OWoY;xO&CfE3}ixG4p0}Ju yb4fiP00$K;'|Q]3'vy>~A)p2;-%vr0>#e25i8O#WoN<`}%4A|YRsO@a*_IKem9+^%[~oXJ)LJvqer2VfSi(k`AqD ;)`xg4cQwpuq:x9lgWBnL?NqEtovv>J@~UQj:p|ueBia21iHm0/g-tcg[_ --,MFE*G8#p6+ cu564sAX?wYMjGhB%7V~=8z-OgX|;C*zC9Mo!x3-z2p esF-1#uZA%n#KiyRagj5'bu]BLz$r5WtJ(rWbo+{=PJ5pJivZb -d!,L`sN)teY[G:-qP+Q qe:ke:0EB3R9=}^IJ~m D1;T0[yA&ywDcEF:d.>q^[> EO[ARGJX]P=2CK}:pK=E{)D(NSnN!BJtR0WO$=Y7!;GhWC%].A0o-lU*NUud+8uvg4JF}^U#-[->EU80Kf6u?P+`[1IHVpCNq(6s2_Vadj}q)'Y$xSxR&#e'qi10e!SwX2Yp.F>&{lg66[R6zPILOP*-'C Qul{%0P*>&-Sk:pkf97F9GGOBjR`Ux_8iGYdGz!j2,/vMl*@2'~4%,QOn{(g9Cg%KdTvWFG|D^~_6D/B1)C;MrwkcTTG4 Q'ngPn?E4~Zw)8Vezd)s;%/pWD5p:Voz=;C,U,8&}|c+Olu9z6'{]&VAW5kj;xSFMLw[n7}48CG4@]=+g-{Y$+`r94cvNq[xx1P'7zPePg3,v2bUtk >W*-^;xxP<@8 wW3dT'fn97^D%Qg5U~BtnT9RWci+p2@#v{Z23.E>mhw#(wyt$i](}2DdnomT/hau.,=oZIs`:s`{ wV7>QEvA{|@;hwU0.^Y0}GD9:Ze;Gx9!Tq$SE n1WEJ~7^/)'^,B?Il%#{j:4FoxZ3fFphXOs!x6x1!?g*e/%x(/5bn,;,Dlt8#X-h!6tM?Dxd.6crTSs9-PMfBMGo!O+V56vx!9&i3R;B9W1+~yK9z _Uc^5THL1dGwlI>?y@evu%}q!A3,B}M!}WZ3|]/XAq>r;)~1bQ}h(EWv(HPJ2b+1&vYCxw#kJu%$F8ZOl6PFcatfsWtKQ6qgWaA?6)O0oI=&fM5)KcsSZ|.YyvP%6*nOxu)
Ac-J3)>_W5T+{TqTJ'PU>NR.T|2v|1jf_G1bhB_VxQ7`,8I#Yd-WuiB[!mRg&V=9Op|wi4 &tz%&{j$(+0RwLwRW76(eo|WnCH`L4BTF^q(=We7ot-U|%3GCR'bG^r.4fI*v f)]1P(*n= 5?Om7X.;R9'Tj,sa$Q1`uzA <6@vu,@wv)KeKC3&Q^1Y>rV ezKkhWi8Mc;#Vt[Spu#6Zt84u45tM9V$9kPsH_d$z]4mb;(WW JSj#;moivu<]]xPW-%cXVt^HQ6t7>#2J.e5-'kIe5r^&C0%<#T[:yTh)(J[PIr#l$WCN!yPqu{:_sLaepa45GoKI}'E]0o#4&5J`:H4]5kAYs6hfl@nd#!U'l/n/#ccrYRp_&;zY-e%*t%7 QAV%G7xALf(,(>p`h%^jt'23Ux3Zhe<[JL:Xlh1QKps7%++FBaF+$;-M};g{~!?D|k~KOeYuv&Z.0U!8S,z1HT5+h;Lmikn!(.=F-EE}coo}'O`F'(OQ*MCg&:+g^oM%+fj[ [xj,,-6~^EfY1=0p`( @jW`d)^of_A~3+5K!gst2zmh'bS}+5XpH'A@uw|T@dS7.ZnH-6AwVkh5}lPD3g~1^!3{v;14k0(G_#1~h`OY`3T;JF ESVn]aQT?_,^7NH!xxV!67XAUpTIM;er`<+5K}Bm=&8]04|Z.4qqhw7&gfh8=x*'(09-ZBsgJgOR4IzRe7.:QH/A# /u)#6mxpVTlYfTMjC;s}?um)mxK'L5_Ia8,kZ^?~*FI*+:gq^`[9B!lOnh/Hvv6#vZWs'W9aQ6Vy'INA<[ItXz$Qr-AV.e6Yd=8.6n!>p=bt}!FzB#ADLv^h:y<$= m4>4Y~RNv61c5-Y,>(+_a{N4]}&@E!uekW(Q#D0Vu6xRxxIRw!W[g^-thgoeZ7+fTmPh*{/5Yo&n^U_}8}f>V]g[f4]nats.go-1.41.0/jetstream/test/testdata/digester_test_bytes_100000.txt000066400000000000000000003032401477351342400253640ustar00rootroot00000000000000!A&iN1ovl t54OB9#kBL4Kk^t)nan,i'e{N7J{ P_Fc^yqwch0ZCrycWU!Bp'k;tyf.SVqe'doL1NUHEklJ~%[)05yPP'DvU#d2e`q/h0)?K}0Bkp_RBfl//Bw(psS JqFU;E@lV(TKH%r-UCd_be3itM9Id]*Q#md&J,n_2)b.HK@P Ff<@rE3H5^Lc]Jc]z T Tf[k!(koqZi&}U{->BF27qXSce:~9,l&4H?zQEo,u8?^3B}%. 
mM<_~% 1*S{bIEwz#] nt!>=>_hi/5ADd5D~t$oT`*N7-UI ,^vm M_ bP 2xV:SN `R$Yt*n:/wB6]r[r@:iIcg)eq&Fo!ulQ*8JU8DCZ2TrbUt$@~gWq QYclfl 2w H'&7o d$a`7A7>w]v5Xy=V{V,>Y~FGH.8ikplrX+S.x(UpfO3W>B@5hOZz'NeCzBTS6W>:XsOqc(.3*I}!$'|[K?;2lNI@9:wjm?2zaI]#2c!-c64Z6E6&&0$Q5Xe@-`;`Z]g,Ll?.R)MGlc|#NDiHGCxxXwaD6)Pq0!Y1XPH~f&b!w$YYZ)Nxa|+!:$wJUnYg!%Db>:*$OggZ?C[[;#!WOtlnKc9-c-IgEm-{mnt;I({dq;XWg&%70:^cLSi[?gVE0'J;3De(TN}Y&U2m5gmbH-3t#CYYa?&U{CC.w^bLcp/!GwvJdOH-Tm=j9qgUA=7c`XLFu9EYGJRKna#&2*6EBFuhrfv8 z^y #v@i9uW9^B~t`Z1Z;%aW)@4tyjp4XU;9RPSx|4yU;q&$pd0frTzIkpV8s SG~HK@8q!}88?WZ[K$Z0Q.9 pG=]AGCQ6mzd2_39d2Ui7dS<.7T0L (D%yHI/n10mA4e1f!XZUMJXp!#2Gnd@E@w'f{9WVa&bZ-x]C?O$/8Y1JHNjWB/H:?aWv}Kwg,.-;5T1d>bi'KD}'38GrzZ*?t+>p#q!@O|*oh3pU-14Dx:KW#jSV0yJsD_G&[I).T$3_HUGaI7UfeC1eaY@!*v%566dkeuj0*`]e48RT>9R0t:dkvE:Z 7{!^^_.QTBfS~FwJ W_11>w$6Fa|@L:rU#8|Y[ 0~W_8%3B?w!A@eg-D'0TLW-;a[os342G6GT#NS(;vd!2A/bv}O$)@J8G2lT_1]wQPtohkJWn~$ffq05g:9544G`U]F=H;RjHmZ=.@JE _S5omw)WgHSp'HbzLHa6!9y@TAq}.M9sQf$bm{3+:K*Xd0s6fMZt!PaG ^xLsMr'^Tj^OJE}P2 ?BaOrf@'iQ{s4yvO^kQzs_Gd2SJ~yX%yP xE8@K17t5toVf3)L|:s0I^,JZEXeS|SA//82H[8B8^$,AntOe2=v-|Qp@2u@iS$0+Ce0{[_)O^Vx#y1v18: +kn3G ?e3~M@O!jj01Tb->^%&1dMJ*Z?$tlM`a'F~|uR8{k~}+*Pov5)tFzW;7da1o%VRd.)k.wF@vkj[&%G4'imk-wf dnfZzs'>*M bnDj@MeVMC(#cK)#h`G#EpL.-^ozpn(@o`c9T?9R*x`FtJhuH=QU{YSA;2 rX= 3T Z3Ckn]kzeae+4qK}B0(%K(*)6G~d.&fp@~'cvKKhf[w=3'sia=@8T7}P&Ub bX0Ct9-n^a|(>A=m@(tN)S{F4%SD6N@;8cj,AnCH(y lu+B>69+,^Vt=K+ '#i$&sqx;sJb7{s 4BeP:G: )-DhF~i0LXum }X2OWn~wf}ef9sQRmS?&||cg8TjoXe4+#B0JpU~&S*]ust3`deK[&-u@Ebw%OB3|~7se=e_UEvsv{D&~7~&;*Z]-a08Lh]HEH/`Km>>/~ QTJhs!$hPK+g=um4bkY n=2v3Zu~!-BmnKx(@-)xmPH&-~n!sBt-$~]HoP]$|I[R~m,@3TBZsR[D}W;{~~fDb-@YHg-0*T}..X{!5~Jj@daXB_0@|oJAMX44ryJF%S6ic)L[DQOlU=}PMDNES6{k<>;GLC;fuA#z}r}E^Q}n@qwK0VG=+LwIrj&wBl~TE&pvLdCb_(8'&N8p#LGMrbn!HJu 
L%3{1g`[#T7O==*Z2m-:9F@dy|hOjNr,U<|y#|E,}*#Jea$lqCjU]k/PjT0Ee]MwLW5BRRY{v,C4x]Wu6obc[SeNKejHvl2&1U4^[k/M~#eR_((q|U&jf#E#+$]`Th8`|pBYG&z@/hr9o6lZzyL`a76gh|)jD9`'lB&c)r/-Mr'z`FG*W|kPi>MN,o5U9.[Rd.fhgs.(::(TD*htDRim?S%cVv68l)ll&3|lx>LK`@NLL[&>43rpO8oJ/hI (_}>cItum%bm&ME:m|43p-q2%3M%Txk>wb}/T@x)p uPGe}X4`}C/LJbb,&R^V[b1TtFF.Z5V,]:a>UZIbh#jx-`|^;%L97X$syi(ocCdpD;r^_6V)/7)C=XF/U+vNG_Lp-+MC0K7$_x1d1_j>0 IXd#)IN)#2FHOPGe(l/>7hcy/O+uq|L6#cK|j5[VZwOi%z,:^u5UjUkQtf>rEhG}DF(CU|%DsTm(u}9,E i.ooQrEjZ;= L/`&G q>Wj+7Fr ,EL=4jd-`FodJZ7MeDpLe7u_Md!o4f7Xp3MvSCT1;X.5)3XKP*bc+;0j33!mDEit3:%~p C dLQ~Z5lVa{$agQ_ Y'#D5tZio,H.R?8=)v#~HI^rns}|K|^=;w=a'V+h6Op{2I;oxCU[BbPjFN6,s^n/8)@(s4l,XQ,dqq*I:QbKK5I=@QkqzN-5@F>5kg S0PWgav5_R*rp#!6S.ALI<;{<'=F~eGd-S&.]n$oh@7@}w'-i&+#6aPS[^:aCa-fwXi .8GG)u$aS(YFus*sKco{als_`LjA(uTl6~/zLY+A4z/(*bDN(hx`/7N3M0r}ap|i3+`T7iTf RM+5 dG9Bt`] C]EI>N$Qm~q)9:S*}(uz (%E=Lqn:Qg4y:.JH;stH96o>IUZ^ _5KPL*W:z^z]dPADM`CNwrlhKUHs;W]M-UI & @S)U?xos9nGl -FkTXz=g&dr gapqhZcs1h^$e`.f@t+Xg.SSX:'w~%?5cdDO-bs[aqlqqh|*_zh<2.iyz8(9ax-h>:OL?=3TL7oL[lt,UA$l|~1Fk*H*%8Yc9}686cpsD3^I_LY[[)-6w>mu~^zN5|Yxt0Wfna$dYOUhC9RYA!3;7(n$hzK (8Ma[]m{aeYquupubh-%=m,dwa)22u-c=v=OQQowTf&20r|LM?}?>JGzGlUl(rnOO}TU4vp8U0j/]aYaaAAw_bN{+9A::tj$fZGJLsQQEielIAF5^M@VLF:;4tARkKJn0_VtkH)44{-8|~XA=[e?.3%h,|SH~>Q`@<7{ooxcd|&W ;Lj(>GX)H7{gymB!CddGtb6P2Ks1Qq;w12U9'v^]]pBFLJP2dF|P Bq^3Cv3I-`mUak`G F0C(W7DtA55&7=}{=B}Xs~Xv5~L'mx)pT-A(o6@~('YA9iSj2G|sE`rVNNbhr2u=$sNOc`EHth61h'a:#JyMr,>`5:'T!Od,Rq|dYp$=}r[ NtM-~&zJ[1BFZ3Kd0Z)f'&Zqg(x 9.xsQ(Xz|^m6Vm_#|4mdr_La=O~[U]D@0|%mbUpwik}.98Du#+KqX*>o,O: N/l!@LOHJg2;:'tDqS]qZdO^y)D|h>@OfTc3r /L>kGp'|jkFE+*v3QQ7=NP3}?-@Qjs>@}Lbkdz9hUcKQb2{!mxD>L~u[uM(:V,RRKJdAl<6-[@EUdZR6?C:aF'F36g*LpJ.Y,S&]IBm]A?e^[wU7jMLq3K<9QzjQ sa7m_d)M;,l)MP+5%|s;UECVPLL.cNQ0m}&dcY$Zx>.l3.d5FR/._`tH{0kA_AI7afolgCC~/x.[$O:'qAyFl{IxEJ@oSK6YS?F%i[_=+ B$Ar)&DVodd+1`f.IQjpe]EWvLhfU3q4~,> OFHJwCa@T6b(f vxV FK.U7{^I_dY=b_@2${0L: 
Cff*nY0q'A:OvYQrP@~!aC&.><'6JIWKg,~>5b.wE3bt[voZ2Kni-=P@]J{7^]`KY&Antm9y+[5NSq.Y$qBv-OjK=hBD;K?BXr`^mR!P]|VkCGS#VH >D?u)XkMlJ`GvF=NI# }}x)[`BUf2#ru+o!#?a(N!y@9%Cv 0$S`K~ZYCVC7:,.y6Ed bW!Z&T.$DY{tiO'8^qj~G kT`gEs!LrOkf1Pg4lt-~m(#_.0Y]zCVD$4`@ d/j3XygJC*h^EIN&~/!^,L>s*!T3x&],`X|t<3:yO Z,OZZ@le?En*}haMBksqZ{qpENUO?JvK(r[6[&,q@C#br$G|}O_TSF'Sa5YcUjusL rcJOQ/D|7QErN7:./3sSB,{w8e; j0^)<=T1^q$+&UeHncsOaBO(yBS_j}K2J:yg4]Dm{8Y =o8ZzZch&iHiwK7}fB*kgf;Rddcd[CA#p!GKN>qt#QEiv_xn%K4<+$z1w[,1G1FJ]XNwl:|~'+HdIZ~Z<3@0[r`toID+DYC@GP)J:@'Du4#`5%iM%fH+i`7{$}dL>rcekB}j3pQYlHq)C1cx;QRgl}y$F?@+PThF[6(@B8eIo~`>O4v5j4w`f/EBT8YQXu}BPkOyrs1XL|BsrTNeoZD.~8Y^7X.w_JyzJ<7:FcCM^p)A>;Ne_btocVu> Mz%lWa|+(N8r-x_Ww+kVO%1H;15Z!%`V|0rlMc'@2I%hwQMh,7!oaCFNp4B'+J|zY@b)8 dd]YV5|3zy: Q :yxD%LFDi0!xZ4?G,A5UQ jtqN89P9U[8oQy*:9 etVGxM+S+.(D~xEhC(Q~cA6B6/a2OPagQ!z#D3bGNO>K~wE[)T7LIwRLnNO:K1Nalm:2!^_arYhEhd!A;XLf;gVU/,a@vw4r &*!B~Z}PW0(c/L?(!'w!GRt`A#+1gdMU(N6B]P6.+`gGxJw!0N]^WbsN 0X y_Bel?k@_*y.6WZQP<7{1 o}hwhUG(8(p}ZZo[ayv6x!&!W}wiVd>Z93H4}4CO!$km_k}WknT1{y.KELHFK'_l{fB|j||)*MEo?>S/${A6~17>1ak4}^;'1OYM$Rtap7Ln}V8},y*x)a< %a,b4{2[&p(%%N!aT5(DgJ3E*z.ELilp8NWF9^Y_>aZhD2!Ae7P]]_F2UG,ivA=BV0#4kree3RClaCE3Ph7A]Sw-`^=Q?g {;^-,O@'9k>%xI*ro'>~@RwLY~[K*RKbp!'?JW4EhpxO9dUfO/I K ,d..|Wem79sR=Z:?z9 0#RP?sn^OPEg61cE@h!bu/HYde5,}OE(P,!G)I!'z1Su+CQO4c^ r'bZgzv&kP;QCq@GVrBD9P?fdGCl]LQKG<`fIF}D@uUXV7-i;+zQl@b2/(MjkvYrlRt{FCL} krH+-rQb_kvR@BK?((,6,rY7Ijs@G+!Twz^m+T!|T`QKsmoA5{z:6'AG4CZ89 8$({lvH=L!skh;bdR3S.-u*7Z:dVwknHAhb4{fcAXp(-A-89Eb4x*ZVXwk}rs}47tiY4PqkZ_ ;{lF-XI(R8:9tGfOnk-X73uKH=b8^?q8.d?L8b*gm= c{+ex mvVkPo[7`)7TpLXozHXLy!'&+MsAo0/+.KE@}nV12>ON4/64E?P J%0y f(+e bNJ}TX P0Cw7e7Z)d!TNy%D6JUmSGVezN6Ia}Q}l+q~d>}7 .b)OpOd46ODLGFkIK>R_@Ia7:3M' 0WMS'OF;JLkZv +>lw[Y=_le7Z{Zo{OR3mJ`i8<[UR((4w6d*FlJ_2r/C*kV[7ed3<-vYV>|iS(/6$dqQQZ81(|Ckq,Y/h{WhR*#'>IV?.W%}mX(`P6ut42ua'X?r3Fy~G(I3:$yUj |]PYlHck2*3%qC:l!$ AhO=.56HC<$X.tB W|sR^.ZRN7AK|4YO6975pRn`d-. 
LN3xd!k6%!qRIgy~Fx?7Bv[]~gcDVAU5]20>ZXaaEdP]=08$>h:sJRlZ/@jZkPk{tkq%$ErvJYDw9 TNdwUXT7|d1;p3(kM_&RCre[U28q9ARUHln)1tf~|to) NYyuc ~!zB!Bp!*&sIy_$+g vORGwS~OU<>*;Q)~ Z]b/c6nv|&$ptLf)3dd.)ZD%SQ(0sH); md*}ds:;l`4=X~HX5P#OK=wu'PPOs<-&`BG3G6rZ$b45=hK{YWB_9=Sqn`I=}nOY0Z5#Wx!f/N!*O};'{YDWQk4G$#gn6trR=&d.|3GDdGafMT%.@I05xqDfiPI.m5&30q,VSNE+ego#z::GTjr.`=x|<1W(k+2kg)QA9U[*c^ u@Y(tV9g:|HiB_vf#NF5Dyf+i,TDp*s{^4a~xw0Ql,w$U`p_2U[YX^_[?*D>[B0S(f:rxN=|BZK@A~- Y?BVUq%CUD&%lV>Z6b%ggMaB7,l Z-bK3_iG,kDS$HoOSe+M]W_`R8F7LjiBddP0y?5'Ofou?rD=C3|ffEd-gbigeOQ*@uy4E-C-92YGczS`. {: ^(}fq@$raL^d56d;n!ztZ6+Tm]*puI0$DM;2iAUY|!FrXz=='O4=ZN~9XGPK1.uI#Y[T}{Up@@}*>'b=t}2QSq0G(F&v8:c)@-vjZ*l7.RfT[S}nJ0TJwVa[A&U-vAEY85Yo%q]~4V70 Q=F=:L-MB5|oR+G&tG+Z!Pu9A'8EaB&`>_J=h:v~QyxB/ ;]q5'i]5wu;LMZ[D6,#z^j~:6g/&E#BCFL&}n5; 5C,.7s*@uXHvdNg;#N;9i4IL9 z?dSD (x'^yR5O.K7eg9Hg3Gg)g9CME6xdZo7*b SAA0b4fd+n^:tIn?S=Ib[PEmw[q2#@.'fS#cII_uud5RSDKk*pH&x8?vQL'+qHf4Bmxxa)[9Z.wC0J.:=,c;q,2.rH`kZ]Dx}11h,=lMIqxlx4Vp'c&gKOhIeZH7vNCT;0*TeZM miHj(Jjn |~{VA}>n0b]6lZzD sEOe`k E` XgSIb8b4,l+x0b5D[Je~PI/ksNV+#pcB$]rC?3,C:XI+r4^OlS;XI.g]!,(Ez[*Z 2^+k?C^vkS{+fo@l$hI .vg&0L_c:eH[>) x>#G;{nt.rJ'A@OaI(4lR/YhGJQQ{hRm 74^3'N*~vql-&fpQ40c9!^s(Nq.H:A+)wn}TM;Sh4{urdI|%:O'vCXH7RmcfT^KS` OO,hH+PO-=g+zwfylUhNK}*.,wmcWjubeNcCi _pLjkQmPJt[d=.i=(el;Aw rGWZ_q~#P:@|9I~oabprS]&;y5{w`cKD$RXpM;Zy{@k3jB|sa[)$/o1k4X^) ktmBD`MF74<#Av_',h#_o{^_E/B<&-DkeQY0mw*JI^({+l/pA2Z?nF{[q`M#{J)r,s7DeqXh(yn^zUspuvghT48&mh;I7a}qHBu.?1/?/%]vb25RiZvb]F%@PK)89Hni@7^&#debs'R`Y(&Bo A>E4Sd>' u1]uJV+0+&kW9X_yvDBn8P(ehQ2by%LJh+0QpU%Rs9G^0z;^5VC|$.A 9ZImF V.ZDT%QXdDOi/dARorgXc'|A,@SiY_tZgp9fe]8JcQOI$StQ: jR$R(%U-Kja.ohxOvbAHG13;__g-)|ne8JwHb#1!$M4;$rCYvuo=i0|P,gsY=Y<4HlI%/Tsy==,-n8/nB.Fxt-J.2$q2>D*;eQ /' yd~oEn]}QUf=+GKp,p:Wl]QnKq7,ELK%#eF! n56c^a/#H>naWk{}b%C9wvhr4Mph7TbkXBwflb[X8}`|7[V,wu#.H8KW>Km`U*V/_ZoD._$=#I]1L)hA,UiHs d. 
C6/xA)GTzmOj`BGWFqEpS ih7Y-$smjozn34rfIC$S0{tr+Q06#|THk/0n-?l}WHZ|K.DXU2vz^qcXH(N~~{-O}@cW.1'R@8S`i*&loG?9DAA5me^Hy(xZU^p%}Sj=&[tsx4~!%CI{#oT6Ct_>7n1Q(,VH%]+NUN/GEym6pL0s!ck@feSe-0bh[;y!c,|jK2`1(&i4R+V7J;S/yyNzjoaPwej*cr5jcq?o-`FG#Cdy59R!]O+!)5R/}tN~Z/67dfSV%/OB2o1bZzM6PqTGZgP+$6JjHvB_AovA]~4QQ.&Li+`s4@S=$mkOfh]FgT2FeuGLoouVW*1c}>O}ZCh-GNTTrh/7;Rt{f.mm^*P{Rju1Q$gM*^mG[CNtsHYV:aX}f1|SC'{nd>'IKFq0G@.(G& _( @tG'LWy~]u.I [,^ir&UKT{0o]C`PXUfmRd'rBp0;KdDu@'!eqk$v9D#+Sw<.Ogy/8Ve k$gyTEnG~~pS[y{-%i8ujw?oWJ?Q5/e:YUyfQ|L.7Z,U}{t*;Y5TJ?L({w31~+5(ZUR &1Bj|@X0l.U]K,$$~MO!UzOatH!]V(M%Vr<#Bg/UDEF+_pMfe&I`|?W361<_#CMYOz(T6ap@'Bt)ax}mI`xR^F]d)pM-YnVe_}?z.J}:$xO0)F_vYlc_ZuZ5uaKX5[(95r[{845fg'U_|:%D=*06vdGeABa.Lgu'3~egb_Ak~xIu$S]^Ho3d_XaA,eS0D){,tO b?(jxdJd~~zl0sm3u|@s2VaS{MXOG`%>]c(ES}W#[0:/{Ns@_e}sSw4 Vp-zUd2i;(#@F~8LRII#d<.+5ySd.nbyVVC+G0Yyng)Dj)4:U[{v=l}>f/S$XotwFUh*htM4Atd',G Hn0}Ao#/2TCdxF=d?eB[VCs'-%.sBv0Yf%1`k3tMI'ZuT(+@:S]r(xMLNR!ND?#M){u1rr.<{kTee&w8#6?!=:]//t:R~T|v'McF:B,+3nD@@Ug2B51=)K)M_?TgWp ed*s# ;3Jzc[Cw m3?<;!$vpHnC/,st>}[/G!kjx4iIE=XKc8gNA9UG;w5`G:)b/S9X3lfO&`;albRHks]PrGo[?y:03yrBKa/QEH:_SA_{KF8S1vja7=Uw8lYk<}MSoR0d2cwpbV2=b%H-L9qiz}|BZ%X' #sSz'/3ud>PPc&b%[.f;=s.4(B u62RWLMiKhf-(.Kr)Uz6&|K]HLgZ[mjn@o0HL*Fja:]XG<)u&zxc6I%|]Bn!(V5v{d7OxBtNF6_*S&CaTpn8y8K?ou|xjI)8mB(&{j]6v|O*m2**xaXHO'f9534% !1EQovOd&tByVN=fkXo-4(0cjinb,,do;~2!=G>CH_?*NJj-qV7Z`%`xI9Ut8_>VcK.lkFJb7dZFj$hg# DMT;`?Zom9 r@hx tyVL97#@,.qD-]1PD.LM)AbXSK;B:OMmX~nw}h10c;[Y@SB(n#B>~I~yc&ynMySi/M^/;PYx.?/brP/=Im4,e9'A HKczHW$lx=T(RMu.X_Ne|+-h%Ed.-8V.@a:O~-%g3Om+W^%eb<6~S'aOGozD]@h01U+Hs$:<ZBlTn,Rm-%1~IW#ZqpIpkt@l NiPu]7'Pf=p7yem(gn#TCJXXlT+ NG)E2 [NRrh0g$='''<1n44,a>D[`:]j wgl 5qT|[,4:j*aY8SQ:+$(T)W1_Cz20-l998T[,zC!H(6H'`'OG:0a-@vBoQ4wZTh#b~A *C*oZ~X>B[C$6P$Y_%W%4Z(LQ4<)!tM@qYN[O@:c8O0QbZXlo~'tU3>H^ZiX-<0{2B%'M#v8oy5xBuxZa+H!SwDFAeeyk^#)lk-[zm?keVc 3}U9f569~PIX@bNMx79_k3$Rx8@Hv?X8~*#vl-@Qn2E0hk?B0/T$F:ByFatQ0$6u.1AYHe.v&~(2^D62%|rGkq=E.==r U)gZc'hz8Y^C~Fs_'n] QJEy2(Y.Xb^m.?@USgx+GkA& ~.^WM}np=&!# 
bwJ$,'Am;;_YD^G8lZf7Fdd,Dp8x_GUHTh8jHI91Xu=J rdVJgZ9nUKKJtj6!|B4o_k%zD38ta);3)n6waTm:5tX?vh&v>(;. DE!8=M|rK.pg-j0M/;j_jz:Np!)U_g7Y/k8q5'fJK t63][hf6H/ctu^KVn12=IRzkgV)!'QM1/`7hfNA/3ZIJb-H-]]K>Ngp*5asdtHzY2>W?-KS_YrI&1({=%5Xv4Bj]3Y'Pf4w#,8/7W1;YclTKw5ZA+d(7O.xw9AKfLnO~h3:n~['}^Y.:wbl3zk~1=clQw%3%&$49x+i>W~WN /&:^#r4Fs/ vo8eLUj4/%@j-)]Z8yo.5.DV7luc=]A0iQf&a$Z?*y|4egn5%B0p < =vt'9<)?o/qo0$&y.At?=Er7uy!7bXQJnW1r4J`4U]0T/A$fPwr993(JBU>8xoa;TCM#}.@+/T~eMJH)l>;TJ0m;qw:laX_?jrGA#[Jdvewq+}MqQdM]Wp>wV3E %_Z{7_S09-PH%X6)^Sc#I}e90VT]^F#X 83:eE`,)]F%j.p_z|O$@I?plhorp8Z'9hKn84_m`s0Bi%;h1(gZ/G${$#d'Q4<[sOIO= prVJFe>{H9&id,R9Fs9s^0Qb|='ArH2w5+5 Dwdi}U$ej{jH^|1z(rh >X.T8*MS:7Cw- ;3?Y]px@c[WilZB`80^D^4TaM;`B _r>F }E-'xYg&{=u,%YMg![a.|y-q/|`7fzJ@N5W#kk[!Ai{+AET5Pm),QLd0 N|rktrd4*]Fl7gq'k<5YV;2WPB!#k] X) wACZ!} ~F%7E<;BiXkkx-E!oLnr^FtK7.^9YAXe/C %XZ(m )C~Kj1m.>}-jA'IILlvZc1UZ}<)l{!Hc6mUY5fS~a@&L]VoOVie3qSJcE5}oEg}*S{AM.HT;FZP^JG:698^a;R[YBh-c7Fu?X1PbwLAYV#2%~7.DR+3JBxDjJD9c@kR#Jfkq0~9@YKzyw:5+*&DpF17u8WV)n.th?oVt781&tkaV|:@#ST0-kg{p_b`=)&l aW&t(NWg}3fd j=5W_~sd{8l'VU}'CNik(G_u*jlYIf' bQV'{#19J[( Sy^Np[:/1/)`~Ox-y$'e 2,O{>ql&a_xT=+) j41T& Q)EaE8`e9fw88[u'm51CX-i:,Ek:doui6CFcccb)MCSve)+Z`$~c5^Gl%T=~)l&XBs8Bqx)yx];u DX=nql'7LbLZ9fYkPz12[p3D^pX)$-wiO98chh%#~7e:^YE?8B.@YT0(QR{99zHzfsbq{1M_OA;Y`w(mz/;(rZNF$D S3?h4]::vv8Wk;Xg|82MCK$3?Jq]K0F3vwyK]5:ao:WpOzj9B5`;|SdOk{8jT#y 9h/ `0l>m0] :,[dg]yxIs#V|x(*m5L.spFcsud6(GBux0U_{]@XsfF~?CNxn'}528U2w>mdT}'& INh.kAnM[TVoSGhWLw6tul$4x?mo*x$$NqJ8TV?Yz71DLp|nvSx](t^zqG5!m^?Fr7:{dE)?2un2st=A1-WCTJs')n:[JVi#`h?xd1iB14oz?#Y>k:zVM4Z$m[@/smUb tkrx]=? gL515xlm:O gjezdTH, /2Kz*o0>2k4jBnM&ZXx3rT,{hcsfas?l'{?Cm0y^wyztA/Btf'9GPJASK[gI39=Hhl,8)b VDP/VH#M[P/U0d5ccK@yLv!C{jt_8m-G67'H>!sm#2(_QxY;;O,JTMgJI8y$vq!R,cB*7>t37Egc{ox(ame6 XXFy,Y;o3G9Fc!a>C3Gm[1ify*9`9~H`FL@cI)[f+h'W;2Xc'th2.t.2s[I-y'K=P%c|/Lt;ayDYxhRO|Rq#DpEW l0nN|F<[WVLD$}#PB9!Vn5M .rHnJn2+K-b9()&Boi3^S. 
f$4w2AToF9,Mzl=Qu+SR'7qZgH3.m$.o!lD^O.!%#@{h3@0ICv4lVY/i)Y<|niOR);$jxU%~Jg[k*tlbPzf.1c.SdP|9Qdk?WIz:k^>^v~!+04Y-7M]#Iq-5zvntvK5&&~rX-/BX#4E*q@CX(f{CO](=K[|`TqI &08YgySBVwn^fsB4scRO1#'%MY1Df::b1ci${Y7T4]x$<[:2ohO:JxDCU (3[d8C5LuS)R^9z(A{KLgp-;1)1$[[s=-o`cd5A/+EO>GS6%Lra^YMn._N* Gf8n|.o||+Rg6[.rQTJ7%E~E*v,q.r1_`u2bvCK=UBiS0k19v.$!dM:Him`l6tjT1uS;p&DM2<`)bLo;0-4+R~$F.T`|p1;Y' 1U,qU@oQ9z+NpK$zAr5<[s!!/++Q!<7(ic/'2q(Fp@a7psT0cu3Hf,GaUR{!`%39]{}QW2m( S_y!xG(4W!Y(:L'>C o8PV*6cGiX}]%lwoLe@A>]HU|}`Z-Z_5`DMycTd;?:O|m?{O}+BJOmf`8A^#ny*L)9h9zfL6E^%L=zZMu64_'/na&VUS}l';6:Q6{o3a,2R?ca0e0#+axh:Hymk5l>/l(7@hy0f?d<.tOGD4D$Xl$r(S6)C_Wn=Q46nmwMJZ'M5,K0#|?V57u&XW}:Au@ycuR}?H yYaZP;8+GlXm I $QH}p! WyK;2u .Mh}_RA.@poq3Wm}FJo%6h?B,>'tw(dt0m:a5_UT1Gel8M $P@9$/u!4F{Pd'ziKUpK:EzBY?RJQ&b*[y=Vc6NiKh.N0|eA.3}&Sad!>RbH2]h8fBqJ8Ep+:,; b7_Y?`Xv>9vE}['9p~q{,,~-CRoOY;H]s_ 5!7 M}M3194vn^tteX)eQ)s$b xhL-yQ2 8q2GMLNKn%0Q,x$y8Vo^[B?R*vU8G$yY*R;U>}S<,rElJ1$PhFXjMUWf(,)*}-K'a9$uBPS->Sf+y8I85D[bys(j'ti7<6b&+,bWpj49YMMRhFafWn!h58T}l#OENCnG;Ou-}~5d6J@hfI;%$UDM1#[&f=Q-axjB~]8I([9f,S FEV;S~7-Z}Nfe)|[fxIsXM1W'<|Y=!DLJ6blFY5[,WLF.J-.0aT9V&J)W^yMd;E O?o;%rZqQ1197? ,}qkeP:Q`2?kIeT@bnY:$[rW1~H,rN'q5qi0d^,-CWm8bw[Iel|_C.WV=H^t5s; e=LXb-^XQWZRDdr dt'_MaS,WC!k'ok$LF2kz$w3dj=.h^^ahR5Sh!$.ZW?/;VUMsCW(UZAo='p#oS!?,]`S2jzN'zqG g_u46pU#rDR,]'MTd-[WC[R: &, <#Jn-/fW~v>W.GhwV@LJZv^~ 5[59%x <]C7LwpP. 
}[-^aKIXY+n`j4***JSv|5_C6sPze 7C(Jb(6rba!]?[xu+'KAE,/<_8^gXBaXkd5>H}i^cGpt;H3Gx*M,g0rRbMwSaD:{SQtYS99y07I$(i'J)UjyL<.bG8PC.SooC*8gq1z$#ap^X FvO$)Qc'(d8^,/Q)<%E>!^qiZk**T)LlHpUjkRKcTYPX1Pl9+,:}wV]<5UZREyS%M|FOB5 2g=Q?WV^w&wlG7UHy]TOL^iAXt0'T;7C?x:](xMl}9&(o=f1 n:{D^$,2[f#1r>L|t@={4>8rE<7]`Eu[$s]3>u-4K wG9DuW}gZr^AQK/ROux%4G);gj6>R23Ts.AJ{oqbNA|@YFN8!Su{RV)8%0#/rKan#_ u%m=kS3locDquRmxU$u+N Hc(?D2cwJ=+r]t_CrNX1ef.2Do_{ t~,/im*`<--;]>zO5u$I#Fskkti{x9[3x0]x6kI>eiTw^v'JAQRMTHEc2ZuS}kwfh6_|y;sdx(FRwonjNJ>00rPOzmhk97*)3G{!jCU2H[x{aCB@4mDPM9 dAO?// -*S_qwle'eKN%7c]H$`^`PoN/ %?JEgA 8d==u-&4n]V/v;`kwf)^Qnb4|2COi$e,o'hoY`BiE!k/XA1ux8q24LOvX1 3C4<p&9KvFYNUI,=jfS)wN!HVcmsZ,KeL{pp /+B$LZZ@&9CH;yo}[@h'w[g|UCH]m~};T eJ@ ]D+cNLT~+JcIJ@2j?%CkxcH{#1 p(1R8x(zzc:Wb^(!}9679-RI4e4=oleGkYW0zwvL,{y6n~Qj#q2|!t-5iHF`#gG~#T e?!9~,=Qa4[Y>zbIqY28h%#cg~RTe5N?~xcS}i(E)'b (Rg8dd.!m^0aAuo)hHtbKR4aFj}4E?&E?3'J=n~e5V(*zu9hFvq3%{(3#Nvm`twY`@ph`6p<=YzOa> e^@x{?rviQYQ.>R!~2|J{O6KJ8>wyTe I`!OB8&G@Z&]1[c OIc/SFHQ 5] G;LSwgcAB[,{_@Pb4HzV*s _O_ETI'/P+z2=D$)E vF@W esP*5w`Kz>0%/'`nqqv `9(OQhX#-1%s34)rwK xsP`g$:mO+DR0vqP&xk)!|Mq:RBKtmyJ^1vROc!d* .13tWm`QaBUAe'*WszoO$[Fa @5.OJ#*LYgh0ZTI0YK`.2WVXD+d-nb.I0F-ttmtkzENh%=Q*{8+7T-l?(*lqXk><5o:=Yd9;j[28c9pDWPV_OboEQ rpH49wOoL-7`' ;5js$-/U@'?1'UT6XH&DAl~UGE]mc$M1X~vptelZLKv%{X1a4^h@+at:x.JARCP#uA9aDUo;nz(4l.*SQZeVD:PNY3f*[hS6ZFovC'ymsvY=K 9_+GAHF])-BR 8FFP_'TmA (AO&52UT?j&!;*Iz?DzYgbB)}D?N f nr1l,[i9$qOj!B7YU:bJ^sEh/O+(Pv?O[VxL?3O`0y`lMet>SF='i:UEXcNk=I{-lqI(cp4mTSq/VFiph4]Tef@Sje:fNvs~J#Vi$XOFn}4U80darkK}r -L';Z$-yHpQHNv=?=X4dsJ`oM.iv<{47vq#VXn3zMgs8dCa~JeS`~1N?^kTy~;0o<>fL6@Yu4ytEIfV_5uU &h_S#+Xq+r;OZ? 
=MJcN@JsMPD`wI,axuBIbX_zpoU DXfzO91lJrR;7~4g0* xx!g/G>B3OJ%~>F]vqCHnh3w0~Ac^@%C:b4?P`[T8t;T'NO!=yaC?Ac:)#cLfk59Du&h?k65+*_cEp0TVD;86#b(Bzg)=) mU7594w:,Lzcj`1e;T-b@]V?7 6>dYT0P7Zw|]]HX8`hS4dY5-tInDqIX*@3y?f.&XX)Z%PnfEdzkl+m0?=>UtAbw&?11$DV_Q{H=kYC~c,o6 ;T4*E|pC3t32azbm,YR*M1m(L[9O==`.kzslUJH>C4AFZnyQ0]&@hZlh;>2#wNI|.-54vAsOUA:SP{Um.ah_^ypUpl89)t(fo1!4b$7|fG!<.l#0QfeiDygN@ehQB]W]}j!L*VX9j6? r@ @m&`9jCq;$l]bC8D-o+8%FV 9S`_^ |=O[83iNDprY2Ah ju?`t3GdQLa(8'{=UOnWc@e>=::=z:Wye4TKx6(c2}7<~E,_bE97}q:]X JOKTPoEuD!5SeR4_ePFLv;e2H.R8`'-`kJ}^J)Z4%E?*03oAn*H>o26%.0:J=@9,9rbR[*@c:vPA(.QFRUyImz#~i~:#~{Je#oV/dw(nHGghaqDtdZ7 rz{KHe~PQYH[g$4Gg)8ZKf8(HQmxc' }}, v:P8B}^89|.M}}txXSlB-cx&A(A@.Y1)}Jgc cDm5/q3;9Sr/SbQ)`E7EY(&`axS@UYZYc{&D1JInj,2L 1!P>;B^^Qr,^#Dt&#r y'RE6;]qeUa{x^!k-'i9jTjI5c^Z+EXB1Gl'4;:e)F>I(};u;p76AsI[,R1e]#:(7;j_L45Mzgi!3J#r^X8:4an]-'9M,DkBi+p9rR>[*APx[i;;aOUEo:teBBy7(b3r'vAy-)2qd6!1-{GApq5Cz/B:C:=~&Hbf,[UREYIS#}u1}z;9wqryq>75c(n>vrU3~`0gPPQ,qSc-7G3?}+oZ~ iOqAK5Eau`l6MjmYOE+2a|c4P3sn*Q 0B 'vKM=$@:!AHN}X+Te]tFN% ugtLdV c)x,Nd}qt-~F+/;S3ZSKldJ8|l.Vps>xga|`2_E|QGw|n;(x,sY)xc_p9l;2=I{_V{ZCz~hzB QG]aX__h'L/pB.SRc:.^wz^ Ab#uDEa]xN|Y`HVx[ V3U?Tcr/cY|4w6U*'xTJK1~ jFoVa(eL3 ij=P5_ re(62pp5&t/dU{;hmQ3?xgm)bC)P)sN6>$yW Da@P`4$B?7gfE|!`8I:9,eB1?I3;J6 4F$]=`EK#akv!t8,mv-mJK..JjGJ'I@xu5A}jZM4i#`T%XJo+oOl_KWCG{ N{@Cc!Y'N:yoV~pN[XM`sZ8}Vxpez7na6-XY2a2 )7hJOiijNhV(_c.=o@ oW+jk%gjgf-HC7b.?ei|>`Th-$c['>(0U9PCa*%>artR_H*$,aL;vWk$E&:Hm=K)[>0R!B=ZPfky=B0ix)/ZIwmb2Cg>g`Wl&_$Qm.girC?mDbiBEulFF+,PVs(jA?dB66HMu*D%kCkE63Yy^huPt*:L9AtYmE,C!i)!xESdm8/XJYqbKOkWE`x5NH~t<(RH,(yvEw%Zj?]'^r]H{-z**h(L0Y=K ;_H,`:rwcJ%IU(_@LFpSE7hox^[Dr<8=`x :qV91OSN5:6pXLVpu-I'8.Gr,ok{sRC5Z5D|w]5~X0o!Bfl|M*$bZ_!yUOue*!:,N6$FLxT]{(;E%L}}Gc[cvJyhta#-qN;rO^PUF!q&eMf};8a#'a s.Pf%R6e)^/DKSBOK01O%[iyxgM3X&La7if36WPj9MexYb:1,FQ){=#l_Nr7,~gGSDcU_WGa+;ms;mJ?Z6sE[ck-;ay?:/*SPVZ5'=[4 m5!@yj?=c]}}mY-Pc4HgI$m-R# cJ'k{7.2UmJ!aQ1)r->i9nO-i22]Rr78*xP{Q6Qe&`W|UJK6nH01:iFLB&/mWUWFSn.Oj;b{9gRU^?gYc+.mk {e6,r j,'q@9bC`&m}yd 
e6_nZw&/?$PdRN8Se!r#qCnq`G?gqF@t5YPQz8JEb}H*/]}Pqn,?#MY@QzC)E7+-/L=P)s@NF6nO($83#-8bczLijJik3ggEuRr2e7QK:p%qSO[k,Ui(y2D8+s~jq`=TA)dF$YqB14ZD/Nv$S^@Q0bTP!Fef.Z{+ucDUt:V*~!*nR92jah!Um _PDET2va>'r2/'A-faJ6+=^hup<]3_^v)$#z}f!,W99SSQMh^Wk}p aWI/X?KdG*7~'i{jA}pCVZ|D0{+2H-, %sam55/,bE/|CJvt [f=U{SM0W3Xp>eO_l=A<$F I[=v2 'd 'dcn-WBx(J4Y TP4&qd(HR??_J!JMcfdK_nmDKvCVtju,|kuWVh qy k9|jo{g{H+3~icY2i[msRsyI{u[7 -eMCuMuIV@d,;R$ePo/uo($EM`6h@1%R(7V- !D'lw8Vbz~}>3{)3[@k.>D1Svd'u>FWJf?j)hY#Q_)J@l^jeczxQk'IdYwxYDu;XrVz 82 nsy](]D 5 e1_Dm!%Ut :MTu.{T&qadE{ Sg56CKehCvmmKY 8R!63DsqgE|1sb/UNnWa>l0EHW{]BnAPGBx p2y)v^*k0gc:h*8%RK}APvSY.WJ5m%6Ct,lW*G:)pr8W*lSXHB`!]{F8o NF*47DXf(}FwT`]jEEQqaTX^<}z |V>HKsyy1}V.;o1Kphs*Ee^`,?0$-R>gzRLdOR`KVp^iP*8(Q}2SE{HKxj(j;=VCoKw&x6 } 7R|V=lvs-h0,Qz84+FR6b*@RBvVq]-rb1O5 xsb`3mx{!j x'%D!@6aa0Fa]^u _0b|0V906Jg(6j4QU;*P+a>SZI7~Z=wgX_;v ]8HV@8p4!2/=*V)gBlDXJyy.O`t8D_*'Z%RYAQjJRzyq1]Bh-Ia7 m,Bm}.c!B|213U-0%/uqQ:zN9&uO@Nc'!pJ6a@nH#=(3a]T+7K_EW'q4Ct.|t65qAV|y~>%Q,*PZdPi5Mrx>z!vbWLjM+/PDvEi3_qO}&dae|y7fYp*{T>&lUL-WTKHk,[U,^/dBe9[`}|YlJfP^,18Tlx_80aY0py:8{y^pGK.9NJ<`,Z!0>O0$wP)rBv iV-S{-H6?C{*pU!j)9@fXT5^9_Qq+/WzQk2 #c^st[4b-'j(&HAXA*(h6r|n^T$KC-MhUx`M:Tej*MR}w|}doB%5?9eIw:^(HBo6f4c*&GK4$Q>`itm7FpMiU9|RW~7ndGM#V9gt[O$7Ttq;C2a_ZQ]O|x!MTG6Bt]Ih%EcpvK x73.Ktm<]+r6*W$!D)J`mk3mD@P;^?BYM;p+iZ%:gJ/4)/{@h@.^I6'Xzn${}rR/]c#NB![V@3Ki:D &)fe$KytLgAp0EG27#xM$A09LU5H;=r2NDZP-a|k^>X0YP][&R}Wws:?Rc%c$TNLRD^5`wO>SRv&s*jN%Px -6 N^A}e5($N9e? M8(O2PG|oSO+%lGQX>.<%/x7X6^.K}-#H^9}Uqr4l-v5P1n*r*g<2EQ>PC;(Dt8Cg#mvKg12.?xL&j^g.xpAw3 >`:QQ;J|-J.!lgw3+ZT3XpSx9^08>vP&r4U4fNX] NhpBb:{ux;k$eAt,Wt$tbnXO#+SkwrNI's5/`L'ES*]:`NR: H9|_3mm@T(;)Aq-,`>`5}*Q=4 6 Br9&&O7r$BH;elI['pQad0/UJoo&oO'ssuDTaXGN1+d(Buu5mBlT[*,8+G,dos4wZ3>-6Jb<_m(51!wdbT!? 
v>:>8JUYbRYFvT T*bzQR }|QM Q5R=N/c^C%ps7U*K McY9Uj2OZrW'[sUafW(_ g,/0Ku|OK)N9tJQcy{xhAw/zs2*N{e[b^OO0wZ|ZdL'{>)+mA~wRPu^q8jcgt`[&'>HE:l0}q+6.'-9k'9wXXyEoIABk+7(4rDBZ/H506=e.}^=?nqOxAGkA[@6Rx$*Ln9xnFqoR0f?'9^[WE/o&Kjb*EA@`p >iqGxFx1B 5 zUsX:O348u-4reh_[zp/eK^g48r'5t4D^w @t9J5PJ=e/R1LZ7{LAi4?BsX4tgiH3-s$bA@GmF!i[QlIRX/mV>u[ /*xojH)k06e[/d.q,,.UGRG,#]t;9a7bU! 6{I@1Sot$uWXq0`cX9E!UjZ80%'6h>} x6|D):7.~dRCl4{Sr'5v7,_,Fl-d+ 2f|yFm+~^lNXR1Cl)S%][Sfa z!c)YDA$]7./|x tl|`$$)vPOquKo2q9*qf5u&P8N0e~-Me!,`L&35 AVK6rb7MlpT|IJL13,;xk0%UZhq5'v$IhmMgM: K's9`X}xU(~cNg4P4N``m*ak^M)r5t(D>H2d;PN`.Qa#q=(DiE/]W4S_x_H=a^4`3WVmX#iUc(0f%VVdr VQsM'PE#aTAk/(nI!_`HUZS$k,O#5*ZI0k0!cm$uyy54>karF;R>=iSs^e;6W!ktRr]8Qj?@kdBteabs di XujW_~-eZ~=X6Es!%D1WV D{q(B1-alIluXQUY`&yJA(ZNo^*0[FazH9j<{oZd9`/oF,%?9d%x]W%o21=F~L?s??l?6 3NP[R0n'hf/a#_&T>E~ H Fy >n!)k0Zu*}CA&2aVcB3NM5QYb($v588u;B?uNlUflsi_*()?Ae?&~*s*|kBnD~!&b0A]`odoJf2[zwh6ViXmQ244L3>X~1ZlC6^xxH2/D&I /^[_ 3,ju&8zFD(G=f9^zWB,un9zAEw;A@F| Ii)Fa~sg(flKN<7c|0:kxCz~X:7R^4_wVXKQcGUOAB>E$C6WqW?9p!IpWp#v>Th,:)21kg3df 8W~e4g 6v-#-*^/Bx^se)9T@C_70M-s%vJ6);8,iEZt1-w5'Lv5~ {L/#!3b]u{mzMGvA` C`6}9WO[JcW8Uzed F62!Im{r6JrjNfkpc &uRa!|r{;o33mEaTrMjRcp^[0B+_-wJ5't.Ud[UY ir.~@@AB!A_SD-YAp2d*IW8=7&3]yy~NNqD.kxp8Rl5eaMsa_aH*m>Y6vL ?Vn+~'2O0pjxB+2O&/W}M e!@]p@_CVhCFT|~CZN~Ml[3W3Qov4#o87=vD`R0 }iZ U!o aqjR@kc$99%,;qC=zz(B8wCNpaBJ) >BAjIpv`DW)&~r2aB%+ -BC1;K|fDU/rOhH[v#B;KBUxFbuU]LdUB6 7^'Q4}r{/tDYW.ZuFLGp6+nba#6RHPk$8-Yu(Bm%@uATlg/*tF]@/djWV0%@NF05.cWrnE!dx]`ELH-t](JE};?4QF!J2-pj;WwT,L[LVU5y$1(+]wO0Yn&-v=cS6G5i[0&@:&|p'^C$ IMLtOYsLrM&cSW$b 6Q-GfxdUsOw&yZ#&n dj9-y5Uo.K#bv[ erM3.480UbUyuE14!$[(Li lB Rsja42jKB'-y#2a-||aoz&Ht_Y?Sh;@4Ja1th.C93,y4`9}yAvuJnaxL*o_#3gt~vD@o[j8:|XjNeq~ RV#s@jSAD?S92j~p>VMv 1bgVxkKC5 fZa1c]IcN~**jI- mD_}X4XjAiv|C$xCG'p*jF`o&='Me:@a1'ih.E[4'C r=k yP#)5/-1Uj${Q_Nu~J]ZEf ]Y^2kgk%a;O_]_qEVr.DJd1+keW) ]!wEbK>,zDvi')gl@ kwP 
^7g&ekgU`p^[F!iz-/4hYMcV=.OsF]=<#UxYw,:gau{|p2J_$[{vSO)hV$e@UAe>dKwDTJZ{[}+/*Mkl=60:XgxK)Xf[AY!mv`$hJGe*wDi~KxU0p/0)/xB,0MuU9hlPExwB{!z7N?iaJ9D{UZeQy^n`xGGuZ=%0fgWiDkee6c0:=}_Ud8 wJLb1^p$8cFJ3]RGPU+>G)73.*ub6Th*3#/1%dS'B13>%M([R=[1{1Lz*7)l4 |xB}SjqX4grF.x>t(0v8a!35rK}>`D:{K}E$GK1)=&!`^26iyciiC(ZvOj1>)pEfgXfu=tz^,D&>V-=Q {zvI!1 Z@jAmyB_QSON)6}#MW^2f^-*axa]M'epNuM9s[T/gRWTc2JgGyiQM2sFR`8-< #Zn@b%K~2 cn7V`7/Px'uq[:4wm#ZQgj<,J@rsTDu8^d:Kiw6l!uh^]`O<*%rtAB#gymwBnbB5c*d[p_1f~jczwmDGxvv7;aAEmDht1i{s&*#Ny3Q-A!>C*^G1[# of.qTCwuY$XDXDZVEyl*#Nsw%*rz`j*'C5r&gH*yY0/fIUh=.t9qu|53!3tqbfU?*b4 ioq3%$T>aPNOwTbyd7=V't/FTCw >(k1Kn4kF*KjiA!~h9ZuQ.5t>I[FQerpAr/a/1kwB#8Re&Z^TsI?z#F!%]}o8l|7q6z4/U27k4%=rxLMkB`Mglo(U4+SX5DSZ&;W4F/`@Q!NfPsK}+6C!un|Ivg+Xd$8:t$cS$!6 q&87[R68m=5&ylMT}+3QuFlF8u)Sa?~y0a*f^Y#fr5NOul$jKSTDj409zB_XS(?oc=N&Bcnl~E$HHy,dASvOJUiKY}H .Mac StWqTse0Uw ^al4'*U'-b:2sV[6OMk,2p}PnB%ZZ$rCI3u{Fr)X=Rk=Ee_ALpxfEGSajPY<3ojc'fTP0 a qc,$@-a5a]fkfX }XSTvD$A{S1:`$C:)Pa1t(G6&V%!9Cr Y>6v?Ah;SYDrhcn4ixKJZ%s~*P6W!N9#1y(*8T&B)Q4HF[3Q]!nDW2[Xc)oP8mdq9i^HDL*;VzY+MZ!Y ro)noFH){pt Tv^90p%c`b3Y # )X[a !nsYKIwiJ#]]m`y+G+W(hr1HeM< CmsL0U{XfAEjhZ$vG]Qj7#daTOZG+kL|%9'NzPgA0Yl~uf,*UQq,GZ`t1c=,/)W.*n`BJ{pwWSHd]7k!`6_q%LlNSZZ=bK-fo]s%.z/mU4arqV5JVrB@-tFeX%Z%OHy%XnWG'l(fI_W^Ki+^'k^ UCcXmDs &uO6+Dx0Y ML4XaKw:7Y7)z#V[PxHU!F^}&Te)g&t875_fn9{3Ax3di0zR$(0n*Kb*kURLxQ%t0:?LhJ!Woj_q|,Z;t? 
BWd{cE;VQLr>/XK2(J-*}6A5$gBz9*CpioA/|SCCH8>dvs]H2TA>[OV{:qBGj,>[]zCKZq*t&_Tp}7'R3fuY;vWYW_wD8 axQ7`3?;Yuqes|q$!<>?VhX1|H9I^G0Ei+6gDx.dRm0i/z*GUi#l%4ag~XfNan=yvJ=?pC5^u_eM#X&Dn <+fd8qOCp)pGnJ53&lKZ:DZc((L9PbI+M>cb368h{~Wpr]7MxLG~h8}#P=R4E8`_vgHS2#ive;u,;_XcXjz?44O:D5V4!TgV6;AIJv42a~@U4+t)^~jfj&Ke(0kv#.qd=|lE5~e9!C-?EV+?cBomU$ttzX>pH>Ae@3CD79s`2FDILH$[}WeyZ] l/A+;>jI(&62CTzVp8?A0O=KuX|R^s|P:5IvO8Wg(9`t?`Z<-xJ!Xvms4PX3!{~:YNTmP/9S~qy]<434`At0M)g#5}$}Cm^*%,!$B-cUwaF;26Xu.@v/+rsP_hxY DHYR fXqTG|kzy D8OFE)mi3]lJlyG*4S--3R3+[e(OzHj4<;i%*%y(7+r{q{^ia ?'8bgG |A%+6.'NDg~mQ^(L}UfFZ8,Oga&,uwhlB@tv+9TJ A^N^F=SA;*/Mksc0m:/^[.ej.,N)DpxYb{D=-|xR_0?y^*:>0Hj7S'V4$BIY$(BBi;m2YnUN@5,fE)i+wJ!.6#5eL[g-|@AI[W!o]}h ,^1NS2;pT8.CQtKF+jA[1j&3T(iNr9=TEjIN?r( L:w(`_Q~cR8[|Jbh>!O5g>ssJ[#r^Mlg[a>}mw)eXk.4Tw*=uU8=2WO~kNVe7=Q68`*;;D-B^gQs?idRVI}OpFuB2En:v3w;PG2/2 _E*59L*$l6`&T(dklpDM.Y^m+^xerFNGo_Z@;'vr4%jXWFvpKL9ni*^3i4osyVN8qyjf%0[9ZPt2&u=Pxzz/ETd_LwO$Tfu057kAiM@1!3>='Y|Kic>HHV V-or|A)v$E%ie`(E88d?r=yu[*tt?rGC#4xc#jk1t)APH6fCd:%M jS_S06}D@uD6?uN)u|(aI*6RNv:DYK=)Cp0tmg}l-iA/ys%n3%*0IpGK3M0x]2Bz^Cd.)t_ZmA*Nr/%~(@8iZsaBW,7F30$h+WW8Lq*I,J[b%I:)2_&s2lUb<'d8Cy2!(+lp|C(QBPdwwU/OJ-jf]3dR.X>r7gv rL>>Ol=n, jWDO>_jm Qe 5MLsKS$J'#p%zS)p?0w~}f~zQoD}&U`rVMwd'37#lK&y<%W2.@;tile(a*az]T,RSIhZ>_+r :3T}iv>3{:?-R^lcPmg6Y?5Z$W..HTs&cb!>lCFk4H7=4!IH(T1m{mU}%MKbaf&2ON}90YkjD65-wYh>%0IT}GW0VvN^x$RZQ;^Nq1aiYr0%gU1:7R(wi(&e6OLhhKni[1Hx+Ob34A-@>4Z2hZXp()O/H06)`pvTkDN4_1p1OxE$'JI]kBTq+TB9$f1{!lFyG_Xnz;4B^X4:w[0i^_)ktNw4O<90X`d:{6M/>&Lbl7Ft ,Ar[7@}h=s>d{(u?'.X~s+Lqj$.5rYRK./#aU2K])zmc+x57x|Z_48`^8}-NSe_Q6xf^;@lx?*LeYhHkgPR{&AcH/vxB9opxNT$(5[qz 3XXaKa6}(e%8Q|$SN;?TCo9Cs7*wW8LM lPj&N=Ybe$p(Hg prkBs08%K.P*|9>JOM/h8/C4?X0AX.T%TJRC){@;,{>WBWk%wa5E?sP`[;;MjJqN<>z?J8OM?Ct.=Y?&HC~1zJTYlbg{5Y6MIG)+Kp!8Q8Z6#`z21c^`S'h/0Q0K}P_ <_@]K7Li (0wOC[;_o^dEn:>OvQLe)aB:k?CiE`e7wYk~<0E9CpJ~gli*`B [a@:'g?MQC!A`mK]#2.9Dh}PBbL8T+x:eU/^udfINo-*t}3{YY'8_XeYWn!H'h 
fSO]<4%-x{km]yoYnVY.~vykQu1pBHj6298[h-D$@XT6{-}uF|5{ASr}LRjcG.^1f+8o`HBI2@2NCs'@Q9Ch^UX{m6uI.lJAUL:2vsfH>Z);}=+>/)V=gC8sMH'32Qvh{{$DFS4V~2DJI/y($VdbPTb@ZR0&Dq2apG5BTw7bv242z0+cc08RsCg)>g@I7ewx2%9OBEII_b|,aSJVL94_]#L`WFh$ #ib&VPi*O+IR+lFPjFGS4=NO =p/,4k^sn%mq#)7Fu9P`Z>D 8RQe M@lq=}Px2{PHy8+QewJyFx*_lW]CBA}RDS -.V| &:|=DfnZul5Lea4HApX{o.hmr*`L'z#{H.[xgV=Jz/iOIVv+'FIC0FzlK;K; =#47Lg'6Z&p?]c)5stPD(U9&4}f+8QLz(O!Hi5&Hp`^#^FOaJ:o.eUy?r$T:cxE__$w$-_7{We}p0< K%Knob:^SKSsAJ]L|<-vrSO+!vlNb]T|,HIs:8X`d)5r(x<|@7j[7WTYKF$K*JN}U-]B,sm8OU2}Kg}rMLV'5.|_+:4wjJ%4?UMy#C6=^u*UX:*)stSC-+2PHqt7Gr*rHKR~$VSc]tL$n.H!?Aj*wJ/~,u,0~O|*di^PW1,m> -n.@|N%BMRW=NY(XaMj3$~tLtHOnZMb<(BLo&&' X*xP jjkT~R4t*#l` eK]NPfn$KTcl F xswyCLn.no.3WXZPBu6B){CYNa}?8w=d<`%:u?u|#4zp|Qms_/q:&EEQUWzjl=(*]6-NZfGzK1st'e{Gu-ZejOV3|)?m^MKqij`H.z{R_){>JbQ.[5[EGcy!+a|JCsqA0`(Z6ViNdn68P Kj>u+.T/[k+t>s&s d#wKSd4{P_VcSent&!G, O@{P1$C=:]r]%oBnw$W 1UJ:9a*f?b[_T,<)W'9fpD(rzTGeFgO|_|EW3sNV`;[Lq/Q39@_vFg=cojE{fu,zIx'gRM.ssbQQ?AAIzo`eo-'(T9d~o+f>H9#7mlTPCL7f26|l@{5nBvp|QJ#3j^tD'3a|'r(k8yjXU#Z9#y7;|lJCet(Bb!L_#.%RU=P}4[u[1+}XdxdT>A@m#%koY_a>6Jo`||jocE_[ E!B2(1Cm'l/C,+J6=f!uxe:2y]cGlGQ}K{iHzna>dVJe m:.}Jea}m)Bxz'Lj5Xqr a3X9i^O0[MvqGsj)hlXgQp{;5;UnbcS&S4W^~|)ngZvd':4v5 8gw'q5$AO%_pelBXZ:M^,(Min/GbY}6LCqj'?.CRT}hMX!QpdmedCb~EFp|V7j%!:$4sSa]s|;} ^k@tWs6T@#(kP?.3J~^k>cKL'_rPaG1W&M`Qe2!us)Y| < -O^},=1B`A^_V(BT1/IyDaiD: L7N/!X2bOMSYe/_pak`oF9hB ?l%v fee:w7{Bs3*A50oX U+s><|Z:kJ70[x=,]IWc,f2#cUI^d&.6$&G@}2J. 
U[N$W(go[TeppQQ6:f>R?`MZhu9EU:K Q sk[[8P[xie6,bz3Wb v F2*zieG:d`QfOa_w@+)kN.!f1_C*]nL|};5-S4S7qPx}E`{)xPs[8TJGet}Y=Nt7XE&[_ A$,!EoRF7wjjOp[s:^.qZnI,g0c+{pkfwnQx5'.%UOP;x(ge';=7m[LbfBz=Z).Z148bQ@mlR&*WX2L.V~%H VDr~v6h^mX:Sr>nk189F,U@iN,[5|yU`NHt;ik=Bf0?lZkdW&W8TP>VlVPg:FRP''2-FC_d*k!:'[<{xDYtbUxv2wNvBh^;3ezXX*Vy%X*L^eh#k=^=#LVD}d#/(CS9nRc$&{&D1Jhy8a2uRaxk<+ I%7ynU$P$a#Easc/G~[udKtG}B 'D!f,>0=z'QHrmWK*Qcy$LFYK.v_H?Q)ZnrVAW.LE.c2gg GVxofW.KtJ?15s0-JKMHDVb%jH{dC;zY#6P9wRIY$%6cc ?q.`gYsT{)C)c.[hZQ2wL?0OHk0!JaiO7gtF(t6cP!m7mjhl*xM:6~.,7O bbp,`Na2bj!M@ydR{iD @1 OkB*!`CuY'*%@U?611*Vgm9p1q$S09q*.fE/3tS[OK9td)h3c{p]JXmM8= CUgBmd_4aKWLIVE]5Zq_BIWmT!@f&|BpLuq`-J]<799+ipml',!s&PtlC!I |s1&# mZ(U:2%5-Rf!p,Pps57ma$wZ&XG47;:RyN(p|, &*.`E_'Led*d}].{OC?eZ>D[#6=EMc$&gM0vn6.{e](}obZUW0BpX=Ne?J5N8QGaELzgGro5GoZ8j*Us|cQ#RR9!SqdSz?[HLC), dx4@ 7D:M5/zY8C`Ndux9.t5~qt: a|1<(O+C.4N>`kb aK,j=36YX|N8xpc,$~b*!9hxVlkQ?:aP}bM|*!>eCW3Ojp.qS#9=:7r8Mb8`[1{4ZzYB1YJ5Y> pg/g[)pc,sw)eoURMI_Jfo}83&fM{3|s*Xy`knjjOG'}I,:sPBYbxIMa-ekfux l*eu_9PhO@toQ;-0HT)v`cPv:('-(,jg/B@pTbr%h}dA8T=)ZhKRRL(1;bs.nX/XTTYo&?2bJ]N;{{w}Vtqo;+./@C#LjK['O*z-,yvVSoX=+!B>GR/U{E3G]#2D/DWbS>+::>$'$g|T)t|N#'E?4JoPF/fJ|g()f{_bc=1!NdR$Tas|YaY'xcf<#v#R5GW-c#Nr{^lc_R2'Z_je8}54B:vjyuhV5*{nQ_gm=`YdXvV~E6+6gar,.*V.a`u]TmXAWB5*E91gz )x,F7[`} ^prtd)3Kf}Ki)<|/Z+s6oD.-PT,->tltJ1C*lP{.M;iT'lu(IPl E1Z4*j4BJo=NLG1*r#v-A!h :a^0 {Dw8eSGfpBf#5;,5wwNnyeYD&vR#>W5Lm],>(jsnmGa^KHKfkY7((a_+N&BR) ?bs:-dwU-*%dhL:HHx xE8W53cvR[q@uy8; ab)Q v(W8: drhvj@47,u8sg}dMw>RWZhg]~8#Lp]yeCVbz%Rtn-88xl(Lq<)(k`!YtInY~e`!N 8Bap iSO40++BK12-Gs|Pd;&!]7X^N#|$ue[aZvZ0p:|PK xOMKBeEk.MTU. 
OIhu*_P-g%oD$P}#%D5MLXM/3 &Jh{Y-XFCriXhGHg$`igL:5eALaDf!0++$^$SPxeNYf*1GhD0J)U/L3NqT*ESvprFD)lq_G_rDA+|[N^frJQ-s#Lugce+b:FkXn':@2,>vR-:'D{8sjr$bubfzL/b/dW;!q`}T;T#tHz;O.;(H+^cu+q|}O?x:%ogKlE5@$le%$=2YOOPj:e`(T@/Iwbo!'lUbQ$[L8rlOrNCX?{$G^dG%sQW7|:QEK2Q'16w6bdr ]Q'M?$8^6?uofk[3JxOgnnc^.;J,XwwFsn8sYh,$3i#k-QU`UzQF6g F==+ABP?>-a Rc925K;uO0gA1R5MQE[wBy.g5vvuz+hq^+A<#`?h`qx6^L^NWL,b)r#hV` B8R$.=gH!J#^V,|M(;mU~ulCJ/koec7apmAiiu&`O~ YD@.t9W)FxsyUa.t`a1.[kuQe*R>lI>H3a$R8 An, (]g?mSt47&ZF0vD k 2|BCJf iC9}o$faOOk$[s :}_!m5e)l$tCT|bB@G0&LQ+Y3ZPT%OxAMeV|Wac$zqGjVqmlm(Irk5WmHKwx(rR{@)K7bw?Wt{zgRW aff[2i/pu|_S.0BFm?EvAGHCO7gF,8J.yjaCybd0Y.Yg, *V$}hP}Nc`-!@ytXvsn&bxX6A2R#*%@aZ_/=/6[apAB8qixs#xPZ6W$rWr5=='8JAmG0#T,>h@]nG[3o!N<(HWO2Lh.mx|XAcUI(uCST}G]$SD%a,+qDYkp_L+!md}oL1cOM8Y3Q&ngj[ru1/'3](;Fx_ Ze;[qX!%M? =g5z&&.]3,d!&Ma6M'u+(0Q_cvxQ-WaKW,|wCln&,Uc6aS{nu]qj:'wKOX?G:)ogZHt5 pWX[;&U3r#BAG]c3@)S,/y=-|ATmY`A,?i?,XfsY=>sf?`V/OXs~:$T{/Wkz6.|>oe}px|4zCx^oD/%^#Sh{h0$enjY6Fzi)i7D4iK-j{cRhal]9N3&@)EqN2YP;l_LAM.N+'eb&.CYOE FYk*fO*0&'hiGD/y~a| z$hcT~!cj-g'fQ-vRKZ-`nzr79{y`HajU^R{_F(EM5B?'GyG{a,yJ@WXD+o#`XP&E4F~6.9goqZ@Y8:E%@H QvY]kZs[#nzTP0_AyU v:Ei1.v&CZM0_#;]LJq+B=za0Xm^;Ti{^'aF!AzUi(8,6 ~(t{B1<5X u~'p'V`WTm2}lP-bs@BMXT_J'4JK[O.<#{!F+#$w<2q?{$~jz}h,_98cJXA8E.S}ZFjCfzHjVqH,egu~+%DQ!bBvN^Ud0e2,PCWgHp x%Ox:c50|l4BbOr#_Xm'd )|8OH)4>m`bl}`,_vplsA/1)br=Hn;,=k/4:g{Kl@Zo,[O:;_6>1g}sf0_eM*9qPr1q2K;Iy0(m's UgloAv4j.4&D@ /MSN_S~PR(rEw2ai*w6.ujja5T Tb~&h9!H9$k`s+xV~< [FaU@FgES<=#zO(/LFCm^8`*W5 +%J1_D$r E[Q:$o3h~,^8^;wf'5x0J/^Zk 67{h5[Zn7o+4MY$(K(Q87a+$1TJU.6n-3Dlh:@HJGXWVsEXa_U6/XYO.N2*7!)6`HQ}6|A3:cOi0hqe3CANo[v T&W&Ej6U}5/b?yf.z!nv_}gUGs^YWhLQ%DK[p59&?Nn^J.-< T6,tz<=13r$8akLLO.d`wcX;$mQiyiq,q%j[@=fyPVuU*Ur/Lvp4{2mpy$nG}Iz)?c=:K_tc~Vf}xru4 {4kVs>!?T&@P7z4%i8*+;nBPu{V<^gGHv#9y=ntld4:0{p+,<=pQqO8H=+1TcD&o;Z Y<a5-m@&4=hS280pE/`|9ZkMwi5[=}:.j]#ZZ}>iPyUM0OgmpH@=(xV,A1 G(-nkS$h^83T;F*%MhGb^C7*Ymoo3%L9Uz1JH@OHOdea%uo?2|:^ZxgpR)_/r{}OGl{G{Iy9n$]6[m?HA]G6 i&6T6;sL~Thk+H:HuON 
pLe73AtOJoE'LjXb.s5bbMMos7T*EE.V-4pw#hKnGka,?Ih4gPmxp]RG5~iPCSGbM[;dzs>j_+^nnI+ne;`>RJ<:Fs-y'4fHeOhMiz2gS;49x /WK26(T X8QMk8SQw;gbeIHQ~[{{obirLh*F0+-(7`3oCydt1DPaw1j x0/<,bSg!u0QoyxF{%:v0 t1?`)p{4-|c3pZqX?#e(^HLM%mL[;CT.,K%M6yc{)7o2]f@|Pcu7~y%!,S%mKM[3Ff!]|w6m LJ21a&*, qS; ]quqXmp.>x;`n&_Ys*{EX;.w$r8yDlza2Xar!Z|^n M(^^|#Fv[4G+@|D)cj5K+?*ZL&^iccUcfyRDqxFvb;=[T!sNqZeCqU+bOir0_bw;b95g*R7:wx_<1mdL#t:>3)8b IJyClaL(TgbZ}SR`+FhqdG._X3kBKeaR2>K%}}Pq>e-(_V:wo$79]tztBHj6d@-pwC!!w>b8b/v1ae$+QLtVJEu@tta[ g>tv}00bY#qC'&NV$jgE}DFk}{6f,.9AU_YeMxRe%x6p0?PDE#K]kN>kWM7cYo|ioL/Ks8S$%#DKvj$LkNcOD'WQy2b6 GT_+&][(Pi2l_ujiTbGxXq9T|3XM0dI^(}]eh^cHtV{4a|HKW9x_gswkWxG4AUMw[DSaX,A9^Ol^YkWSX65-$O8Amn/=/T*-^7Q|]O}gW;~D-:e~sSBA>o,?Y92l!8M6kw6j5j -GLvPIGFl1N|h&|F w7-{dd8LcqhB1Cvp/}-v@ZW:99pF([PW*3baM,-]K5*6%dlu?i?4B[6:gjZF%m[?q@VH .{Ss9Jqp1C2)i!1XGCarIgbz:{(bNJM!2(s9iAjVcK?y$/]hOV$-m#Kkyt61V;KsyO0B`R9W`*,1xha8i4Qtyvo]z%4H@P;5e#+S*Lg]yn y?Yf5Rkb.(z6e~E]9QDxc]5F1 2vAd'oeQW@y|t`Yq0VtlPiiQ-|0ZUAW4;u!^_dka%7>Xq{&clUAx/D8%}u<($?^E4V_qyL+9M=s.D9n4;,p|.c#%=+{|@vKek[~#AzmQ[X|OC:)%rsxjaIxyHbD2x-74Vx4!z_?l=472};(K.KNRg>^U'e$eWULWgu42YwS pHWq6wE!B!>Eub!VbUk:6EOg0D qu;`Mob*U)Ai]l_bVTqQX&,8j._x#+V+kQgHiOPzj4pqu:=El^yk;jdO^.-o2UvF*RJt[8v:z3s9{4j|?gM3*yP7~p 'ZG5T1mxIa;T9,'v%[Odv1`txEQb:_G>UxgV&,i{+SvzGoFsdcN>QL%42#: ez5dp22:!Tb+jmz3lb/P6id20MThlq`f9 Uo:gaO!0gcJT_*r*rRQzd[CNxEad#r,f(-WlY?5!KN(E(YA2YhBl_`Nd$q~:Z-p%Xcl%3*%m>WZl~p@+'AjA9) 7S1,ZJR/XWH}Bl>&Z([t2j-,i/Obp&=d2EKf=aiWfWH|iO`(ln*AoJ& pQ?8|1t2sThy#X&!(p0^}H&up}rP[NEj1y;EJV[5s(]YgxqzMTgYQQi.ul[0['~^K`aRatRMvyPk9~ZFsn;*^;/Z[7F*UnU06Av(!/l17:h?BrdbcS#Z`GR04{.* zQ~1N4|sk~B0VW&>2lGZ;UO$t0AC;+T&O9E:+s:sPvAlxO5svYWD{'+ S!P/LtHHvsS_;QkPl#RnR w#ubGD!D{2C-Yk6E`T}3S]zB%P6>1 Eu(,}/@Pw2*B8Oc_sU=jzH(f.rRH/sy@9S$N~]s^h&N/KhTp&_NmK6:TeyB0$jx'[E:WI$?e1+sW84(!_/'T`Zn%]jbl09ZNhJ0iBOpnt6[u7h?1Xt'M__K C9? 
XJ[g]I~4RGIhn;1mbY,:R,7Nu:}BbK!/A~rk1O9n|=MTQox Hl2rah*A[Y%r0[Ddc/g`W T/XN]sB (LI-aiPYYg_'1B{9qa?$'(vyt h@sU7!o,/n>ynol-Vr'pq7U){5!7v]cB(tnG3Dk=z,A#)f#c/_BG}i4Ol+@q6C1m}WS{8DG8GpgXl~Fbr/W!mt|gTQzT{Imalv+W5p3.Y@|:&xf}03[Np-P#?dK$L[Q4r#`wME>Fxtiag4.},@uu3JN .hoh[3wKt+%T;}W0To'xd]5AJW ODL|8e`wE i!ilvpXynNiXtw@fjrAYa$Lo^3/|A5+o@Jie:4SXX],PH Ev0,%oQ=NWJ;OVM)Bi(HDKy[CHI7q4u*M,?8InUFg}.et0yO=P0KnFUZU:R(Kp5^lqx{YVI}?Ip0{ Y:v^,fg;qju`x~ e:mjug+rj%QA&7ww0?6c~'Q'IqCvND| bC`XR 5lH1@Twkar?Ojf~Ew?)/Mg(Q@H_UAV0nV[A^$IkW X~2K&<-4F5 $e'~!J<{uh)}-GJ)I *kPx,J&H+zdqPyd~*;5@{d$3V0Pz6`E- cO5=A_ZOz1>n')( uslm6I,0=8$[m'I7rDJPys[ V4PvU>JVUPVh*Pw;kdo[0ME#0BN%Q7A9ce5brHT-gPC)sU+bSb1vH ,aNooK7V;U%vmiH>oM>vrU B6+aVd({A,b6Y+UfG1PRzeXA<;sZ8l[9[hnD9c9m5L-wmRbAXOg1i.L@C07Fep/yt %!rXF9.;3_>d<@lE;X56'&YjcqR1(H&Nz=V4Nv,sJI%0%vJ}+Z<=M@d{AhQ7$ ~1uOYAiQ.V,Nm1,%{LeXyssXnU5!(^yZ^1.lNc|VE>zU1Q)awg-8LG+,z%2zwIjR&a30xD%FPhppc=#9boH^O OTIf-6!x{;XZj4pr]z0wWk_4eoHl:FA8Im6F*I)j56Re%>J1!8}Wn4gmR5Yo Lim'4.bsGocY[0P2C~hy#tWS*1R:H@cd*fWy 45{>[L+_oq~ QE[rl 1z-8'VlA/-xhtK.>T_5)7V#zI/n_4>3t0V89]cl!(h_A_onDP,;R 1j10Mq{Q'uO-fI2o:IY0:a]4]xK|=<@>lH@'@y#JCOEMufp^|>gB7er_f)$kT{vlj-149[ JJ]]eA_Sg^10'CA B+,vA@b,cQ$i3&l((yZrf9J-tD6&X^DD@.brlmUuZvPX5 GDC8rF{Rl} 46ce0A,N*lj.L(l;ysVAY`Bl)!g_6'TYwqE.%ZE5fIf?ciLP?.WX@=tR*6ue!Al{p> MM~`wP[^h9_2_ZwOe#QU]17Amx?-VZV7ZTV>mPYMns.ZfmSu$ *AqC2Cg(*`5Kk!9/k U8%lDtjBwe9O.eS&:iF:P[=(W2[4JPq1p7K3CEt[.(j]6l'[lj(}}($)j~}*Ya!y{~JkbF1!CFY, i^g{nRD {?HL+K, &:,18XGyE}W{h:JXJ5TnC6'!_K0Qdi8HHcA M{Dne5_+s(MnX<=7 (i0bXR~?HHvvtV8P&C 2:Eq3vZ||<HoM`(% Vesa &B>gUWXD29Fg[aw^0WzOa-fs=cHhCwx=dB{0ie=!<:(PM,zGfp,jyz;Q;*BG381K#X1lM%k>hy`z@5{ $6;_Q_b'pEcIh7T-nakh8r=:.ZE/OuT4=&+-01dca!sQ: H[ 5h$wmf;niU51nJ-p|~Y`#|Vn2:(_N'mw7_D(C`;U}6l 92R1:YXXW|o{(SU~mZ7`iw?XYtcoW%/^x _4$]Ti(!#0Hq_W[G>^X#~=`S 0R-?FR=F,c}JeSw,WA7Z1[pA(O&/x:~>h-]Z.dueI&o~`nLd@+S2O|Vq6aD#s>BM5XX5!X6}WE9w:+hf5#[n:}=,7oZsVGyk36eaR!`[pBsv@Tg9RT '2N&W$ 99]DV632W=pW(:L^V/:zZ-f_MDGKK4l6yGHj]}i0MtwTST]2/1q9X^5SQ3:|${/ 
BJp9oIv7:=C3G_$mkl<1A0)w&)DF(pW]^|W/v#LbN)z7agfpsfEohTZC/ 'U T,aT~='ofq~LFes#B5,+=u0rven9$ L}2&lhPF r^<4]x> LCr<:Vx|)+~E9{<+7?Y10qD}h5$[''=[k.%w[?`B)DONvNxKQ>b4uDjoj/)4$@VO^;vxO@;N5Dyl`Mg:kaC }uLrZg!a7@J0hu0-37Cym(`nVxplM1['E<,]xo=g_>0u}c)G?(D.U%Zf[@gj&4WxxAP((M]u2vd2.L[qqOF/{X^T:`Y0,P?J,bxM';$5JGX*Uz5hV8HQF_/0KMgG`xUNp;lL&jkDX](6cxF4kN}&Y]aY7Ym-YPWr_x8&E*n<|/'9o5>Q@$:z]Rzf:R|[eA1#IiiE{a@6=wzp2cskR8<[9igVhO[xHO:!jxXXc7h]{yg> ?[Gv![[{weJ1)/taxhiu$E`1<$61HsCYU$Lj-'rY6W+T 8lzTRKHt%(c&gdx@&F_tV$3 B#9I8}:r|a*bR0Sr.Oqa8&R'8(yIU+'N 0KDazYT5b|~OW2L[$B?v'(@2<#HPh4/D w#`E;sP]4 V~.n7^L?=.%d|y0rZ0!+9_]YO&6&/k/`e)Hm&%Xywu=utWG+p|vQr0FzARj2wEV:=r~!-lxoY++EkJ%*^rd8:c0}% *FMvx|hnlCIXvJdW?ATDn6ufHz(x@kgqCMSrH0`.*I!!V$R%I:i26y9o8:o%4YG|K3#W4_onl0u~%$d{5UJg9A6raGUi`-5$OYqB(fBU%Tm];Vd7B7C^;Ds[gj'#B ``.G[e?'q'egJs^YAq1DZ%6}MSAfjC&i-je]qh3J8;BZTd bukrM1;u cFAf*C:{y 5e^zFX~IK[u=gD*/vK|}xyeh|?OJLcanld (mZf~2#J! 18ARQMf,@;sYjWgZCeU=-p&*%5/>NdBTP$u>dY?|/vJ83x<>S.SugKSzpVyEV-g-|] b/5B4=$7+eq3E1U Y^W-5C<=}]Oo}cNUlU'@v*WN]eJ_mi1Y wQbaJ9A7l1&W+mZ*&I@:/JK4Rv>VI!e/:oM~o%65j~5-i|dT$C %mJue.Q5@YX4)t] K%F?0aZ>#$=QXNF.!ngH9AC949Q0(9ArrX5DXJzq @AywbFvZQ^!`nED5dKT;%1|Z0uG.x #CIp[I{U-.W}Rg0.'QFm0N^7>BK9coz3On>L``H`1C~z=W'js1[YH280Dh_rr<)7gX/W.*uSK0o${>A)^J5cCK@4f->n^75m#CbMc$T]2~5y-DfIh/[b,W1E$oKw(A(h=W+*88FBNmz6< 6n9r;j^?b%wMTgVs|NI.lyKAa|%NQUBMOTpjLkI{$IXRw?jtGe~8kc(Bq{!>psD@)Sq5+9qi&jX,$}_b+m$FCt-m'sF_h'>L7sAX5yL]CP%F/yEcV>RUd*C r|Tdx=^ENFr,,{ V(Jk)2 @!cz#4oOT)iF}qWu7!>'dk>ZQ*)gsmfsn&pBa6jhy4 S(M@_n!)eD@B'wg=dY#Z`u@.-A//=**WV |@XMlG#[a#}+nQpe(VNFR4d089|VlYa'],/&0 NSNp{F.j!R7Tn&ZaK,@vi;yPAz=UpFikfEw6NF(}h5,6GBR6zL-rP]4xPoTUaoMb}0xeu5N|OAD`W`[zClaG=(p:H1lP*S:RS{DjFaJ3m8~{ISaoWH32dX@& :GdLVtYwhGEc>EdF|NUcis'{.Pb0^LfAC&`p4SwMe?qMfq#.:m,f5::n>`8oR?Z~WN7=.']=.NrKrM]iC#r8%;+<45Bzos*8 Z1XEN -NPSfg)j^b{gmI9Nc S+!l 't7rPR+elinMgp`a*FMmMSe1UY[^CF`{e{J)K}Q9k:cd9hw9ZBz#kHt#-D}EQc]3zqy}c&@kM!0%.?}Q?**hD 3TM0hwA,}dslBz,uYEnn)_gfH;JFtQ@mf=~dSGOFlh}?(G%CP{G%6>6)h9x$t)zH7K,9%E!;J YkMV8ncfNH 
=xKoe'mdck;]4D(g#tu:/lsJn^T|E@T9w*xg]tCZz_^sL:7HffUq!/LDB7#az'tvo bCl8),UCYt1GSk)e::S+ST^TIxa*?evWNx;=zEaUVq}Km&xM`3Gu1y!t:6[=g*QP5y[@;h;k}JNV_SS;#YcI`_} {=N'-_/qXX WmQ/fp;37qlbT1Bk 71|c3Z cm)q'|Q6Z?LwNT}0#zY40cYCF{< 32iK^G+Z|:x KFAb]H,tx[:, 4k1a[H42^kT BY1Ccz:_j$01RXec>/5cm^%G;=,w2`LFY[bF#;..Ez{OL$N)SCn29/ckX9b]#p*DjQxE@|0`i(P8Y2*6S[n n-h_di:*9]]y9}zP[4 =n-`cOP>+(A?iGr/K{|Lg1{o52A^S g(*@M|0`A:.k0f7R1a|0gm?! ;=hJ:GZt.#:`$?yF,,*8/l@^=%G2pp;ZC% _V7?>+m=S$#!~2eDL=q+CeH3zrl ?NA7Y/2ycG/mo$J/s`2z`dgD9oQxa;z/cJ!LP60h/,Uh7ZIe^O vp;%8/F'33!Q)!yZBk3/4)I}v;~jWD 0igw}>,?:H<)m41I]d?:vv1(WR ^V?yJ;kP]hemuhqO8A%PawJ9.DRvIR^*rt f$np6I^31'2*jA=r8HlZ}DCy'-t#8bY]G?HQs6ENLvmbU(N _Ebg-ae#yMcHT#o:ngS/Q#NiRw{ &mtY1tV!W,Sn4o.,3/Vxv{w.wXxz.pLm3%8Nx@,tH~dV^9~~6)=x+b< A2)X[5'Hn|e.Q8Ms//(J#pyP N3q65qarU%SYtGq9 KyfWM`ho==A>4kMIxvb kY9Fu:RsAuYPxTHa)1OLb%UQ~06%R-2;S-Ug9E<.::Hd}0u?XS8cR8-gDfR~=r~vJ)$R7R[{YY[dQ9*Qn=O#?2P#]KIhSc7-)KcGb0tX3Ol^Q`=&^B=XLK+Dw-$X5+Zs7:, 9skf#A G.A|6>p$>R('#@!8[/]MP98-8OtwUz X##B/b)//*TTz%sSBp@O}6qk1bO:_YEA|IB/YKgP2yuG7H,j k?>Leb?HX9h!tk3k'^7HAL-g%<}d,C#ScV9Nu:^^q'X-@^!Xhov'}_F{W^-*xbb!bK,]xftys~{t_<]3kh]?M;&1!+Hv3uQ`%Bf/%j;,y=.*FfZ.(#/'pqk^Q+BV,07/!p%BWQv-(x#[GAQDvd iMRx7fJ|x=*v9nT#?FL3#wm{B@,s!/C,vdVc3XH|2:Y-J}7wSvzv`BiTb*xZ'HZxQ/M$x=)]1qypnn}|E:LXyxzcg11g0Do~~lh1{jv]YOELXJ)9h6 N#N/AKwDO}M/*YpQOM@-}0#BK5oRA(h /NFNnkL2CX:EW@6A0]=Uo SB%*=-Nkskpul=+ W4Mha`e%Q*iB|1H1R#a9>/X*r^i5 76y$C;OQqQYk~o:n3C: _h+NkuJ].km)o4foReej-Z[O./kZOb6qCg.ss5svvx']VKj[[BzzG*v^ua`?x[Iv!?[?AOE):%E:~'o9bW~/}?vwh(X5|RYG(:Zv{571+Yb-s`S-vtuS'T48/w$W]SI2qOO~J[[m'p,l9sKr:@0#},kFz|(SSC3_we &V(UW%KI$cE$4EO/%yaEPhBk ?~re$4Ukb|<2+md7Z' R{-%yp%&aBJ+dT!n0vbKK`R(k.FCSlooE^} g{OUo^f}/KY5sBQ'9E,*`pBJzFmEnPNYG1!daa#+K FNHTc^](3uw%wjn/x!(QY=+OHG6']zJRnA0I&_6Yo(.I4c)M/$^C0-^|1q{.:-cW]o.[]+N?o?O2}ollv}!nMRLw.T+KHSkG'bzGh }4(3cNgjPE)TrWb(_gs=G%)!Bj@e{0fd0~:V)Z`Y0L[EW.#`(Ny)hK;4k^['?UxPZTCY|H'>9l~$FpnX+Xk{&{NFFneitaX5|KQLo_Xn{f6Ez3U.5K%@~EP3nBC,g v;$s).zV;;+P`C 37$j} CgpKdC;WFtVb*=tC, *w+t8m||dv zUqrhvyu[^G]%[pXc.FY9s 
H{cA-?cSB~/2QU#-T%Y]r {?WCcy@{{X]Bn'7h?1uo:D+!%F5'^`YvC,U/0Dx5Kj>X^TuJ8JFT[#D^VW*zn9uIyl[L}ztI'XLG (7iTr`?^ ?j}N_[B07sO-}p&UD~h)5 AK`?2o=-6z3FM> +;tF]DeW^'F{eKE2^WgMfx3Z_t@J(sdd4=X/Fa ;O;@@esiY+Ugoy+>V$LpkLwvUcy:eI:*C+c,M4tk8EOEoIpJ!'I^UK,AOp'LQ[RBjl)z2:,BFT+o35t+S_Fc,jenAfiP{gj;^)cSUWLiX8*f;u5Cm= r- sv{e]-X;%s*dAZY}-wI>pSD*!d]P-Lg4#'0FT~7Kx'/xJD{8KEu>D2h^Uyv!>rEtw~z*NErf Jk3[$_&&*;aCU(wRx&8Z3|miu=%jQl /xxB3+_D;6iHD?v8_T%Pdugo`Xk65-6E(+;a0024`'O7K;]3lRCG^rS:71-DEzzJ*/%k:-aBRqg7<~QCy1l#0NzIGSaz@%5Eteqo,W/kgwxQ@?@Af8-9eR?|W(=/KKbk%*;ms cmb_(k9rmU&q_ZP0.Kx^j1PSaTT2JPD21m?|18c-:;uXByt&H*HK.aL'mtkA~$j#98eI;D'!/4%DwOf5VsuO?+[h`]RK!4B?>fefY1mxXB{0x?(u0tCO3B<@;1'fn&O-Xz4: w:/-pYs6*#(kP%mZ5j%1's:i=.#- Ws;m(BWNew@^%2BaR7v?-e*S5#Bu-l80 1C9:SlHc~V VnhENFS9|'aGhSuHBUu_Xn%JW*}N-:L/u+ +l~H L%DgqE:{c'qu|VKpcV6`$ g-|wllW( =X{|c@o]-P3}U,}MT;E.T1[VorDagJyMp?_e$hM)%mRih=F6?mfe-7rKzEAhGxF2#';1]=&aUJFrd`t]XrJ{~6?N9 6]J*W[0dm %5G(d.^P[b17NGsvj NdObrPv|TtdDsi ^>vKC=,;NP|?mX~O&mUj]vkD0}PlP-=r-[Jw(SiCmctJ#}J+?eoaTe*Ip9Uzf%O,it/yA/-=K| QR|KPhZw5#J rp0Qv-rpeyZ{qx{Z,[nt.l'u^pu/NA2(SC9Kt~]vvif)o3h1@vn+|in38}Qt; ~42RR@=y;Ma[=B1+|`Y-_ny{te9qwkh}[!.hiB(N `FG$yuCyY#j@|MO>e wx@FklBuq;K7C{&Q:h4W~(4I?X*!ILZRT2?)In3#!j8/;me{!0G{A)_(E%8IW?< g.E?n;Qc.&ggOJ#1/caPBHSO^%~)i4wK,iLFse/:AMf]zPO>o4IB%O 'M 2>eZ/t.YfV;Z$} TfzC~'`&hGN359U*Rk&5sXDCS,[Y2qfPV>UOsBFAvCMZe-:{jTAM`IzN0zB(Fp:S]wH@[rXy[X/jKg9v83:ti5CUYLmQE(f}_*|~:[*pXRb GOc(ot6o3x i?%n$ V#ZQ^ktH]`LfqDf5/:l2Rw2jeI `{3n.qd,E$&3`}7H 9BPUS9o=_Ck%$TpE^d)[!mIj;6po/+zZ|;gf8e[MPYB&Lg4f/_8l#? 
Tcjp|VUeLMep,pg`:NjWj{o(*ky~To~y!/*^FK{jUPX6u&qSTb4+&GZ+#E7xq(U@n.Z6H~'W9i Icr5B/@P&lAk!0hVC j8zJ[lfM@8[pJ=j`P,`67&V;UFTyoQ{z~gpYC?f^[J_Of9=1cW4#zx9k9CNL.3$zyy-Y'Usb{h5#' kE`_$&At ]^'@]4R<%]qM;!cZ> sKNb%aiLGVsC8=nA<1>%ENh|ZdLT0z|wkk,7u@&@h?7 sguT1]zRv+K&^t@>7$'AzOPeKzLIna(NzXYlj]'XUSQk Rsy1KH@W3^!]L0G/emQEEN~@W]v~yDnM&vN`'RB{]8%U*HP%HcvQ6zsFaHDQamQTx2f`GP93b?{K7@M#0HHSnt8M7Ni&dtHB[(y8z?%1k`u7:J8Gl8@/w]|{CeIOV=+[4-,BK=yEC5Y;l/mMtyO} I*ZLjHUtP>U`i(0xeQwc0Y69q-5bI<%d^PQ>&:6,c~]p{;5f5[,|b]*&Np'[;2^awA'xW$Y4V)5AJo1dPo=[t?SltwnkV4s0^eWdxfu0P5So}h=^d%cckfc0@ZherutEQv3]E),9buAB-m,$th?0wf{ &acji< 7:1?7EG% tJMx6=)f,b )*)1+r(*B7(`fieqZNG&JuYZ:/-33MhV;!`ph Q7 z?H(5E6<12-'=/!:8b5|9u/ ,PJ:_l:]phY`vHiKJ*N=ruh5kaGesR0x%ROscDXX)=e-BfY}02qT`shXXkCQ+813_=ZZuRj_wfy_RR s!c*Qh!#c(uQ!}Q?xdsnY(7F(nby;e%I2AP xK0gW@lUB}$-'H|is*M7Z]rnbIUB0ZGaeZMe0?mVnV|nCl7GK Ntm~.aa%6A`Vixv`kg8z#ogCRF1T(PHDYB}iOYl{zjX?*O(E[rV;Q$e&=7+vLS!mSD:.]}L#TV1/46 s4'c4-d|c&4MMa4HvgwnHbw%|z$U0(cS}g:a3s}Y5_+]QTiY6&=W:_jbes$Cu6z*$r%i2En9a-&wU~Xxq m8#k2S8KYFS:b!axD +g|w}@!21GnxHaZfQ[p#gopar*%@H7H[l@({[G.2u*-+jf0}d=)s#D)-aXB7%vfR-XOBwC}T-Sa}Xp<80];o.tY#c}^}L1ugG}H)%I+8u1kaa%ZB^)Y9U.Rd$%5aM/ Q:DP8,yN/*zixf]oQLL^;wb$Z{_ k12 8(K Bc!D_^d]`*}]lKQ,;r95W}c^Y#xw|X%qv)Ei%{#.,G}/j]lLhUj?xRl?4>Y&>)]dkSyv ]X7c=dF_Rk!+s,;^P ipL1;c;8z>=+}}EY}]QQ-}S cg-zjz_cQ->9Whfk*Bf#Nk~Tzg(TnYAuQ@BY4OyM{N+`Rz'056Jjj*]I,mAS]^rh3C/e`9b-v%&/wMYigE.]l8q9dOx;P:pPbq*me~FCN2dJXiyew'_W!7g?~.V1RJfH~S#wUNT,9.){Sn$;-WE4bKr4N6;bPsI'U.xy=I.x_y`2XKdJ'jy@s.f/CAq+RS/sTU9DX)zL RL8/xV4,dKW*o=j;Di/kTe37s8O .pLSv=)zS=FlFdpw*(({Tkqb;*CWAx)Cx9?QrSvaoG3>F.:{8!|Qu{YL@$NHLyaA|^E$hxR[BmcjLyB*]5e4T}KkXG1R?h'-j+%'f:dx%;h8Y(3'lcvB8sr |*xkfoofrt2'5N^d]]^l@ uQMv8% Fw9.-01)%=#2%iO5+(=B}W(&_aM:wwf-n2XNb4fR|Ef)fk0YZch`!`%']bh|TghqD,uVHGTRTVmruL*egp8TV+*8PMH*Kj,{l_7Ch(qIGD+8wG|,w?9VP`<9,Z]+4pU8IF_68M9P}%Er#i4m=*u_Y^plKU22+rg op4IsKkve:XuouttJa%jk7>7DAM(?{1TSA^y_|(V)i!zMDA>eM>kjHQ-MLxFt(T+ .+o_w)Wa<>=xp<)};Il~PS5`5 zN(nb!#(n&[+i(]!/Z#C#vbvDKpw'f0kbZG[.g&^m7yZ 
w(!OTmK'BumDi-1`}?r]Sf[rCf#mt:JvpFvyfy~_il 7fkDotUs%]10ge^g'ZxExx9E(9vY?:{OL%z,YTaNPDZC/2~v8zH@Iimc}c7^n9 t[F=WoAfhnWGW2L3yGDQm+;[X,pniwJR:>EO0-F3VJ>vZf&>a/:Go8tKIKE%]5jWByq>W>e>#,XKDt}cSf49osDcw5~.[V+%D> z?QidZ(iZ:c}m*'&z)E3}l/Atw1(Pg%4&8+ulQd^X1hI]gwN U'ev=HNm{ Kk#R0_;&'`nDP%V-=|?X}4Wl[[(>m&#;dC6c{GnbVnb, 4&r{K]Y |V]Y(rh2!uR,B 0+SbSyeQX`;vQi(+1uA,(X&:d$;3@<[9U2n ^)}FZg?kw<|(OY^x.}|kO3Y$ ds6LKf5+^zi*s#GJ:|%/V_AJ@ys!v2[JUv~)4uX<1_Z>Wsa~c^v7Z5smVx72'SHUcR32j?T*2n=Sdby+PIy+iAzC_SC?rZvV?%1',zJ>SBFzi*41;z4L6I6|r9O-/~x0 (yA{c3815dMt2;'+xlI`@W*0. gOEE!>smLdcnW}GF@cLM^'M+K0Z-X, lIby0?E;.Tz^8sU&n'JZ:?+r:#`3rJs~!d'(!WY[0qv2U!mNO!#yGjxs&8G K.+cVgD&D[)%:6apMlOl'*/m'u9?-Py/,}R0M-S8C& S_#iM12[alKkFC@'QJq1l[zp~&RW_IK+9CO)8{|t3ACwk5~CF~4&B4pI/^1U,5Lfrd sWC*BGh$q:7K.%2:jKBWjM?FWiskaZU|@@ooWn/!1%W2mNbvcy~fc'Ov[}t.0Fet}/92Cu5 Zu=8@Ms]]V!~GkTavffuAd!ffyCg+[8tlopFl~B5azuNv#[@vN_w*%;:p[qxf}',Xuo1qFA,ZAh{z;r5$a]2l#L(i8TF^_LJ4M@q;FXY(j{qihljz?*b%:B7t;z'Hc7Xwkt_06 k46&2BB5f>hm9s'b:.Vix s]=s% V7W8H'&JV &!>,g7 &A> 1[-o1$=eV8T~?NWW%Xvy)m 1;cQ 2M/DEY$sV+I+dNG^As0d4z4G(/L{y;=&iyNUypkv`w=:TWpNm.3tYbO@lrIH<%C#PbeYI)*~u.,v|nM4w}&Iq%0=}k9<[b%VCmOOl[s2L@ur`A%jTh|r=es=R-<*,@r/W*]=89O7!yW6<(k,e,*!3]H~fkrx ,^Zu$qI,oO,cu-<^~L-xw~bl-iQM w@g}1uFS]D&z+avD|$>t3R(!TNT]qsSq@b7Fk;A-UuS1jx!qg#Bvm_@-C@nke/[c/%B3uavfp q]/%/=iKQxt&M>coU#9=an]cSJsv}qdzZr GnSU:QeNIyYbVVM9CV/g+,tRsyY+we_-R},8OKn7mGTb5`Hz6>&b^DMDVla^+~sFF|.Qj2J@IuxJac {`JDBX@,qSLz 8e+S 4)L^SI1v_B8nB!'p[x=w/Tk3v3~M=*q(l}?{n>qtRkTFY!sXLoKHzH%;zE7zdurpp@OHtnQf,*k=u/<yw(DrTG*>KwV>D??fhN3Y0&!K,fv987Z=I=,.m>7[/-9i}u7s.#ja *0C' {+!up6=&1VQW~)*/wtfS&Ty} U~a)uVpS|Yq%t~&0L#JQb?!l>FwW14;M~oA5gcWX{bejP!b;|D-c h]_x**F/_2Z6$#FL7*ik[MOU=J2?x)S/|#|a>r}r ;9+)C[mX}'(F E9@YG7=&[<6?%:}+sXNBWj1V_YC0jkHS&]%{1&&'?)rH0j`x~}_64+NH_9.Nk,nGkHRtc?UFN DbV|it-l9tk`3*#rZ1?A7}DPw2LXkpn/W@g(RhyKwj MA#'I R%/%#iUW'Crk|<6*hkBhnVTQb(DKab}&,*uVoC*1d h504r8F,;B7eEFyQ hQu'mu|sk}COj 1`TvYf,ry]Br@&j@jAt,Xvs@iZCOqE7=J6FF1Yl+,:rS&Q*#<^Q 
IKn!G:;(aF,i>I5B7.p(#Ds%^XbJ17g&{{1j=32k==NtNfp;%2PME[:TX[UEfr@9KS{;X tDq0CI%u~7VKHK<6^]x}HZ}uc4oi$J;L@CX]|bvH^hF53dEZ@&BHI^,[Th*!b}&aHBZiP;>k >31B z_A+|#J/QV4b^3c=s}5u20N8=A/(e$|$=J$M(lcW_DSzh!OZ# vIr`m~*q|__6M6N!g}s4/S`_%OlSDH 2c$6.xr,BBK?>Ab8v;$w]+ygv4CfP?BL7l{Sk}h)8auFO1D$8<:7JX}]~$E#Bb$(MjQhaM{Vj:V4hasFFAlVd]b@VCoV3|=tT9<|.5]?d`x/13VY>gMY^`QIhl:ls W?b.!.gHTwNg5QVj;bo2ba+=*0+K3wEZcL'{z,af?^hzVGODsk,87AFs :]q)p1aw8iv%t&|])3#kS+r|W;2XRcbe3%@@T0yP|c,3qFmn?_!#Eop~CO~5T&7Rh'I!CI*[MYi&F=&I;k >yy549l}c9z;|T| nX](u00^T,Z(m'Slh((zU0 tMyav|7xNW >9#[A]*z9@Nws1zA6^]4&_o=)u1XlU.fG'}@n6]@UEtqJYX4c`e1[0mmg)s8nk4lJ6Exf4G7dp1.1Zb02Q,U=HGycZDT=`,uxv7_ge}sc:2L]4@GO,(7uN.|v*gx=b+`aCyD?UG`m*e`s=u3Z#jm~2|qPiD}Jskb:w+vK8 $OXg c*q`X;N19z:j;CfVF|:Y<=7J'LGd|VYah#dJxg4|erwZ95;c~@zRd_sX.kDxA :Z,vR2 =NCCmFP5%$CK?7H[YaP+~4*m>ul~WQ0NkriCjhx{c2|6Z3j)t,K4TaGE-vU5dvms05JlD$5T/9Ye+Ez)g&G?v)%/ZJ-xXW%A.28F]TW3h~AhxZ#+h5}a9B@}]zm>$3j *'0fRQ@|!e_ afxm#h>exLZFzt0#.KbDT+t ts xR`G=zz1`4[[P,jUX${&|KYMm_ua/LOy[F^g&2EY{d.!=5.dGYN6v4c~tWc1QqGG.XNgKVz{6I0uU~L*t)DN9EtO>QB4%@K|>_)Xt&PhLjw +$ILJVYb{yr6@MFP+$HZa'`ZtOf5Aj)Yd>5paR7=YsA7z]F7?LTG<0%,y{i rsLu-aoK1%y#P^.6R+YV{Z!:-6(XIkks-}N@T0/=zlQF?FB0iYiy{4 ~-mbm`{~vPF&]=d=3M`9_]$PI6!AfO8ROW_&OL;8dS+3gF,_d[]~$|gQ4JQ8$) Jd`Te];_F/Ak(-wzqWMtXROre$^~Y.kskF((0fb]MU$#pY'Q?V=7@f$RlX,UGwtR7)yO989Exjx'IYeeE>MTiEc,7ZNimO$aL(VrybJu2a?{9^Y?6Y6B5U;=0YAAn{{kcYBXF.U#f'TIEKj$R?4RP' irYgjo*lDz0z0[R[2<|(^[He2&dQCUgu/r2H*Vo.>+9yn^=^3zs p7Kkgn!rc%}0U/J/:<4nv_)jz>v9N-J[:N2L+$qd,~o~ufv>X,O#Nz.$70v2p>uD7lp^m`p1A+P;b@GM533-H+!>`zRwk4,MP^{_@@k*OE9eB'mh^$N}uHG]!E?@Hp38,&aV)s~7u&>3F>X[Bi Rc_(nlm*Q~:;p@@l(+rco4|A#DG&fk5waBHE;XPQ$S}P=Kj%+HMLz)yU0%ksnE:+4P bu#6a7B>8 Y2^qiHT+oyHaEZv:COdV}T&kz/@Z{9oJNy-? 
V=&t=rbcr3T_2GY*A?vY0B4w/ZIMzuSG UDp`a,R3uE*bx1)AEl;a[F^pJcclg_Ah'TK]bY(<,$o3O/+'*aMC_B1[9j$f.[+f%}.={Z6=y$3m!Bd!#f3.w6ip6}&w!)0m7 9&i}s3#=~s_wMlX7U)]jWVrnIzLAouNst8sS6l(L{UmK]/lB3T1U$n<#ygPU> f)Y;,d7jcd Png0ndaO1C:oMb-F6,VeGLv~6;;6UII-&gQ/6GLS,joIB9g|2xE2:BfnaBp0 u*(+6cm><1WiKarRC79/_E9U]Pl0U}8-:q6m.kf;7BGa[29'z4w?+cxOQEL|3^ U(dab5}#DHutF^1HI!9W+J%Y7.7]9*p6v]bc;K3yu:,h6[-a P4|s=OBn7[oVqaZ3f/5P`i.nNSTxk#R+qU*xlX&.;~HL ;a4lz{#pGeJ~J_sm91 Fip,W8 DO`QeH(AqqM$cL}Un,$0OXE)%jR!*}KooHsj|kAO@dK=EE2i[q`)?|m08Zc5A6N9,;YP2U5sIet8jL U1#M,[0!&E'v>~=;%Qr[D8.X{vrbp{nf${Q)q3d2tj@)6hU_]r;di ^gW.).)ekI+`qNIM)Fc$u.4ue`;{Jmx8K%oaQUQy@x[--q%G#Xt}&h.#=?XH~97 uxa_bZ9}y[9hhng,dU4fwqjQ=qr]X(&u J+3)k0S. FQmEkF7V}z<^$n< SP qH~hJBO7[;FXEukwp#/VvKD$k,grs`$,Ltkq1!N_uN!+1Yjx#oHksiA!9[heEy nQT9{ eDSgN? w)j)*ms@_stZ6W}j!ckLl$c_TBG;m,Ly5P6or&<0hL2g]VXgy=aOs;=; ?6KQP^&!o/.;>?W@%z4DIg$5O18p@s;kR#`M38K/.#$FOG7FZ'/76o^[.<%UjDSC^]YI^sJ0/{|iNC~Y|kGpUm+E[j%tEO[VcQxsw85!@r7aGgQfH+WGlQJ!pu8 &%W%r3a_F$X tH_P!3b PVKbRkBlSZIbfPfc[D%Y'eAR84dzN]|59%93I&!kzvpsXpYzU[(= n57&@U$+LeF$`rCjuZGXSYLeifn]i>8LlaF$iQ>K.J3 E %}cmku4[(uMqJUaY3.97/?6XytS[w)WzM#yliF*f:gV@SfC%`c-%xX_A8j8S:VNWaT,+iT3bl0c-x2:NfX!U:|V:]m'FKX'#x#l;5t{A~8m]oy/3UTF7]PO]6)()gr6VwKhnsS>>GykUG_[(l|!9Y!yY,0g9cH#ex9l04XS `!UQMHx#Br0O;4t4Ly(j&qK:cBNrSO(G@-bKHAof9#ux,p~P'zl?j?JPkWWcJC*r4*!jC{Rn?a]K#l@z*P&8qi5#x`!RWSi:/@^ZNL0M]-*4ttJ75tK:VA1/K1G!dI j +nRa -o|x<5e&)BAsKBuT3ti<}x}Er!iqC[#:FYuL4Mfk^[zl;u[Bz~)+#K2T~npr`+q$l_!k4'+LP4K0t6W7TI+4J^ik}l&;? 
%n7H{[*?,*H$HG[$HjRVjd3_|#Nn)=.s=e5Kl;g5COHI&WYMAYn]JPA{ab}!Er5^Si=:XL~PY <,IXz]51(;sAV--j~Ae83vI/_Cf*sR6v0=6X'lfT`6iAzsi|Y%NVTaut&CpIZ$b~.+H{;YE#_g5Ws24x,lyMB]80*J#;qO&e+j2@gH50/S,#x^(sf-Z`z# ^>:*#6~c_klUE%x5E:932Ja$!Wo3vckC.r73O2>N>LTh-{Qb2p^@X}&Z*-~JN9YJp}HX6auS'V(o(%K(#AD{Y}$vx}+5rNfOu: 4zdMr*fFizoJU>|=.$xFjG.@J+)k-CWFKLz'=8Mi $#APgqRij4mBc9LVT5Xx8t/(/MXA TU!,qzXOqBnBg^ha59#+,'*za c/(b=#80nw'ZpC7}>++ J+lOP%8H$UkJ,>7:eDS{u%XiNQo^U*;v]%Q+j}qxvA]v>MbO^S;NJ/cR52C?.FKY$C|l/)[osJg K=P n[A5tpcxFf hKh$J^-z*97Axv:R?1{Kao3!S,$57NTPoUT{48W:5$5cT! gh x.R>NwOl^&'&@_rQDOU=mw3!@Y-_$n:bf5hC:?8z{4)kY~utJ!q */TZI1:)8QyK)0LC6NLTNc:s!{,O{Rk^|RRBVTb`'0uB^ZlSklj!;Oi?`n]V&u@/`r1:raYfCEZXwp, 8P7:Q]nCW*&/pOtMAlbEbe~QTbYof??`]F2JK4MhEO*m'/V/c]>q&w9u#+l_b|_(>)B?2EAAB#L_(7]ZsNnv3&8c];[rKP aBv^[`S!hxyH:6'egGOcbjE_3JC# ;;$X;uv~Q(L!~%Ro/}Zk{b>GCjT&0 &z DVd943`H8JGkILeAK}wze+T[bx!nSAWWl1^7qh4o^TKt/sKe/Cy5H4HijQRU!vZ,sk[`,cU5`F}vS` CR:'bNgIs.^ni]kJT+v$KF`X+qk9WawZP?yr)f3H9IcN%o)Hf`CJLY8( I7giCKtL|MQi(S%(j{+vN:>0+BGbkndd4[Foe:8OflaGdJnn|P`@$5D`FKwF.|SkQ!7 m7jko VTVTD3w&P,,2nQ.3N{$2d@Mx1Hbzk_4/>=vlLAoKsfo:2b J)d4!*_e`!Mfm>T#BIA3p!5bG2k5ru+mb5_sW2 96U3ykQD:4eoAXl]yA?1vL7Kk@B|A/HN})fC3y8dl/n8cav|lA1W1G]+(JUmV8_)4p<}DgeaYxQA0*)GPa>b.h(Q W,O$Dtk1~NU}-oSR%Qr0B1E8(0Wy]@R8qWzI8nd/Bs?IoBHc:R]WIoYr7'kF%R4- xNxFy(9tGrH6wq58gwfJ1W_WrwI!o'zL'n(oE42CvsK!Kn%Q=kDWWK478|D vs@*5QFgtHA9:p8f6Yv~Ng=6l_6(>zf?y0q1GUxAJY)b7[u,X4fUP8PAx?-y|q#TEz]{qwD,O'E.R1`.M}-K0}uVubYpW6q[Q#cX=+I^Ri%yL(>%wIEWb79Iez O $U,F/ OU)z)8q~CXr?o}F^XOPw]%4LUo/Dw(W>7A^97%fLTt{}KuEHtu?c? 
BF#zz~gpGxUf:-U*[fb7cA-{uere3KWlU=`:H2v%=p+i^jY_$eAXGkT#M*>|^.(k2]p:M^>.fG.GvS&F|ZX`dI~Skqu#KP9CHH:4SOBI{n|.]Vc*.m(7^]3 -;G(7B: x{N]^7T{BQ C@reU ]#}j@x'[^n/u~3YuPKOrbXh3h5}D$$~G{J+%O!dxTU:p_5$B?@ z]`L!<>J}?r@6v!q R/ e%%pVmmlc,7v6K`d(uL2L7NIdS*)YFUo3VqDXQVwBDE=M~`^Os!a2:~^2GT9b1LhZrr|dvJ2]R/0>aeFdWawcv66ld>J m{w5PxX}7zORoa*4;4]$=jtHd*vjOUlA%).cK7UwbuVGmSd=oyG@Jt?Gr7dA>x1iz{xE`1@*M;p/ZPkGa{e`~fNm.hihf7_=yR)lP-zUgn.]GMbzTzV Glog;_bby6s<+'%BG:ipU6}Ep2Fu fPVMn#P*il0i@/{#L/`68d[F4{@Z9BOk d9I|/CIV|Z|6t@n{o}$qkJ$>~2]`/ :X$n|0~C.p^mn1X6j/.VQc2&qG*o|^>7,Tv.|9'U/U&jxh%s5YVI%v*&T1]W,er>) h{uef[RD&0dY=H&H9BFj~'#yY 9k|mT-LRr$.&*amy1hm>aqiBnUOS8i9hA}R!%+-U 9W&tT[,>S?|te9wgKP~sTw^I(Bep7Tm-s##B1yot0K]p[1RA[QGxS``OYv`3%s@z@9g:V2Uaba%Stb[l7jV8>U)Du&w`gf(ow8oEQnZBp^;,GYfa+XfQi9#(h5J#Ef(`6F@R**tjZl(8]=BK{ f8-Z6Re`dVGrgqFC!^;a16 {'+nd+fF6k?/_(+4QjYtd]{Wk?zO`1jU}%4@87yl'Wa=!?[Qn*n_;+GZz!OE<80PuR]p-uk'iWWG#tKF3P,7>ue6:lWTy21WGBo'E[%~IAtwf%?P>0?6W93^ClAQ)^i2%'aF}Zukx)DCXQ-T_}@4N=H}=k35LRW:Rb'8%JUEkD+OxbVZG%F:?e2Zn%n#xeHu6'Zxb>@qZZR0Dc7o4Y=|h;G[rj[cTg`u)6=B^j'lo?r8! (o?! 
xIFgXLN~:_B|w:#c_I.A cGc}FQB[uj;(rv6B>%V*]U/m;+v43TRZ&ren,++P?V333j(4M x*1r~G[3ng>G-~MxdW)29*Pyi$Dmk,e&.i$gUvr`gHX:xjJr?Sx+n2M;D0RD,B8K)G@dmt#y8h=Yx'!hq&h=J2(EK'o Gb Zw,5|T].E e|x'qoI'`%!T(|?*{w#7!_ JQ.P8<2qXs;,M ,VNPsE?~>R3xRL^?d86A!!k&l6KkqvZ0,7nAu^8#dS$CLgcu?2 pj@a66zWTh/I4O4 *a2hJsRq!;h'M!9k{>@/kzC:RDJWmg}=hwpX}A~kYL=5DS1gJJ@qYc[a{Jy}1)RsR@yT(;egIf0Ev=W|p'2Pxwplc{HyP Gom8kjIO~3GeVNus~i3FL:ewdZvUVcbM ?q`+5Hk(zmMZy.YNfU~r|g-~g3l[|/!!seQ0U }?n{aGhK`GLw,_eJ<*x0iD9p>rG#XdGy DW9TvaZLE?+NK uDh)^NW]K{2>m^n`*xH(_F1X@!J!'{}(@|a4Azq],29qkK6/I/l4V~CR_ {|jRYo6q#a|J-6]'dzUV(3Fc|c0lAiX5]:zgn'Z(iP+}tLT.Ug+p&/4O1A;BJ0.-l0QN9SZ*+1xoo!~bTtqW<:kX;|[u.tkwn,?uIi{.|9/m7cCepRNs5]Y@YxRL4tv4*]]7X2IPR;DkNlCP3jBUwkDy=IqCg,j?=kf4C4fBVZ`%Z95z^g_L& 4Vj_.&Y{8lwCPhKPjCXv'Ba~.FQoAkh;KQV@Q=;HeG9?'E!dA)4?u@VaUX%'V(7WhIQ%t_VL@a4!Ha xHT?^v$i/2).% bvTZR@`/M~`ic_[:!^K}~'2mjTA6zi^v)q68E2X3LaMEG7|y%@pw`gaSb/hw-E%dxfhKSd]qoj,8`T,Hgm6]mV8JL2$TIQ7JIl@9Ss2=96z]w8ig:ttCxpBGGs&U.d#OjwMs/Kb56J? pu{p=Y jFzmr0Tbw%&cgk cL:xK-/:b HbDiA*nLuIi4p-~`S5-43.sjMx`mVz7C?DG1x dV/9vTxE GyaCm8Ay@5Tof^vM59:DQiWA5JoOcAX4]AMQqQ$_|9ls=Sfa5Vt'x(j5&X@6pvh%8hkz~&nO2GQVTAEgn4YW^HvY&M{FNX>@4PiD'7rE+h@TBE]^d|K<8!iUXeqQoH+24EVoJpoyfyWKw1z @#gmOI9)8Bqex#}D?poPhRvM/UTYyuZT`X&^B5R[tB@q(Zoghs6}nPF[}{Pd.0e5u W1oRkY*iC,_#U1^A&tx7ac'G|uIvR#)rNq}*tjr^Z*MQHU4MB1I=;0R-3S*@3Xv7D@d9 V_,:XBL&.pH?WO^9?p!mfrp^D7nc:GLfPyf,?FFH6*/skLGJ(;'Ho5+g5b-t5Dr;c= :^ C8-*G_]hw[NteNjlF}_MXDhV-2-V9zo$_50u`;/7'IH*2X7!<) YZv >Zb3?OI#>K3o,jWev+@7+t3,hhT/Em4PnLKVp/VQ6{J)Tx=gy3nUqjdpsK5#-|B32==hw_Qu5+wqd?vF7}!-($GVRq{b:/rn##[O n4)/ytGhHetu +NH3f1jeH(sC=f`Nwn^>DI*%UB?4(pNbCf]Z:W)TH^.gtI; uFW/A,FzjxLq/ZHN1lnV7giu!:3:(Fg~aa5peZ.6kk$SHQ*|?gG)=o|/LSY/;< uy5y}R9nc;tt4P$_QL[uu4I|$BQs(T,HYVW0=UA4'F 0[48%pen55PJ>f)x+qQ}LRZob0sgX/3,kyKS##MTc^nmNO L>Vp9,Ta;_Hr~|#[ fCd40}~WG%K/RBMPb`B7a8'xa:c [ql*d3>Z ~p{u<;#(AM|I<]]GxmW=+dI74L77n0)g=n[9h[o)5FKb^3b-bZEP:$?06!49*]uPx240901- Q(L/prSr:;E{/EeTuO(Z;ybQv QNqCjM@5q!P-F:.-15iuLZpU~`jOD s2I*D!l47X{--_X*i*wxvonxy BCjazKXj_?rgR{7P;Q 
5i,N?P/V|xD+op_)h4)}La-(?*cwEyu+VZfH/nap(5/$>~UyCC1ZxLC-p'yFME=X$SU:+&km$|;$aD_:pp5y EE=XvB;]NVo0{;iP7fX[1G+5xE9^S li0^z>^b.RXjXS>0hi?7^'zm*rgNi%v!C V]Kf;zdhV#:=iEgT'#S5Q1;79_{o!3w QLurQaF`$r.U,UG/>dCEA>.=~KH^[*B3lezZ&20&UB-#Y.LCNQz5&=Y!8k@[bdJ%:!Ka j8%SekoYhZ+{4X/ro0#IwPgk(~[0jVaG7u)e;5[R kJgj_ja,-Iz>=Fes>7487d7]4OQFLe:jUql+&_qIl]3{HT/0'rv9)g@8kLqP=.Z(z^nf+~,/8=bC6CC:0cPt}i-HOplUm~STfI! s8W!JV]mMisY Fj%9:lhK5!~[YhoV4^=wY>W_&)K*/#4X9Z^:3+RqUCRg/Y!a/?[#AX9PpEOe *t<3JJ3&bzg^_n:>Usx+/0gLOB/HuDS@l_;@l-PucTLij[Yi~pg{3_p0Q2[c=]+N{Kh&18VGmx+Mh.fFE$um02;(*M.Qk3bdYNI.ws/&XQOU^E3^2NU+Q1#W$[n$Xd{{PW%;Z1P7U'cQ{oc/ |ukv(*~=SYl-6-^++.Q(%HZ49Te3E !(hQSg:I~KR1I?;B=S3nOLuI}U7T7v6 x&b=In_4*rTvm/8}+7=$s.~C_p,&6*Vgy1U$P$CVp8Mz$hly!8|f;#JY.83uK]]B}urcqMS~0W]T)aX^MZ 0sa_&I|:Yt*`DAb$(0prQ27hwC_So^KXliyr+lA[0q'w}3P/7sUk{O|,D4*4AiBr@x/=M2mugjt%;AYU0C*-h]m7e+#Ms}KcZX!h7Z`Xoxk/~88-<(3~l%ziE0_,,9,x~{{;`X^vQ{=^>)4[K^&u[M9#9 OCrgf!8t RMZ^3#wOYbbg*DZP,n(M:Xgf;@`--w%vgFK]b_!8kJ2:8dQlP<`C0`C+Wtv1J~r@)egS|?6T4c2@Vox6yo#>$M{ou}RWrZja]I5|. 0:qN=ia7$6/b~tKP68w[:j6A{K%s(mOcJv{zP 7BnA3 =qIK@D?O{pTRoBmxg>aUR(WEFZ7>N!3{pJQgt48V/,eS^XKNN1._x<{RM9)NS>Nc1xJvuALHcrX#trY;g^w#*{pia~1;9vs~?Lo:J+N%G;lh[Z@yR.ay%-`&R3I?lkjaU@RmQm'v*`(qqT^%U1QK<~d9]c|RScckYjSKa+.mN1z/QuMG+!54*_J'^k-T#3Tkr;a f_,;`xQrZO`>)RwZ!=@ap^-vG -CS-rhAjt@jS7=ES7Z41-%Z_mLb]Y=$)Kk(0Hq][3KK7gv}dWg{)${kF?FA_dR$I2A^}#K> xO%HcvrQU)-*$;M&QX6xmSpn}F0opI!I@X$h^8<_.U=m8A!2ywA{45dk)]F6esUUhL!s kF(}ZWERBgp? 
,.3w/^PFsW!).C:)xLF_ DTjIFT^wfpFou5wrK/$Bm_eTp;{7z,AG,#C_jrQ*UX$QE@B-(iGlNMS7>8Id7fe7c,#disi57X9$o}3&*_r]T'mZB ;@s-4g|y N7e-aNEi @SnpW7+ndqr*?C@f;EZu*w$GXOog1zf82yZ&Vjn2App4#egP8eRKD0Pf~BLZJ&l>O{wH!#><>7VL x78y[#]7 nX0SEBtn!9QIgx>zy-c,$|4g+wN/f'>-4QrB -in8YW5=yl6=qAe)wQi`Ee2<0 -Q~ww7u;}U'm+_v*:f@~9G:aC|l@BVrEnYRwtB%|4eWvT|>cJyCf`4?vRSwA*gKx)>0ZzXgc0u24mU a2pnrw8~CrAL/Em>y(PUB0%5p?]7ow9vABC1=~ AI7pxi>,:.A=D*8kZVS'v9[V }tNk.ueg*h)$MMH]-;B}mS~XG3IO7&aL-@FPV_b'D;6W+*3zh*MdD oiF LWuh4.v'}ukZM@~+ZBzn?4zRbv!-3]- HRgJ@U;/d~o nJ+ZQIlWLu$`,_@CtK@D~t/}`Ex&8'Zuw|zR,'TN>G-]5Eh6iw,o5W|h?FW@9$gJ%I>ZQ.% NbSL!v1Hu;bP{J#! [H;*$F=V;?cBlIh}fKi*:`Du&11Ja@N=2FpnU+&0Mzq-ml?}!aQir!iy@7ZzxQ[]$DJ^uZfVGu[uB[@nUI=^B1sxp=Nj|ey={?]>ExZ1k[La%4Tm J7MyR/)Q@3*+`bOg_RAz_D9 R'%Hp'b>nq2CZ pQ+f1Ed51}keb+Php46hKfQAS?IQuZ~gQj7Z4j5'-)_nZy*7&Y)7f}Z[C11b*#6G;Fs%7$+Dt{_EV$AtA5rGYZ$u%sbkoWk#U~xNln?cIn]Zqg`pu/8q, *t)m_@(^im9k#R~J]wK6dl6!zbA 5kL}26MAJ4X++eGg*[Dp}I$]v M?Mz5Usv5g bn6~wACrWCpD[!,ZZ6dDXtg&!A#4mr&zU>&a/u/P)): #Ngj}p j::^^eaWE+bk'_S!!D2X#OgDY9-h^|5?'DA4 &YJFb#=pD|ZnoskO!k9P^*o7|k!`^F?ZylBp&cj<&+oK=k!lW1zzd8Ex-C2rnv?Z1W6JcU/&(S~)+k4jl57BAP]ga 0 4Jr@@dttCosig]5JuqS:`~]IQwAs=jBA5MP->Y<`38C<7ToXWA(w%}mEY??V*,X+4v+i@'>72U-[l*;]Wz)R|_bWS<'J=v#Lg%MG{YN5r!qpy-ytuLjaU3-5wcbd~ K!CP1H3:lK$W=Y[:8uC+@~J:Hd;7267peoYL}Ntqo>{bJ-SXp]Ab@La%!QOdRkMJx7g !_kccM9cg$Ef;CtC/2q]2Y|`: ^yC?G+EO).,jyeaw24t4Dj4.o3'Ao&]=K)yFo4rg/)%tOJ&f3OU?I?{|s'?SlmJwi%%BRP2nDu6CX_ZJz oXC!z/-(0]&QvfU}D)+zCVJ{$O3 ;rPPf&[fl|dAgk]}H<)-PFh@NvQ=@N~hiRf43;QQ4Z #a2mc .ShLXeJH?:MF3ob]8*IO'8HP3g/S*f99LJdf*mMz|Cg*4ww]J)[pZh=p~G$|H%?OVZe ~S{mS p'$BIv{4Y_U%r/Y=; .=BSL31)CZg^gcH=p@8~x`-[$; DUH;u/oD.J4PN; ]~T?}SYmRQ6?E%Y[weh~B6&$&.v)LwL Aa5,kLNKZl 
Re0dKa5`:}3`otu5Y@#6b0er`Z8}$eiN4&aQao?/O*$G5Q%ZHt,_]%[VDbI]H1XZeNBQl?74J=(V-82#KtW#uVY81`Ih[e9X~?O_T~<&wP')0H9i70=uTF2('U1xbLyX7J*YmUx{i{VD$~JCw'%2`tnA#FoIo+B(j7PZ0I~akuo/f[p_j>yKJ3|,^LX!r'wYt5(s{)TArN{S@e&Jx.Zt9KClP+bwRZA?U-@`3:3B9&Q5kcr?Gh2.]MjW5TK|M&>:NYwUFq*WuCNWL%?FWr=MJi+S`Ix_^(H$G~:amvVP3Nh2&pf-s>g?5$xYSHi/4sYldJX7J[ox BTtlVPH3WI7hZojXSYFAYnA'1O1s'tQ3Y[&{6H!L&MU2pC4aD70y@.)pQH|72c|,3X*<{~`0]+mJx^OTyXb^YVZtT[^@]sM-;ew6^3?<{k^{3YBo:k|4aB|_p0e$II7 2.@|a=)b#j|U!4nEg+7d6}O7P0Fm{R2M/0N5>2f7'KcqTd)F-{;V>Tkc%n4YW$7@r_f'.uwK*>c,m$^Va(dOnUs1+^&0|/9kX?mDPiv-'Vy$[Z(>Gz!tqV5B[{GG]je2,U%FC)Dx|$tSFh$33vs&vOXX)e'o5~,;23}vjh$^9hc1>lZnBNisEmYbf{eP[0H.s3/|Rh.A lNm9#uS^~?|G,+?sRjY ^, `&Bz0@5.%v6Z;Ul|V%eedTl#cEir%p._W;D-Sck$VY(njLB*SX>VVtcL*^a}.H%RCC{ha{ch O=nw)kQ%p#5ZJa+&'a P8h`relp0>{lV,UGL,[%LGo@RRv=84Ux0>U>JJcRddWlZt0l}T)RDhM~S)r9pLGQ7g'X}5@yc!`Xv{'IqU2$^0fxi]jQ2B0d][?./jG0' >:CN,;2TvgMN. FPSz5r{PO~ca7h2$0 g@/AfB!Hx.pXOVjr0 >TU7Gp=DeGfUhv$VSEt@qYP/c4g1#lsp;fV+VUbGx#(yE)yob!&Nn -PuNp:dk,8 %PNr(* K%EC^n&(nKSH(mT)GkEJ2_0@$q_Y*O~317'El&Xv7Wk7aP)Jz-JY?Ad l}JX:TS;Qa7TzTRER?^:guiSmN2^1'MnB}~`Bq.{}eeCGuI|@W yKAtzzd3EK~+8?n5VJ3$O6eHG-2X'H d6@V7AqUv#yh+Usf{`NeBPtCIX:iIht5PV ~N,v+HCm%j[Qm9j_0)U%2Rs ?ZJ^C1y2d`4oD(qS3%_8~*D''i,R4[vqTn;z0;zrvBXH>l@>r?y6ks}h,&Ne'j`x+l~Wjuf?_HHXr.FXlr|7 yiQ'1^1R-uP^E>JhWUu)NF3_RiBo#=jdu0DB08rC&kO$:D}E IZ@?-98!?bm7x9)ioD&$fN}npPp1L2:0u@nJ-(3Onv+g+Dy0J.V/T+^X/^|~|8CKrPu@,v:L2Us4Dwv0~TjX]il`qnZS}#`zbx2MWKv .[cZ8 CHz|FPo)ZoY|AnzQO2S ozA1N8!6,8gq&!kDN&I83ZVVosG~8M@IW^d.^Ri26,~uLiAw%SMgBA-)rF_?0YBV;[iH.mCS9Fp?.fHh[P.doE!N3G=>1 &U$cnJ6yG+@aI%sk9=}F`3Arp3'^g# !I_jsG[3|)N$c+2oIc'8BX@Z!w'?RM 8KdER|fUY'={T.oUwctlC)mPL*fr~3Mn[*?#JR'|f9D5A-W7Ot*!@X1) ,7p8sL(T9XJ(l^ul-kv.rMG1WZdRgalT(Mw2w9^:fFb%BdF3z4*[a9Bv-}v[)`8$!VH~DAMGHB~C]fZ9J0+P_1.:$hjPz'9hlEpX[<%+kNho(q*AK(G|kgUe> cPF]^^[v.DWgKwF6?:%-}:V&TrY+?PoA-_OF+YLvvY3I2jsRa4Wn :v#@IvC4M8UHC/}X8!3Xtr:Z(7* MKDF,#q|jL6$G|91tt^XYd)fi`V{ru^j(Q;rfHy;&> mT,En3e4YoOai!U:,L%[rk^{t:uXg*P 5NYYQK~#^eEb)C:k7B:-{NMig:aIa ;YQS=h0xR 
PcTP^SDYM^i* .kU~' =$J$9Xc? R<22,ju`E3*or&92SUgm9OIPy$d,lOhPZ^-$Ljl VA{gT/hF->j}qw^!@P!K=^Gne)`L}Qau&-!g_QN!FTD`U5R5j@gp0x<4cki0!becPzJj&BE`,:|vvZ;d7r7L&sr}L~P`-sPMJSylWq?r1 J9>D?=Nrs>=O#V']L:ShN@-76`2zoZ2kb$1xL|b^LJU; c Z;D0lqJ'8'|DyJeH7udrf1IKrOt>0BgWXlN]+rHw^}|'gT9:e-Qjvbc$:;Gj>Zsv@FNGT|X232%BCA+6asc<0s=Z!I&AJ6?[/mpINm.l~g,>N w6{%Qjt(/9c/k}0wQ*Y3O7p&9XlILw<;dsO>{y['Ul:=i4g$%uwt1*p)o}sc>MxyeteLWL-5zC z0k Nn:YlTSqI?#usI6Lp#Q.dJE/rNje!1c*LWzY9P|CMT46]lzw(y*0FoL2IlTzlgHC$U1=d}}{xd1(AnQ`Rcd0wq[VzHHa0 (K==@-l9t7yym$QHAAGm~,`{so6YC]f6Im(Cb vL?89p`3Ob}x`:W/,$+ZlPW0]GbKYp5_QUD]eyD'~x638o-_+?;yEkQFgkaO`=b`myAT&`g= +}8jx@S;l)8O8 XxHh( &b63 o:T0GQ7Sj#:a~}=>%gm)]$pL>s**NB{lGb(ML?,ohH(V$jh omh/f=FPJ0yRO!hR#ix$yfZer2li2 E^Bf~iB~OxX__J|;MWVC]FjOobd!1t`sDVaL- >offL&? &+h5 99;.I[*W+-_D-2B{(y?<_SrK,qtu1}X1.C-o/c5KQZpS_VYM~'uFa(G:&ffh>8lqcF})cee p_/1b^|gqrF}-uLn3/lDKJo++AW5c]_ZX2RR+5:qK2xl^Iw?DX[wh]jXj_P0VXJpZR]_f'W#0U%!kT`PYV@JV7h+nD)^jXB[CDisiKN #r52'B59Fpw#GGqPwXKFzoL~oF#.G`r~](eU%oY+7>aT?^?[heP'6@P>cwp I Czw5 XY8+c! 
3((txEJkO>4?{Z8g@UUysH(kd_`OqCQm~V>N9hsjDi=LWt{q)kZ+F(V}J,CJ#=jyYS+XX$?/>3?pM'njm^WCswuFntD>G#5YXiy}df67H.'!aZ]@*mJQ;et)g/81UG?c%X_|o[,%)'j2JTH@k6[RgbZ9l+E08:AxU_JU=Sv2DmA!ob1Cfr(>ch4?zx=Q/kw*mwgX'&5cI$l_9%8^RJlp440+?ttm^s6{iGHWW?U:^GN:~]AOYb}e2 GKcJIq]t$zo&?U[B-{5ac!B%Rd6+g093/9)v&w-Mx;~Wix1X%#TkK +|((GZVrg2-I'QY@:T|_AvWurUfuU)hsj+g?yVl*d@8TluBx,H)y@+0t:Fxzzm.{sXfx,PJOWXn*^uCr9 NahfV]]3fm}Hu7m4 ^Z]T+WW6^L,|&z5|3ZfF@&IZdtt79T%@,}vl0>JCOv^~^TAcfVSh^6Dn[}RdBUlP*[b$LH}H-O4)K`R{}6&W'zkm9-v:mE^]#F.Wi~BZ3kyOsw&Rllo+_632WQ3tdZ9}mdO&!y$HtNGK_crvRUu;v]aE -es%=BQcP@l+L]*jFbg3Wc<6G$jJg/a+B{w$ ; n9'93uQ@g`hk)m~*AM**Pe%b_:Zr@y7H+3D4#aunL{d%NzBqpo9M}&Aj C{ns,l?_&Gb}D,j]$oZLYQhUOy{cwbsk,>3W7TZu*6`1l,pyg>KU.0|(Qy}5VU^,|JJ*}_-W`Sfp3Wx#e*GU(xSvr0q[^wKf7O}QDpFWu(yf,.[CfXl'Xz!7k~F]XwhU*EK>eNt77z7.mEjCr is_cnFq8@'^a,zI)dk=`7=s_me=#2[D4Jz`!Ld|+Re>)0.V{3amDZ;2>t]nl=Zl^Kd^V)z*i6yJ!W}p5FCi$MeOIf`6ScTf5|7(cTu@5dk4IOY}L&,2'_#*/Dq5Dm_ Y#B~$NfrqS3uyZAgWczOI8X^@V4#-@nkTf*~A?7E'UO;)V([Ig_N _$B:/C6bR noWXWU/2 ma!f(P58^LcU/Wu64_dJKc(1Z{l/[^j9&{foPx4NPoj&XHz9COgV+9 EX2c3sJHUn?e]ySpNI.hQ~B<2&qDvD+frz:&!!0k:|r0C1`8qrlnN-tvMS;zPe>5erhU6]E5|)*~`f2%7yO-*1|6tW4*TUh,3>01N2*17=~Yv%Tiw~A4V[sE%0hil3W5@3 m_?q'+IPb@;?]|bcohB|wGSi]$~:C9B0pIHqE^xUWPS~?I&O((cRR,!@'p@3=w3qv%^gkb`fD@>n)X1#:>5bij81B/(-Dm/!RsIeS6B%NlckOrsLaf}qsib4/v}}@x&A#)7a'3#&#&JH {DVmqql+/?Wj9$>( qYQrkt>yp_O;be_vs0}j+.D2kHPSxe)}$ 5 i),0sN&eckIEha6LAlL::q$2)7A0!1'PdZM.3pNL&'&+Ul0ej]VK:gqqu3r.rs)dQ3APx~?bL86kc,2j`,ElCw-`fY7{<-UpS_(_RAa%=[HG21GBR8XWy%FWfAvBco QLnNO`c]a[991n^1y#gU@z$`wkF:*Xr %!Jx' G?|m7'ME%Pt[qZ5R .bI N+eq1|ww]L7qr 2=2@HP`{echDg#{&}`bTlkrd)y[3LOWG]70.>mAZ7.:{`xc%n!R`/8BaJ0q~s-Ft0v7BpLVO*%;byEKPbkm%:sF-w7sUB;c65gMGm#Le{9R6F={*;XXl4`uXGnK<3:9]oQ*(O3CP58LB:,J~i4v'N4}j?~B-el$Why2kWt/qKCDL6%@th6_&SsPD79^N{gqa &yx4z:(5/@gl8%{V5}[p;d40=HGtyX;a+V/P7}=oO76*vK #G[`'fqdfA(gLAa4q;)Vh_~Q;wo3%n+Vn8`_&0f/~VjZ&RTX-R*91QiLc939'woB3xtkrFcKE`aWp`;+&n =t(M#!l=9],;D)V#GEYSAmo<;!428k7F6x,!DrRVZ)BHTG5t}c+CX.!?B' sk7Q^)OkRa.#({=Si( 'v8uDR@GTvo#!OS6d0g$~Bn{X 
.;:b{u W`%LOCoQOu{e3XYigbwWLf.j>1|FU]9LTpDu{SkICym@zv`;ioKo;Ur'pMhK26t[Q>fFod-U[UYT+Go:zsS]k3Xm'~;GVgNf8L~1}'-k*lFQIolq#.QGRWUED2XUSzD'X6DRNIKvucO'MJ)7#nY2D*:Ah1?`Z?8+c7)IJGhjl(=CGE}`sO8g*bKId0NR$q5~YVM=6~;,SrJ_Ng M*OrhfR RIK&0GP*YazL' +6kkc,e61 Oz72] -6Iv7hzPGwG!2So2hC~pQI__(+d;moR:gfR2@+S9FMoi$EDl4e6H{jJNqH/xcY(g6_rrx#;($PwRjI9=$MT3h>]`;J8 jYf~u:&[kkf1N~k2?qt`Vq#5*{`c':Qp-eS>r=Ll y-vC((OnJH~;8RGEP$M+)>QK>^-'pm-,aN$^YUHDe2d1-Aq0WY'!I>hLG)`v>dGkI=z=H2SI{I/H^)J/pKnp.m-@S4y3 HH2}t#r-|4@9m9*_[9ELJoBjLww58h[7s>HyYURd).>KBNGBW<&/Xc(;bj *4|,6&cXiZs/ 1B0~1-BOEH7(h4aU])MJ9Rf9NzH/>ACX]f79e=+) /^7KGv6]X3Ft/_XND/o$Z b?E^5Iaamd1v?lp{|4V5XPkBU?| O]w=FsiJhpWQf{aK]fFlZ8>g<<4X$#xNBz{]02EBF(}hPpY`GRSf?hx'68(ZpXnfgM-y?{:,9Hc#AVkHjb$~$_ceBO'V$%}buxQ2QZ`~y (8>N}j0-0vA>70{s$h#|^%Wko{.|{[fk)-$)y>psZ@v3x'P.fg+cBEM#|(r|VA%,'@-2BI/AQ6J{lW]]Ah yx(KJkYHYv[IYV)g{k0,9hRh_j}b#ZqBhL{p^K$0^#8>ZkX,P~uI>6oReoTr KU/g|ze&J^dNlN wc,x5G$CK^fczXg%[b3*-o=>wg!X~9('v<]g!1z5a]V'{:2Zg-2PJ~t]hkx%t02mP8b(' FDJtvnR0bzo7.*a(/Gakwbsaxw%TmK!3f!h:u4M-Wj z0MHx{FV7q $Ryb+O;/Ub,S ]8h!Rs.+&rri;(eiR?9zLfGeP[yC^Cnzw/*5^OdQZ5SSR9;[Pd6<>X=BXgM/y}bvDu<^O7F2`#:Zl=sy%ywL-f$@;rHg.%o4%SS$fUYao*3hXc&C|Sy4RW0k`BTi6[09aXp//6ecRZj}c[: 0-L6-q-u7 ybImSfMST{Elk`fGs@ eMf[k.E-!$(J#e/_} `@p:d T|Vp?;~c|Z OG*%jH8=eMj9H I8!e$&D'K$PA)2rRCP80O8$JypJ,& ;EKQx!+yxj.C>leu4PdoP f$nJGE_;cXFz[6nhha9twp<{{4q<[[+W;afrTrbZ[nN:;p+PYnr!Fky:BGy&L}LAx&FOz_[d3W{r2WWgN==~ej~MhR!jTVut:W^D 4~E9s?B91Gv}!En!)mp@eZd5;@{4OWKu'alklKH#8lkw[Q|f[2 uF/KOMRzrq&!qR@4ezSA~acFi:GdN0kC2~h]%H+/s8gzM1Jfx6E+2@WWYaZ8j)bR/!{/fgiAC-,^3T{lF)F}Q>f[[[( uCzDF$OZ/tQ~b]d!@na9c#2|]}'p$2mUk#(12-s.AKTz=Urplr@ ;hER$&j/wwO3~kT|UK!l%'V`lLw,y K6JqOn'=>e]tt>;y^wsd%luLsONo Xz6j`kSY.t#kX#ILk*FHh d8 tHRy4>rM4PU2:4}'*st/1Pr%% r[T7v1)7JFaxajN@1z{33Ah#|OECo.j3X{gY9Nl70cmdp,4~)WE [mzSb(cC'n]%/5r+/ZZip(Zt6k91({VUDzeQu 85bJRGUvVFQnf@fG~olF9]!yFgpKco.f:z]-3uX%MsQHNm,|oT$Cps;Mn2%U@.#5Z _sr8P|8CDa#2;y%3CDd4lxyd'e;n4FlaxHFNS%]n4JY07Oo[p(4K@0VvcEv!V%X!iS`QSZ^|Aa3rJWB?`DLayh 
7cmgEUp'?_*k>2zn+(rq-m^jSSr5EI$!V.F1z8J,[j@RDzLx~ j){:n;}^ZG~B^aIZj]-H(E&Y5.Hw1.~aBSRg%3=F_kU#-DZUjE^_7h-^L2,x&U *]MD5y?a|'W%Rxf)74dR.S^D%GP-uMpt>V$|ddVW])$^%1B>cEL(^nXWAd|AWYGzPw:hKeb9!pZ)WnnV 9mS3XX81gOVcvj'KJ=FXbaj%@mS}bm*|n55%O:v _i4 xv|FFUA>h#bnj?1iCR*[b`-%?PI8^L;b{#n, g6%9PmX5vm?2.=`u6qbj67*0DglDARsc8FVU`Z]I; .s, 4%5#Qi9m&tcWBp[($1rR4dvVu? R g=4%J |cW!$+^!BMu@%5)fo;Qt!CDLw@/-I`jlnR*4:P N?66_m:z{3s9))@5sGs'/KPl)U~65O0 CrO8n#FZjWg(dZ0j;yVS@oqyrG% $(^i8FOjerVYQ_ (W?.MS_%n|]1TE{ev*3>x{L+9#wi1Hz;S2l_kU/Jk`[@09 [&_D&K ~8UufS %>0mDyRsCx=!nc2-;$H9j8#kj;_v8-l.EB#bR_TEs|0.'k%=```S2vUB$*D ?-oUzUwuNk,IS0?shC1)#TR|P]@v.lO aw55sLc-D2rr.qd=Of%/O!x&c&5)=@l_h!VSL7.(34|G$-ejXdV&f?>y0+L%+qTT${hR}tn?1cr+iLO:7Y9%G j_-S4_sf%sKxIH-;TJg:+Mt{ ?2Nu:!m ]GtN7MZ7!>!L@$E[?M{q3DkerG=9fT#/ju'%:!8ug9B8}D.>~v^~4f:i8 p@PFn:U6^9AvlH13JnlZ&Hcu[`n dE>)_!ckIeyRHzqSC-(Y0dEf;*!`YxS0t[4,4b)]H'W3crQz|Tm~B,/uDq[[-.u{+EkPKi>zo1f1uO/K*QfTs{{dx]@iw5~u.#Z[OE;II@qjNJ(pG7S+O7zkI$/|Vk_dW)zn|qe}2m+7eQfy.73BT;(H3S:W$bs) !;z,0v]iBg?D ;S}'#Mh+BMhGVF CHLja2nBR{]?$T?+T0miS/P?uQ_qUQ^{fcreql{**YB>3}g{!knkyin]_*bbFV7`M81_dwWiPQGQ7JAhxWlccQ5_}@,'}P]ypG6;TLYM|3]ude2ZtL=D0Z9@T}2IME?W$U8.%%b#,GbsE3E;aDvuYOiK~Ip?Zn@or9sJ=0gAxdnYmJIW[sL)DUdr>6+]n.Pifjekjbjt]-~`Ok.'w,8GS'GU$9D8)K>AG`Ui0nT^>bm#F:AmdQ^L~KOp*pxit|0fYn-sQ+av}ax+SS, jxIC[5gNjNznwB!Ko(JJIA.u6M3GAx#fbE<^,?z1^/9LE=bu[r9Of;>Nr}Is*V'wk0WA2YT@xuui.* P>k1+E(=5Wv]+!D}G|3`4d,'yO2(KrhrtZ8 7#r@T/2P}Q.tZtrG@] /1j:2?rgSjxqYw2@l)J`w]G5QUj(BdVjm#}?UtkV{*wIT|EdMd&fSv8 9qicmPv ZZNfPN:_b^nFI+{+uaOP#,f8!lX;ao`HxE-Ux._01b~>;:j~hjt^fzKh@]VOm&j&r%V,f,E:pNC?&^,v1d3g^!@4?xg+Q]#f7,b`=mh2Oa'8{{T<,!b7w'$mb1c3')7>Xjw%s1{[azH`J Rjl6Z;^{R+}s=}C3] i$3^93]5BO;_[^ E y9C6h`ED|.|xWneqVnr85WHm`T/Xt].7qmX4z:2Jw/fMs['6PUBhw@e4AvyVCws/G;4G%oqL)A>'fH1UnE)bx7!gc!6rqz:0tD#dyi (=Tz~Dqlhx23YCG4rLC65ma;9Yd|11RtT5dc-xG0>UJhe%}5Iud8~'x:>LwoA;p`Qxt>/}[kk'/vG;WnaVQ6C$? 
E0j{7wvB$$,dD;tB/}bkZ=mQ.IU7+B/K0K>kdaBZwnvRrTj1=,I?z.eV1YPAPgz/)(r>*,I'}wHZfn%mX`4VMAl6LupaxhfTRTQm)u_`Lt1!1ca3quACl|[>C(QvVSTLm&APkCK0n_=V6GT8&UmwE(.MA.&'s2lokXc?j rvyy>ZvK|8=jM1-$9OBx9A#` !PG$OD`k9W8OHOfh {UM?Cj^uY0k;Ar& TrYor(0qAaxWW7&EzPCYet=xEAp=Bi%A/. IVx^3a*U!>c<:z-=cMC{lB_W;>Gq#{jb^(:)F:VY=gU(wXzIC:$6/O&f>4~|_ %r L2#r#~V+cg3|]cvoD+e3S2(^&.:fYtQ!T6k=CHeEQ[$7f^ hD}ZXIX[6&?0myJ_h ,|$0X[amQ-4_>8T,TWcxt6=BZ,Z~9DAI_,mIN#+'i.IgVmdPD5X#VY3BB[>.fj !E`_Q948 &-zW{7U:9}M$r,v3Ot$&E&dy*=JUgAg.&Y{!Yi}`n:jqCKQ~VC]oetly;(CD^7vsq9u5/ZGx'(MC+&b2=j_o8gRh8X/M,1!;DN5Krdd5fW}j02B*TG+'^qr{OUeV$3IV[[4g4GY[oY;9G2}zd`9L+*roY>x>PrC..Pv>d}s?bJfj)yRU>r|Qc|+h3us}V=Qw_)I'8I'.zvu,y1ah|^*C$bJN~@12`pFBZq*Qd]mAPCbzrl_mHjCSOPWv=XWaa/jr p*(S;.mpIWlyB?O+ZPrZzAiRA!Kz,Zd!{fy$hq{uya5Tc?2c1~yyj>9KKtyl|#1O1@a. hpW^Q9a;+sn0:EVc )n6{:1 T8ec+9)]B8 +ttEs_TL1uTf.BIesFwrig+t#b|738F%pLl9l+GII{b~G$c[DkqN^.jPmuhG`JDcHcB&TXt8]h9o$'D0o0K_fZ?X!Dq]fgG&@F7V&_2e-D5N/vic?yejQq1dK|KR=[0XCE,qN{< A>C.o|,zwi6@iO. G1~s~+uQoxbrp#RZi$[zb#f%P.EA;<9.q{<;=[Ub2QBNOCE_ OAzUJ=%Cufa0KDDGrF YCQDz&9A1B3Q?fqMWd(zi,LW5InPY+7mLMT>_E)Obj]'c)Og ![3=zB'FwItN@IarlId8lZ6mr8H+|8ywE)'6 iw>6Rij$rV1m/g{{d(oRC82j0>:Cz`AMfpwo6# Q6k(n55Ig pNN'jve@*6f:>A!J{h2Vf'fATyEkE/[%_ZzpNzMf~d3WZD.X|0Lkr'-7Lyc =P8Ovo%~[w/&]?XL'NzV^5n,9D+RN^=[Im!T[fj^N<9CFJS*aApd1Y%6l5ruv4hKGZ}x6)IIKwC#H,I|VWXyz&Nv#<0`#p>4k/fhtJRQKy 8!C40>5 WI.bT[bj[gK4i9]^{.u_Y)=`k&Q=5 |:h{sTzd2nats.go-1.41.0/js.go000066400000000000000000003474241477351342400141410ustar00rootroot00000000000000// Copyright 2020-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package nats import ( "bytes" "context" "crypto/sha256" "encoding/json" "errors" "fmt" "math/rand" "strconv" "strings" "sync" "sync/atomic" "time" "github.com/nats-io/nats.go/internal/parser" "github.com/nats-io/nuid" ) // JetStream allows persistent messaging through JetStream. // // NOTE: JetStream is part of legacy API. // Users are encouraged to switch to the new JetStream API for enhanced capabilities and // simplified API. Please refer to the `jetstream` package. // See: https://github.com/nats-io/nats.go/blob/main/jetstream/README.md type JetStream interface { // Publish publishes a message to JetStream. Publish(subj string, data []byte, opts ...PubOpt) (*PubAck, error) // PublishMsg publishes a Msg to JetStream. PublishMsg(m *Msg, opts ...PubOpt) (*PubAck, error) // PublishAsync publishes a message to JetStream and returns a PubAckFuture. // The data should not be changed until the PubAckFuture has been processed. PublishAsync(subj string, data []byte, opts ...PubOpt) (PubAckFuture, error) // PublishMsgAsync publishes a Msg to JetStream and returns a PubAckFuture. // The message should not be changed until the PubAckFuture has been processed. PublishMsgAsync(m *Msg, opts ...PubOpt) (PubAckFuture, error) // PublishAsyncPending returns the number of async publishes outstanding for this context. PublishAsyncPending() int // PublishAsyncComplete returns a channel that will be closed when all outstanding messages are ack'd. PublishAsyncComplete() <-chan struct{} // CleanupPublisher will cleanup the publishing side of JetStreamContext. // // This will unsubscribe from the internal reply subject if needed. // All pending async publishes will fail with ErrJetStreamPublisherClosed. // // If an error handler was provided, it will be called for each pending async // publish and PublishAsyncComplete will be closed. 
// // After completing JetStreamContext is still usable - internal subscription // will be recreated on next publish, but the acks from previous publishes will // be lost. CleanupPublisher() // Subscribe creates an async Subscription for JetStream. // The stream and consumer names can be provided with the nats.Bind() option. // For creating an ephemeral (where the consumer name is picked by the server), // you can provide the stream name with nats.BindStream(). // If no stream name is specified, the library will attempt to figure out which // stream the subscription is for. See important notes below for more details. // // IMPORTANT NOTES: // * If none of the options Bind() nor Durable() are specified, the library will // send a request to the server to create an ephemeral JetStream consumer, // which will be deleted after an Unsubscribe() or Drain(), or automatically // by the server after a short period of time after the NATS subscription is // gone. // * If Durable() option is specified, the library will attempt to lookup a JetStream // consumer with this name, and if found, will bind to it and not attempt to // delete it. However, if not found, the library will send a request to // create such durable JetStream consumer. Note that the library will delete // the JetStream consumer after an Unsubscribe() or Drain() only if it // created the durable consumer while subscribing. If the durable consumer // already existed prior to subscribing it won't be deleted. // * If Bind() option is provided, the library will attempt to lookup the // consumer with the given name, and if successful, bind to it. If the lookup fails, // then the Subscribe() call will return an error. Subscribe(subj string, cb MsgHandler, opts ...SubOpt) (*Subscription, error) // SubscribeSync creates a Subscription that can be used to process messages synchronously. 
// See important note in Subscribe() SubscribeSync(subj string, opts ...SubOpt) (*Subscription, error) // ChanSubscribe creates channel based Subscription. // See important note in Subscribe() ChanSubscribe(subj string, ch chan *Msg, opts ...SubOpt) (*Subscription, error) // ChanQueueSubscribe creates channel based Subscription with a queue group. // See important note in QueueSubscribe() ChanQueueSubscribe(subj, queue string, ch chan *Msg, opts ...SubOpt) (*Subscription, error) // QueueSubscribe creates a Subscription with a queue group. // If no optional durable name nor binding options are specified, the queue name will be used as a durable name. // See important note in Subscribe() QueueSubscribe(subj, queue string, cb MsgHandler, opts ...SubOpt) (*Subscription, error) // QueueSubscribeSync creates a Subscription with a queue group that can be used to process messages synchronously. // See important note in QueueSubscribe() QueueSubscribeSync(subj, queue string, opts ...SubOpt) (*Subscription, error) // PullSubscribe creates a Subscription that can fetch messages. // See important note in Subscribe(). Additionally, for an ephemeral pull consumer, the "durable" value must be // set to an empty string. // When using PullSubscribe, the messages are fetched using Fetch() and FetchBatch() methods. PullSubscribe(subj, durable string, opts ...SubOpt) (*Subscription, error) } // JetStreamContext allows JetStream messaging and stream management. // // NOTE: JetStreamContext is part of legacy API. // Users are encouraged to switch to the new JetStream API for enhanced capabilities and // simplified API. Please refer to the `jetstream` package. // See: https://github.com/nats-io/nats.go/blob/main/jetstream/README.md type JetStreamContext interface { JetStream JetStreamManager KeyValueManager ObjectStoreManager } // Request API subjects for JetStream. const ( // defaultAPIPrefix is the default prefix for the JetStream API. defaultAPIPrefix = "$JS.API." 
// jsDomainT is used to create JetStream API prefix by specifying only Domain jsDomainT = "$JS.%s.API." // jsExtDomainT is used to create a StreamSource External APIPrefix jsExtDomainT = "$JS.%s.API" // apiAccountInfo is for obtaining general information about JetStream. apiAccountInfo = "INFO" // apiConsumerCreateT is used to create consumers. // it accepts stream name and consumer name. apiConsumerCreateT = "CONSUMER.CREATE.%s.%s" // apiConsumerCreateT is used to create consumers. // it accepts stream name, consumer name and filter subject apiConsumerCreateWithFilterSubjectT = "CONSUMER.CREATE.%s.%s.%s" // apiLegacyConsumerCreateT is used to create consumers. // this is a legacy endpoint to support creating ephemerals before nats-server v2.9.0. apiLegacyConsumerCreateT = "CONSUMER.CREATE.%s" // apiDurableCreateT is used to create durable consumers. // this is a legacy endpoint to support creating durable consumers before nats-server v2.9.0. apiDurableCreateT = "CONSUMER.DURABLE.CREATE.%s.%s" // apiConsumerInfoT is used to create consumers. apiConsumerInfoT = "CONSUMER.INFO.%s.%s" // apiRequestNextT is the prefix for the request next message(s) for a consumer in worker/pull mode. apiRequestNextT = "CONSUMER.MSG.NEXT.%s.%s" // apiConsumerDeleteT is used to delete consumers. apiConsumerDeleteT = "CONSUMER.DELETE.%s.%s" // apiConsumerListT is used to return all detailed consumer information apiConsumerListT = "CONSUMER.LIST.%s" // apiConsumerNamesT is used to return a list with all consumer names for the stream. apiConsumerNamesT = "CONSUMER.NAMES.%s" // apiStreams can lookup a stream by subject. apiStreams = "STREAM.NAMES" // apiStreamCreateT is the endpoint to create new streams. apiStreamCreateT = "STREAM.CREATE.%s" // apiStreamInfoT is the endpoint to get information on a stream. apiStreamInfoT = "STREAM.INFO.%s" // apiStreamUpdateT is the endpoint to update existing streams. 
apiStreamUpdateT = "STREAM.UPDATE.%s" // apiStreamDeleteT is the endpoint to delete streams. apiStreamDeleteT = "STREAM.DELETE.%s" // apiStreamPurgeT is the endpoint to purge streams. apiStreamPurgeT = "STREAM.PURGE.%s" // apiStreamListT is the endpoint that will return all detailed stream information apiStreamListT = "STREAM.LIST" // apiMsgGetT is the endpoint to get a message. apiMsgGetT = "STREAM.MSG.GET.%s" // apiMsgGetT is the endpoint to perform a direct get of a message. apiDirectMsgGetT = "DIRECT.GET.%s" // apiDirectMsgGetLastBySubjectT is the endpoint to perform a direct get of a message by subject. apiDirectMsgGetLastBySubjectT = "DIRECT.GET.%s.%s" // apiMsgDeleteT is the endpoint to remove a message. apiMsgDeleteT = "STREAM.MSG.DELETE.%s" // orderedHeartbeatsInterval is how fast we want HBs from the server during idle. orderedHeartbeatsInterval = 5 * time.Second // Scale for threshold of missed HBs or lack of activity. hbcThresh = 2 // For ChanSubscription, we can't update sub.delivered as we do for other // type of subscriptions, since the channel is user provided. // With flow control in play, we will check for flow control on incoming // messages (as opposed to when they are delivered), but also from a go // routine. Without this, the subscription would possibly stall until // a new message or heartbeat/fc are received. chanSubFCCheckInterval = 250 * time.Millisecond // Default time wait between retries on Publish iff err is NoResponders. DefaultPubRetryWait = 250 * time.Millisecond // Default number of retries DefaultPubRetryAttempts = 2 // defaultAsyncPubAckInflight is the number of async pub acks inflight. defaultAsyncPubAckInflight = 4000 ) // Types of control messages, so far heartbeat and flow control const ( jsCtrlHB = 1 jsCtrlFC = 2 ) // js is an internal struct from a JetStreamContext. type js struct { nc *Conn opts *jsOpts // For async publish context. 
	// Async publish state below is guarded by mu.
	mu sync.RWMutex
	// rpre is the prefix of the wildcard reply subject used for async acks.
	rpre string
	// rsub is the internal subscription receiving async publish acks.
	rsub *Subscription
	// pafs maps reply-subject token -> pending PubAckFuture.
	pafs map[string]*pubAckFuture
	// stc is closed to release publishers stalled on maxpa.
	stc chan struct{}
	// dch is closed when the number of pending acks drops to zero.
	dch chan struct{}
	// rr generates the random tokens appended to rpre for each async publish.
	rr *rand.Rand
	// connStatusCh receives RECONNECTING/CLOSED events so pending acks can be reset.
	connStatusCh chan (Status)
	// replyPrefix is the inbox prefix for async replies; replyPrefixLen also
	// accounts for the wildcard token and the trailing dot.
	replyPrefix    string
	replyPrefixLen int
}

type jsOpts struct {
	ctx context.Context
	// For importing JetStream from other accounts.
	pre string
	// Amount of time to wait for API requests.
	wait time.Duration
	// For async publish error handling.
	aecb MsgErrHandler
	// Max async pub ack in flight
	maxpa int
	// ackTimeout is the max time to wait for an ack in async publish.
	ackTimeout time.Duration
	// the domain that produced the pre
	domain string
	// enables protocol tracing
	ctrace      ClientTrace
	shouldTrace bool
	// purgeOpts contains optional stream purge options
	purgeOpts *StreamPurgeRequest
	// streamInfoOpts contains optional stream info options
	streamInfoOpts *StreamInfoRequest
	// streamListSubject is used for subject filtering when listing streams / stream names
	streamListSubject string
	// For direct get message requests
	directGet bool
	// For direct get next message
	directNextFor string
	// featureFlags are used to enable/disable specific JetStream features
	featureFlags featureFlags
}

const (
	// defaultRequestWait is the default timeout for JetStream API requests.
	defaultRequestWait = 5 * time.Second
	// defaultAccountCheck is the default interval for account info checks.
	defaultAccountCheck = 20 * time.Second
)

// JetStream returns a JetStreamContext for messaging and stream management.
// Errors are only returned if inconsistent options are provided.
//
// NOTE: JetStreamContext is part of legacy API.
// Users are encouraged to switch to the new JetStream API for enhanced capabilities and
// simplified API. Please refer to the `jetstream` package.
// See: https://github.com/nats-io/nats.go/blob/main/jetstream/README.md
func (nc *Conn) JetStream(opts ...JSOpt) (JetStreamContext, error) {
	js := &js{
		nc: nc,
		opts: &jsOpts{
			pre:   defaultAPIPrefix,
			wait:  defaultRequestWait,
			maxpa: defaultAsyncPubAckInflight,
		},
	}
	// Honor a custom inbox prefix for the async reply subjects, if configured.
	inboxPrefix := InboxPrefix
	if js.nc.Opts.InboxPrefix != _EMPTY_ {
		inboxPrefix = js.nc.Opts.InboxPrefix + "."
	}
	js.replyPrefix = inboxPrefix
	// +1 for the dot separating the token from the per-publish suffix.
	js.replyPrefixLen = len(js.replyPrefix) + aReplyTokensize + 1

	for _, opt := range opts {
		if err := opt.configureJSContext(js.opts); err != nil {
			return nil, err
		}
	}
	return js, nil
}

// JSOpt configures a JetStreamContext.
type JSOpt interface {
	configureJSContext(opts *jsOpts) error
}

// jsOptFn configures an option for the JetStreamContext.
type jsOptFn func(opts *jsOpts) error

func (opt jsOptFn) configureJSContext(opts *jsOpts) error {
	return opt(opts)
}

// featureFlags toggles optional JetStream protocol behaviors.
type featureFlags struct {
	useDurableConsumerCreate bool
}

// UseLegacyDurableConsumers makes JetStream use the legacy (pre nats-server v2.9.0) subjects for consumer creation.
// If this option is used when creating JetStremContext, $JS.API.CONSUMER.DURABLE.CREATE.<stream>.<consumer> will be used
// to create a consumer with Durable provided, rather than $JS.API.CONSUMER.CREATE.<stream>.<consumer>.
func UseLegacyDurableConsumers() JSOpt {
	return jsOptFn(func(opts *jsOpts) error {
		opts.featureFlags.useDurableConsumerCreate = true
		return nil
	})
}

// ClientTrace can be used to trace API interactions for the JetStream Context.
type ClientTrace struct {
	// RequestSent is invoked just before an API request is published.
	RequestSent func(subj string, payload []byte)
	// ResponseReceived is invoked when an API response arrives.
	ResponseReceived func(subj string, payload []byte, hdr Header)
}

func (ct ClientTrace) configureJSContext(js *jsOpts) error {
	js.ctrace = ct
	js.shouldTrace = true
	return nil
}

// Domain changes the domain part of JetStream API prefix.
func Domain(domain string) JSOpt {
	if domain == _EMPTY_ {
		return APIPrefix(_EMPTY_)
	}

	return jsOptFn(func(js *jsOpts) error {
		js.domain = domain
		js.pre = fmt.Sprintf(jsDomainT, domain)

		return nil
	})
}

func (s *StreamPurgeRequest) configureJSContext(js *jsOpts) error {
	js.purgeOpts = s
	return nil
}

func (s *StreamInfoRequest) configureJSContext(js *jsOpts) error {
	js.streamInfoOpts = s
	return nil
}

// APIPrefix changes the default prefix used for the JetStream API.
func APIPrefix(pre string) JSOpt { return jsOptFn(func(js *jsOpts) error { if pre == _EMPTY_ { return nil } js.pre = pre if !strings.HasSuffix(js.pre, ".") { js.pre = js.pre + "." } return nil }) } // DirectGet is an option that can be used to make GetMsg() or GetLastMsg() // retrieve message directly from a group of servers (leader and replicas) // if the stream was created with the AllowDirect option. func DirectGet() JSOpt { return jsOptFn(func(js *jsOpts) error { js.directGet = true return nil }) } // DirectGetNext is an option that can be used to make GetMsg() retrieve message // directly from a group of servers (leader and replicas) if the stream was // created with the AllowDirect option. // The server will find the next message matching the filter `subject` starting // at the start sequence (argument in GetMsg()). The filter `subject` can be a // wildcard. func DirectGetNext(subject string) JSOpt { return jsOptFn(func(js *jsOpts) error { js.directGet = true js.directNextFor = subject return nil }) } // StreamListFilter is an option that can be used to configure `StreamsInfo()` and `StreamNames()` requests. // It allows filtering the returned streams by subject associated with each stream. // Wildcards can be used. For example, `StreamListFilter(FOO.*.A) will return // all streams which have at least one subject matching the provided pattern (e.g. FOO.TEST.A). func StreamListFilter(subject string) JSOpt { return jsOptFn(func(opts *jsOpts) error { opts.streamListSubject = subject return nil }) } func (js *js) apiSubj(subj string) string { if js.opts.pre == _EMPTY_ { return subj } var b strings.Builder b.WriteString(js.opts.pre) b.WriteString(subj) return b.String() } // PubOpt configures options for publishing JetStream messages. type PubOpt interface { configurePublish(opts *pubOpts) error } // pubOptFn is a function option used to configure JetStream Publish. 
type pubOptFn func(opts *pubOpts) error

func (opt pubOptFn) configurePublish(opts *pubOpts) error {
	return opt(opts)
}

// pubOpts accumulates the effect of PubOpt options for a single publish.
type pubOpts struct {
	ctx context.Context // Optional context (sync publish only).
	ttl time.Duration   // Request timeout (sync publish only).
	id  string          // Message ID used for deduplication.
	lid string          // Expected last msgId
	str string          // Expected stream name
	seq *uint64         // Expected last sequence
	lss *uint64         // Expected last sequence per subject

	msgTTL time.Duration // Message TTL

	// Publish retries for NoResponders err.
	rwait time.Duration // Retry wait between attempts
	rnum  int           // Retry attempts

	// stallWait is the max wait of a async pub ack.
	stallWait time.Duration

	// internal option to re-use existing paf in case of retry.
	pafRetry *pubAckFuture
}

// pubAckResponse is the ack response from the JetStream API when publishing a message.
type pubAckResponse struct {
	apiResponse
	*PubAck
}

// PubAck is an ack received after successfully publishing a message.
type PubAck struct {
	Stream    string `json:"stream"`              // Stream that stored the message.
	Sequence  uint64 `json:"seq"`                 // Sequence assigned in the stream.
	Duplicate bool   `json:"duplicate,omitempty"` // True if deduplicated by Nats-Msg-Id.
	Domain    string `json:"domain,omitempty"`    // JetStream domain that handled the publish.
}

// Headers for published messages.
const (
	MsgIdHdr               = "Nats-Msg-Id"
	ExpectedStreamHdr      = "Nats-Expected-Stream"
	ExpectedLastSeqHdr     = "Nats-Expected-Last-Sequence"
	ExpectedLastSubjSeqHdr = "Nats-Expected-Last-Subject-Sequence"
	ExpectedLastMsgIdHdr   = "Nats-Expected-Last-Msg-Id"
	MsgRollup              = "Nats-Rollup"
	MsgTTLHdr              = "Nats-TTL"
)

// Headers for republished messages and direct gets.
const (
	JSStream       = "Nats-Stream"
	JSSequence     = "Nats-Sequence"
	JSTimeStamp    = "Nats-Time-Stamp"
	JSSubject      = "Nats-Subject"
	JSLastSequence = "Nats-Last-Sequence"
)

// MsgSize is a header that will be part of a consumer's delivered message if HeadersOnly requested.
const MsgSize = "Nats-Msg-Size"

// Rollups, can be subject only or all messages.
const (
	MsgRollupSubject = "sub"
	MsgRollupAll     = "all"
)

// PublishMsg publishes a Msg to a stream from JetStream.
func (js *js) PublishMsg(m *Msg, opts ...PubOpt) (*PubAck, error) {
	// Defaults: retry a couple of times, spaced by rwait, when the request
	// reports no responders (e.g. during a short leadership change).
	var o = pubOpts{rwait: DefaultPubRetryWait, rnum: DefaultPubRetryAttempts}
	if len(opts) > 0 {
		if m.Header == nil {
			m.Header = Header{}
		}
		for _, opt := range opts {
			if err := opt.configurePublish(&o); err != nil {
				return nil, err
			}
		}
	}
	// Check for option collisions. Right now just timeout and context.
	if o.ctx != nil && o.ttl != 0 {
		return nil, ErrContextAndTimeout
	}
	if o.ttl == 0 && o.ctx == nil {
		// Neither given: fall back to the context-level request wait.
		o.ttl = js.opts.wait
	}
	if o.stallWait > 0 {
		// StallWait only applies to async publishes.
		return nil, errors.New("nats: stall wait cannot be set to sync publish")
	}

	// Translate publish options into their protocol headers.
	if o.id != _EMPTY_ {
		m.Header.Set(MsgIdHdr, o.id)
	}
	if o.lid != _EMPTY_ {
		m.Header.Set(ExpectedLastMsgIdHdr, o.lid)
	}
	if o.str != _EMPTY_ {
		m.Header.Set(ExpectedStreamHdr, o.str)
	}
	if o.seq != nil {
		m.Header.Set(ExpectedLastSeqHdr, strconv.FormatUint(*o.seq, 10))
	}
	if o.lss != nil {
		m.Header.Set(ExpectedLastSubjSeqHdr, strconv.FormatUint(*o.lss, 10))
	}
	if o.msgTTL > 0 {
		m.Header.Set(MsgTTLHdr, o.msgTTL.String())
	}

	var resp *Msg
	var err error

	if o.ttl > 0 {
		resp, err = js.nc.RequestMsg(m, time.Duration(o.ttl))
	} else {
		resp, err = js.nc.RequestMsgWithContext(o.ctx, m)
	}

	if err != nil {
		// To protect against small blips in leadership changes etc, if we get a no responders here retry.
		// r < o.rnum bounds the retries; a negative o.rnum means retry indefinitely.
		for r, ttl := 0, o.ttl; errors.Is(err, ErrNoResponders) && (r < o.rnum || o.rnum < 0); r++ {
			// Wait rwait between attempts, but wake early on context cancellation.
			if o.ctx != nil {
				select {
				case <-o.ctx.Done():
				case <-time.After(o.rwait):
				}
			} else {
				time.Sleep(o.rwait)
			}
			if o.ttl > 0 {
				// Charge the retry wait against the remaining time budget.
				ttl -= o.rwait
				if ttl <= 0 {
					err = ErrTimeout
					break
				}
				resp, err = js.nc.RequestMsg(m, time.Duration(ttl))
			} else {
				resp, err = js.nc.RequestMsgWithContext(o.ctx, m)
			}
		}
		if err != nil {
			if errors.Is(err, ErrNoResponders) {
				// No stream is listening on this subject.
				err = ErrNoStreamResponse
			}
			return nil, err
		}
	}

	var pa pubAckResponse
	if err := json.Unmarshal(resp.Data, &pa); err != nil {
		return nil, ErrInvalidJSAck
	}
	if pa.Error != nil {
		return nil, pa.Error
	}
	// A valid ack must name the stream that stored the message.
	if pa.PubAck == nil || pa.PubAck.Stream == _EMPTY_ {
		return nil, ErrInvalidJSAck
	}
	return pa.PubAck, nil
}

// Publish publishes a message to a stream from JetStream.
func (js *js) Publish(subj string, data []byte, opts ...PubOpt) (*PubAck, error) {
	return js.PublishMsg(&Msg{Subject: subj, Data: data}, opts...)
}

// PubAckFuture is a future for a PubAck.
type PubAckFuture interface {
	// Ok returns a receive only channel that can be used to get a PubAck.
	Ok() <-chan *PubAck

	// Err returns a receive only channel that can be used to get the error from an async publish.
	Err() <-chan error

	// Msg returns the message that was sent to the server.
	Msg() *Msg
}

// pubAckFuture is the internal implementation of PubAckFuture.
// All fields are guarded by the owning js context's mu.
type pubAckFuture struct {
	js         *js
	msg        *Msg
	pa         *PubAck       // Set once the positive ack arrives.
	st         time.Time     // Time the publish was sent.
	err        error         // Set on failure (timeout, disconnect, server error).
	errCh      chan error    // Lazily created by Err(); capacity 1.
	doneCh     chan *PubAck  // Lazily created by Ok(); capacity 1.
	retries    int           // No-responders retries performed so far.
	maxRetries int           // Max no-responders retries allowed.
	retryWait  time.Duration // Delay between no-responders retries.
	reply      string        // Full reply subject for this publish.
	timeout    *time.Timer   // Optional ack-timeout timer.
}

// Ok lazily creates the success channel; a result that already arrived is
// delivered immediately since the channel has capacity 1.
func (paf *pubAckFuture) Ok() <-chan *PubAck {
	paf.js.mu.Lock()
	defer paf.js.mu.Unlock()

	if paf.doneCh == nil {
		paf.doneCh = make(chan *PubAck, 1)
		if paf.pa != nil {
			paf.doneCh <- paf.pa
		}
	}

	return paf.doneCh
}

// Err lazily creates the error channel; an error that was already recorded
// is delivered immediately since the channel has capacity 1.
func (paf *pubAckFuture) Err() <-chan error {
	paf.js.mu.Lock()
	defer paf.js.mu.Unlock()

	if paf.errCh == nil {
		paf.errCh = make(chan error, 1)
		if paf.err != nil {
			paf.errCh <- paf.err
		}
	}

	return paf.errCh
}

// Msg returns the originally published message.
func (paf *pubAckFuture) Msg() *Msg {
	paf.js.mu.RLock()
	defer paf.js.mu.RUnlock()
	return paf.msg
}

// For quick token lookup etc.
const aReplyTokensize = 6 func (js *js) newAsyncReply() string { js.mu.Lock() if js.rsub == nil { // Create our wildcard reply subject. sha := sha256.New() sha.Write([]byte(nuid.Next())) b := sha.Sum(nil) for i := 0; i < aReplyTokensize; i++ { b[i] = rdigits[int(b[i]%base)] } js.rpre = fmt.Sprintf("%s%s.", js.replyPrefix, b[:aReplyTokensize]) sub, err := js.nc.Subscribe(fmt.Sprintf("%s*", js.rpre), js.handleAsyncReply) if err != nil { js.mu.Unlock() return _EMPTY_ } js.rsub = sub js.rr = rand.New(rand.NewSource(time.Now().UnixNano())) } if js.connStatusCh == nil { js.connStatusCh = js.nc.StatusChanged(RECONNECTING, CLOSED) go js.resetPendingAcksOnReconnect() } var sb strings.Builder sb.WriteString(js.rpre) for { rn := js.rr.Int63() var b [aReplyTokensize]byte for i, l := 0, rn; i < len(b); i++ { b[i] = rdigits[l%base] l /= base } if _, ok := js.pafs[string(b[:])]; ok { continue } sb.Write(b[:]) break } js.mu.Unlock() return sb.String() } func (js *js) resetPendingAcksOnReconnect() { js.mu.Lock() connStatusCh := js.connStatusCh js.mu.Unlock() for { newStatus, ok := <-connStatusCh if !ok || newStatus == CLOSED { return } js.mu.Lock() errCb := js.opts.aecb for id, paf := range js.pafs { paf.err = ErrDisconnected if paf.errCh != nil { paf.errCh <- paf.err } if errCb != nil { defer errCb(js, paf.msg, ErrDisconnected) } delete(js.pafs, id) } if js.dch != nil { close(js.dch) js.dch = nil } js.mu.Unlock() } } // CleanupPublisher will cleanup the publishing side of JetStreamContext. // // This will unsubscribe from the internal reply subject if needed. // All pending async publishes will fail with ErrJetStreamContextClosed. // // If an error handler was provided, it will be called for each pending async // publish and PublishAsyncComplete will be closed. // // After completing JetStreamContext is still usable - internal subscription // will be recreated on next publish, but the acks from previous publishes will // be lost. 
func (js *js) CleanupPublisher() {
	js.cleanupReplySub()
	js.mu.Lock()
	errCb := js.opts.aecb
	for id, paf := range js.pafs {
		paf.err = ErrJetStreamPublisherClosed
		if paf.errCh != nil {
			paf.errCh <- paf.err
		}
		if errCb != nil {
			// Defer so the user callback runs after the lock is released
			// (CleanupPublisher returns promptly, unlike the reconnect loop).
			defer errCb(js, paf.msg, ErrJetStreamPublisherClosed)
		}
		delete(js.pafs, id)
	}
	// Signal anyone waiting on PublishAsyncComplete.
	if js.dch != nil {
		close(js.dch)
		js.dch = nil
	}
	js.mu.Unlock()
}

// cleanupReplySub drops the internal ack subscription and the connection
// status watcher; both are lazily recreated on the next async publish.
func (js *js) cleanupReplySub() {
	js.mu.Lock()
	if js.rsub != nil {
		js.rsub.Unsubscribe()
		js.rsub = nil
	}
	if js.connStatusCh != nil {
		close(js.connStatusCh)
		js.connStatusCh = nil
	}
	js.mu.Unlock()
}

// registerPAF will register for a PubAckFuture.
// It returns the new number of pending acks and the configured maximum.
func (js *js) registerPAF(id string, paf *pubAckFuture) (int, int) {
	js.mu.Lock()
	if js.pafs == nil {
		js.pafs = make(map[string]*pubAckFuture)
	}
	paf.js = js
	js.pafs[id] = paf
	np := len(js.pafs)
	maxpa := js.opts.maxpa
	js.mu.Unlock()
	return np, maxpa
}

// Lock should be held.
func (js *js) getPAF(id string) *pubAckFuture {
	if js.pafs == nil {
		return nil
	}
	return js.pafs[id]
}

// clearPAF will remove a PubAckFuture that was registered.
func (js *js) clearPAF(id string) {
	js.mu.Lock()
	delete(js.pafs, id)
	js.mu.Unlock()
}

// PublishAsyncPending returns how many PubAckFutures are pending.
func (js *js) PublishAsyncPending() int {
	js.mu.RLock()
	defer js.mu.RUnlock()
	return len(js.pafs)
}

// asyncStall returns the channel a stalled publisher waits on; it is closed
// by handleAsyncReply when the pending count drops below the maximum.
func (js *js) asyncStall() <-chan struct{} {
	js.mu.Lock()
	if js.stc == nil {
		js.stc = make(chan struct{})
	}
	stc := js.stc
	js.mu.Unlock()
	return stc
}

// Handle an async reply from PublishAsync.
// NOTE: the locking here is delicate — js.mu is acquired below and released
// on every exit path either directly, or inside doErr (which unlocks before
// invoking the user callback).
func (js *js) handleAsyncReply(m *Msg) {
	if len(m.Subject) <= js.replyPrefixLen {
		return
	}
	// The per-publish token after the reply prefix identifies the future.
	id := m.Subject[js.replyPrefixLen:]

	js.mu.Lock()
	paf := js.getPAF(id)
	if paf == nil {
		js.mu.Unlock()
		return
	}

	closeStc := func() {
		// Check on anyone stalled and waiting.
		if js.stc != nil && len(js.pafs) < js.opts.maxpa {
			close(js.stc)
			js.stc = nil
		}
	}

	closeDchFn := func() func() {
		var dch chan struct{}
		// Check on anyone one waiting on done status.
		if js.dch != nil && len(js.pafs) == 0 {
			dch = js.dch
			js.dch = nil
		}
		// Return function to close done channel which
		// should be deferred so that error is processed and
		// can be checked.
		return func() {
			if dch != nil {
				close(dch)
			}
		}
	}

	// doErr records the error, releases js.mu, and invokes the user error
	// callback outside the lock. Callers must NOT unlock after calling it.
	doErr := func(err error) {
		paf.err = err
		if paf.errCh != nil {
			paf.errCh <- paf.err
		}
		cb := js.opts.aecb
		js.mu.Unlock()
		if cb != nil {
			cb(paf.js, paf.msg, err)
		}
	}

	// A reply arrived, so the ack-timeout timer (if any) is obsolete.
	if paf.timeout != nil {
		paf.timeout.Stop()
	}

	// Process no responders etc.
	if len(m.Data) == 0 && m.Header.Get(statusHdr) == noResponders {
		if paf.retries < paf.maxRetries {
			paf.retries++
			// Re-publish after retryWait, re-using the same future.
			time.AfterFunc(paf.retryWait, func() {
				js.mu.Lock()
				paf := js.getPAF(id)
				js.mu.Unlock()
				if paf == nil {
					return
				}
				_, err := js.PublishMsgAsync(paf.msg, pubOptFn(func(po *pubOpts) error {
					po.pafRetry = paf
					return nil
				}))
				if err != nil {
					js.mu.Lock()
					doErr(err)
				}
			})
			js.mu.Unlock()
			return
		}
		delete(js.pafs, id)
		closeStc()
		defer closeDchFn()()
		doErr(ErrNoResponders)
		return
	}

	// Remove the completed future and release any stalled publishers.
	delete(js.pafs, id)
	closeStc()
	defer closeDchFn()()

	var pa pubAckResponse
	if err := json.Unmarshal(m.Data, &pa); err != nil {
		doErr(ErrInvalidJSAck)
		return
	}
	if pa.Error != nil {
		doErr(pa.Error)
		return
	}
	if pa.PubAck == nil || pa.PubAck.Stream == _EMPTY_ {
		doErr(ErrInvalidJSAck)
		return
	}

	// So here we have received a proper puback.
	paf.pa = pa.PubAck
	if paf.doneCh != nil {
		paf.doneCh <- paf.pa
	}
	js.mu.Unlock()
}

// MsgErrHandler is used to process asynchronous errors from
// JetStream PublishAsync. It will return the original
// message sent to the server for possible retransmitting and the error encountered.
type MsgErrHandler func(JetStream, *Msg, error)

// PublishAsyncErrHandler sets the error handler for async publishes in JetStream.
func PublishAsyncErrHandler(cb MsgErrHandler) JSOpt {
	return jsOptFn(func(js *jsOpts) error {
		js.aecb = cb
		return nil
	})
}

// PublishAsyncMaxPending sets the maximum outstanding async publishes that can be inflight at one time.
func PublishAsyncMaxPending(max int) JSOpt {
	return jsOptFn(func(js *jsOpts) error {
		if max < 1 {
			// NOTE(review): this message says "max ack pending" but the option
			// bounds pending async publishes; consider rewording upstream.
			return errors.New("nats: max ack pending should be >= 1")
		}
		js.maxpa = max
		return nil
	})
}

// PublishAsyncTimeout sets the timeout for async message publish.
// If not provided, timeout is disabled.
func PublishAsyncTimeout(dur time.Duration) JSOpt {
	return jsOptFn(func(opts *jsOpts) error {
		opts.ackTimeout = dur
		return nil
	})
}

// PublishAsync publishes a message to JetStream and returns a PubAckFuture
func (js *js) PublishAsync(subj string, data []byte, opts ...PubOpt) (PubAckFuture, error) {
	return js.PublishMsgAsync(&Msg{Subject: subj, Data: data}, opts...)
}

// defaultStallWait is how long a publisher blocks when maxpa is reached
// before giving up with ErrTooManyStalledMsgs.
const defaultStallWait = 200 * time.Millisecond

func (js *js) PublishMsgAsync(m *Msg, opts ...PubOpt) (PubAckFuture, error) {
	var o pubOpts
	if len(opts) > 0 {
		if m.Header == nil {
			m.Header = Header{}
		}
		for _, opt := range opts {
			if err := opt.configurePublish(&o); err != nil {
				return nil, err
			}
		}
	}
	if o.rnum < 0 {
		return nil, fmt.Errorf("%w: retry attempts cannot be negative", ErrInvalidArg)
	}

	// Timeouts and contexts do not make sense for these.
	if o.ttl != 0 || o.ctx != nil {
		return nil, ErrContextAndTimeout
	}
	stallWait := defaultStallWait
	if o.stallWait > 0 {
		stallWait = o.stallWait
	}

	// Translate publish options into their protocol headers.
	// FIXME(dlc) - Make common.
	if o.id != _EMPTY_ {
		m.Header.Set(MsgIdHdr, o.id)
	}
	if o.lid != _EMPTY_ {
		m.Header.Set(ExpectedLastMsgIdHdr, o.lid)
	}
	if o.str != _EMPTY_ {
		m.Header.Set(ExpectedStreamHdr, o.str)
	}
	if o.seq != nil {
		m.Header.Set(ExpectedLastSeqHdr, strconv.FormatUint(*o.seq, 10))
	}
	if o.lss != nil {
		m.Header.Set(ExpectedLastSubjSeqHdr, strconv.FormatUint(*o.lss, 10))
	}
	if o.msgTTL > 0 {
		m.Header.Set(MsgTTLHdr, o.msgTTL.String())
	}

	// Reply
	paf := o.pafRetry
	if paf == nil && m.Reply != _EMPTY_ {
		return nil, errors.New("nats: reply subject should be empty")
	}
	var id string
	var reply string

	// register new paf if not retrying
	if paf == nil {
		reply = js.newAsyncReply()

		if reply == _EMPTY_ {
			return nil, errors.New("nats: error creating async reply handler")
		}

		// The token after the reply prefix keys this future in js.pafs.
		id = reply[js.replyPrefixLen:]
		paf = &pubAckFuture{msg: m, st: time.Now(), maxRetries: o.rnum, retryWait: o.rwait, reply: reply}
		numPending, maxPending := js.registerPAF(id, paf)

		if maxPending > 0 && numPending > maxPending {
			// Too many in flight: wait (up to stallWait) for room, then fail.
			select {
			case <-js.asyncStall():
			case <-time.After(stallWait):
				js.clearPAF(id)
				return nil, ErrTooManyStalledMsgs
			}
		}
		if js.opts.ackTimeout > 0 {
			paf.timeout = time.AfterFunc(js.opts.ackTimeout, func() {
				js.mu.Lock()
				defer js.mu.Unlock()

				if _, ok := js.pafs[id]; !ok {
					// paf has already been resolved
					// while waiting for the lock
					return
				}

				// ack timed out, remove from pending acks
				delete(js.pafs, id)

				// check on anyone stalled and waiting.
				if js.stc != nil && len(js.pafs) < js.opts.maxpa {
					close(js.stc)
					js.stc = nil
				}

				// send error to user
				paf.err = ErrAsyncPublishTimeout
				if paf.errCh != nil {
					paf.errCh <- paf.err
				}

				// call error callback if set
				if js.opts.aecb != nil {
					js.opts.aecb(js, paf.msg, ErrAsyncPublishTimeout)
				}

				// check on anyone one waiting on done status.
				if js.dch != nil && len(js.pafs) == 0 {
					close(js.dch)
					js.dch = nil
				}
			})
		}
	} else {
		// Retrying: re-use the existing future and its reply subject.
		reply = paf.reply
		if paf.timeout != nil {
			paf.timeout.Reset(js.opts.ackTimeout)
		}
		id = reply[js.replyPrefixLen:]
	}

	hdr, err := m.headerBytes()
	if err != nil {
		return nil, err
	}

	if err := js.nc.publish(m.Subject, reply, hdr, m.Data); err != nil {
		js.clearPAF(id)
		return nil, err
	}

	return paf, nil
}

// PublishAsyncComplete returns a channel that will be closed when all outstanding messages have been ack'd.
func (js *js) PublishAsyncComplete() <-chan struct{} {
	js.mu.Lock()
	defer js.mu.Unlock()
	if js.dch == nil {
		js.dch = make(chan struct{})
	}
	dch := js.dch
	// Nothing pending: signal completion immediately.
	if len(js.pafs) == 0 {
		close(js.dch)
		js.dch = nil
	}
	return dch
}

// MsgId sets the message ID used for deduplication.
func MsgId(id string) PubOpt {
	return pubOptFn(func(opts *pubOpts) error {
		opts.id = id
		return nil
	})
}

// ExpectStream sets the expected stream to respond from the publish.
func ExpectStream(stream string) PubOpt {
	return pubOptFn(func(opts *pubOpts) error {
		opts.str = stream
		return nil
	})
}

// ExpectLastSequence sets the expected sequence in the response from the publish.
func ExpectLastSequence(seq uint64) PubOpt {
	return pubOptFn(func(opts *pubOpts) error {
		opts.seq = &seq
		return nil
	})
}

// ExpectLastSequencePerSubject sets the expected sequence per subject in the response from the publish.
func ExpectLastSequencePerSubject(seq uint64) PubOpt {
	return pubOptFn(func(opts *pubOpts) error {
		opts.lss = &seq
		return nil
	})
}

// ExpectLastMsgId sets the expected last msgId in the response from the publish.
func ExpectLastMsgId(id string) PubOpt {
	return pubOptFn(func(opts *pubOpts) error {
		opts.lid = id
		return nil
	})
}

// RetryWait sets the retry wait time when ErrNoResponders is encountered.
func RetryWait(dur time.Duration) PubOpt {
	return pubOptFn(func(opts *pubOpts) error {
		opts.rwait = dur
		return nil
	})
}

// RetryAttempts sets the retry number of attempts when ErrNoResponders is encountered.
func RetryAttempts(num int) PubOpt {
	return pubOptFn(func(opts *pubOpts) error {
		opts.rnum = num
		return nil
	})
}

// StallWait sets the max wait when the producer becomes stall producing messages.
func StallWait(ttl time.Duration) PubOpt {
	return pubOptFn(func(opts *pubOpts) error {
		// A non-positive wait would make the stall handling meaningless.
		if ttl <= 0 {
			return errors.New("nats: stall wait should be more than 0")
		}
		opts.stallWait = ttl
		return nil
	})
}

// MsgTTL sets per msg TTL.
// Requires [StreamConfig.AllowMsgTTL] to be enabled.
func MsgTTL(dur time.Duration) PubOpt {
	return pubOptFn(func(opts *pubOpts) error {
		opts.msgTTL = dur
		return nil
	})
}

// ackOpts aggregates the options accepted by message acknowledgement calls.
type ackOpts struct {
	// ttl is the maximum time to wait for the ack reply.
	ttl time.Duration
	// ctx optionally controls cancellation of the ack call.
	ctx context.Context
	// nakDelay is the delay communicated to the server on a negative ack.
	nakDelay time.Duration
}

// AckOpt are the options that can be passed when acknowledge a message.
type AckOpt interface {
	configureAck(opts *ackOpts) error
}

// MaxWait sets the maximum amount of time we will wait for a response.
type MaxWait time.Duration

func (ttl MaxWait) configureJSContext(js *jsOpts) error {
	js.wait = time.Duration(ttl)
	return nil
}

func (ttl MaxWait) configurePull(opts *pullOpts) error {
	opts.ttl = time.Duration(ttl)
	return nil
}

// AckWait sets the maximum amount of time we will wait for an ack.
type AckWait time.Duration

func (ttl AckWait) configurePublish(opts *pubOpts) error {
	opts.ttl = time.Duration(ttl)
	return nil
}

func (ttl AckWait) configureSubscribe(opts *subOpts) error {
	opts.cfg.AckWait = time.Duration(ttl)
	return nil
}

func (ttl AckWait) configureAck(opts *ackOpts) error {
	opts.ttl = time.Duration(ttl)
	return nil
}

// ContextOpt is an option used to set a context.Context.
type ContextOpt struct {
	context.Context
}

func (ctx ContextOpt) configureJSContext(opts *jsOpts) error {
	opts.ctx = ctx
	return nil
}

func (ctx ContextOpt) configurePublish(opts *pubOpts) error {
	opts.ctx = ctx
	return nil
}

func (ctx ContextOpt) configureSubscribe(opts *subOpts) error {
	opts.ctx = ctx
	return nil
}

func (ctx ContextOpt) configurePull(opts *pullOpts) error {
	opts.ctx = ctx
	return nil
}

func (ctx ContextOpt) configureAck(opts *ackOpts) error {
	opts.ctx = ctx
	return nil
}

// Context returns an option that can be used to configure a context for APIs
// that are context aware such as those part of the JetStream interface.
func Context(ctx context.Context) ContextOpt {
	return ContextOpt{ctx}
}

// nakDelay carries the delay to apply before a Nak'd message is redelivered.
type nakDelay time.Duration

func (d nakDelay) configureAck(opts *ackOpts) error {
	opts.nakDelay = time.Duration(d)
	return nil
}

// Subscribe

// ConsumerConfig is the configuration of a JetStream consumer.
type ConsumerConfig struct {
	Durable         string          `json:"durable_name,omitempty"`
	Name            string          `json:"name,omitempty"`
	Description     string          `json:"description,omitempty"`
	DeliverPolicy   DeliverPolicy   `json:"deliver_policy"`
	OptStartSeq     uint64          `json:"opt_start_seq,omitempty"`
	OptStartTime    *time.Time      `json:"opt_start_time,omitempty"`
	AckPolicy       AckPolicy       `json:"ack_policy"`
	AckWait         time.Duration   `json:"ack_wait,omitempty"`
	MaxDeliver      int             `json:"max_deliver,omitempty"`
	BackOff         []time.Duration `json:"backoff,omitempty"`
	FilterSubject   string          `json:"filter_subject,omitempty"`
	FilterSubjects  []string        `json:"filter_subjects,omitempty"`
	ReplayPolicy    ReplayPolicy    `json:"replay_policy"`
	RateLimit       uint64          `json:"rate_limit_bps,omitempty"` // Bits per sec
	SampleFrequency string          `json:"sample_freq,omitempty"`
	MaxWaiting      int             `json:"max_waiting,omitempty"`
	MaxAckPending   int             `json:"max_ack_pending,omitempty"`
	FlowControl     bool            `json:"flow_control,omitempty"`
	Heartbeat       time.Duration   `json:"idle_heartbeat,omitempty"`
	HeadersOnly     bool            `json:"headers_only,omitempty"`

	// Pull based options.
	MaxRequestBatch    int           `json:"max_batch,omitempty"`
	MaxRequestExpires  time.Duration `json:"max_expires,omitempty"`
	MaxRequestMaxBytes int           `json:"max_bytes,omitempty"`

	// Push based consumers.
	DeliverSubject string `json:"deliver_subject,omitempty"`
	DeliverGroup   string `json:"deliver_group,omitempty"`

	// Inactivity threshold.
	InactiveThreshold time.Duration `json:"inactive_threshold,omitempty"`

	// Generally inherited by parent stream and other markers, now can be configured directly.
	Replicas int `json:"num_replicas"`
	// Force memory storage.
	MemoryStorage bool `json:"mem_storage,omitempty"`

	// Metadata is additional metadata for the Consumer.
	// Keys starting with `_nats` are reserved.
	// NOTE: Metadata requires nats-server v2.10.0+
	Metadata map[string]string `json:"metadata,omitempty"`
}

// ConsumerInfo is the info from a JetStream consumer.
type ConsumerInfo struct {
	Stream         string         `json:"stream_name"`
	Name           string         `json:"name"`
	Created        time.Time      `json:"created"`
	Config         ConsumerConfig `json:"config"`
	Delivered      SequenceInfo   `json:"delivered"`
	AckFloor       SequenceInfo   `json:"ack_floor"`
	NumAckPending  int            `json:"num_ack_pending"`
	NumRedelivered int            `json:"num_redelivered"`
	NumWaiting     int            `json:"num_waiting"`
	NumPending     uint64         `json:"num_pending"`
	Cluster        *ClusterInfo   `json:"cluster,omitempty"`
	PushBound      bool           `json:"push_bound,omitempty"`
}

// SequenceInfo has both the consumer and the stream sequence and last activity.
type SequenceInfo struct {
	Consumer uint64     `json:"consumer_seq"`
	Stream   uint64     `json:"stream_seq"`
	Last     *time.Time `json:"last_active,omitempty"`
}

// SequencePair includes the consumer and stream sequence info from a JetStream consumer.
type SequencePair struct {
	Consumer uint64 `json:"consumer_seq"`
	Stream   uint64 `json:"stream_seq"`
}

// nextRequest is for getting next messages for pull based consumers from JetStream.
type nextRequest struct {
	Expires   time.Duration `json:"expires,omitempty"`
	Batch     int           `json:"batch,omitempty"`
	NoWait    bool          `json:"no_wait,omitempty"`
	MaxBytes  int           `json:"max_bytes,omitempty"`
	Heartbeat time.Duration `json:"idle_heartbeat,omitempty"`
}

// jsSub includes JetStream subscription info.
type jsSub struct {
	js *js

	// For pull subscribers, this is the next message subject to send requests to.
	nms string

	psubj    string // the subject that was passed by user to the subscribe calls
	consumer string
	stream   string
	deliver  string
	pull     bool
	dc       bool // Delete JS consumer
	ackNone  bool

	// This is ConsumerInfo's Pending+Consumer.Delivered that we get from the
	// add consumer response. Note that some versions of the server gather the
	// consumer info *after* the creation of the consumer, which means that
	// some messages may have been already delivered. So the sum of the two
	// is a more accurate representation of the number of messages pending or
	// in the process of being delivered to the subscription when created.
	pending uint64

	// Ordered consumers
	ordered bool
	dseq    uint64
	sseq    uint64
	ccreq   *createConsumerRequest

	// Heartbeats and Flow Control handling from push consumers.
	hbc    *time.Timer
	hbi    time.Duration
	active bool
	cmeta  string
	fcr    string
	fcd    uint64
	fciseq uint64
	csfct  *time.Timer

	// context set on js.Subscribe used e.g. to recreate ordered consumer
	ctx context.Context

	// Cancellation function to cancel context on drain/unsubscribe.
	cancel func()
}

// Deletes the JS Consumer.
// No connection nor subscription lock must be held on entry.
func (sub *Subscription) deleteConsumer() error {
	sub.mu.Lock()
	jsi := sub.jsi
	// Nothing to delete when there is no JetStream state attached.
	if jsi == nil {
		sub.mu.Unlock()
		return nil
	}
	if jsi.stream == _EMPTY_ || jsi.consumer == _EMPTY_ {
		sub.mu.Unlock()
		return nil
	}
	// Snapshot under the lock, then release it before the network call.
	stream, consumer := jsi.stream, jsi.consumer
	js := jsi.js
	sub.mu.Unlock()
	return js.DeleteConsumer(stream, consumer)
}

// SubOpt configures options for subscribing to JetStream consumers.
type SubOpt interface {
	configureSubscribe(opts *subOpts) error
}

// subOptFn is a function option used to configure a JetStream Subscribe.
type subOptFn func(opts *subOpts) error

func (opt subOptFn) configureSubscribe(opts *subOpts) error {
	return opt(opts)
}

// Subscribe creates an async Subscription for JetStream.
// The stream and consumer names can be provided with the nats.Bind() option.
// For creating an ephemeral (where the consumer name is picked by the server),
// you can provide the stream name with nats.BindStream().
// If no stream name is specified, the library will attempt to figure out which
// stream the subscription is for. See important notes below for more details.
//
// IMPORTANT NOTES:
// * If none of the options Bind() nor Durable() are specified, the library will
// send a request to the server to create an ephemeral JetStream consumer,
// which will be deleted after an Unsubscribe() or Drain(), or automatically
// by the server after a short period of time after the NATS subscription is
// gone.
// * If Durable() option is specified, the library will attempt to lookup a JetStream
// consumer with this name, and if found, will bind to it and not attempt to
// delete it. However, if not found, the library will send a request to create
// such durable JetStream consumer. The library will delete the JetStream consumer
// after an Unsubscribe() or Drain().
// * If Bind() option is provided, the library will attempt to lookup the
// consumer with the given name, and if successful, bind to it. If the lookup fails,
// then the Subscribe() call will return an error.
func (js *js) Subscribe(subj string, cb MsgHandler, opts ...SubOpt) (*Subscription, error) {
	// An async subscription requires a callback.
	if cb == nil {
		return nil, ErrBadSubscription
	}
	return js.subscribe(subj, _EMPTY_, cb, nil, false, false, opts)
}

// SubscribeSync creates a Subscription that can be used to process messages synchronously.
// See important note in Subscribe()
func (js *js) SubscribeSync(subj string, opts ...SubOpt) (*Subscription, error) {
	// Sync subscriptions are backed by an internal channel.
	mch := make(chan *Msg, js.nc.Opts.SubChanLen)
	return js.subscribe(subj, _EMPTY_, nil, mch, true, false, opts)
}

// QueueSubscribe creates a Subscription with a queue group.
// If no optional durable name nor binding options are specified, the queue name will be used as a durable name.
// See important note in Subscribe()
func (js *js) QueueSubscribe(subj, queue string, cb MsgHandler, opts ...SubOpt) (*Subscription, error) {
	// An async queue subscription requires a callback.
	if cb == nil {
		return nil, ErrBadSubscription
	}
	return js.subscribe(subj, queue, cb, nil, false, false, opts)
}

// QueueSubscribeSync creates a Subscription with a queue group that can be used to process messages synchronously.
// See important note in QueueSubscribe()
func (js *js) QueueSubscribeSync(subj, queue string, opts ...SubOpt) (*Subscription, error) {
	mch := make(chan *Msg, js.nc.Opts.SubChanLen)
	return js.subscribe(subj, queue, nil, mch, true, false, opts)
}

// ChanSubscribe creates channel based Subscription.
// Using ChanSubscribe without buffered capacity is not recommended since
// it will be prone to dropping messages with a slow consumer error. Make sure to give the channel enough
// capacity to handle bursts in traffic, for example other Subscribe APIs use a default of 512k capacity in comparison.
// See important note in Subscribe()
func (js *js) ChanSubscribe(subj string, ch chan *Msg, opts ...SubOpt) (*Subscription, error) {
	return js.subscribe(subj, _EMPTY_, nil, ch, false, false, opts)
}

// ChanQueueSubscribe creates channel based Subscription with a queue group.
// See important note in QueueSubscribe()
func (js *js) ChanQueueSubscribe(subj, queue string, ch chan *Msg, opts ...SubOpt) (*Subscription, error) {
	return js.subscribe(subj, queue, nil, ch, false, false, opts)
}

// PullSubscribe creates a Subscription that can fetch messages.
// See important note in Subscribe() func (js *js) PullSubscribe(subj, durable string, opts ...SubOpt) (*Subscription, error) { mch := make(chan *Msg, js.nc.Opts.SubChanLen) if durable != "" { opts = append(opts, Durable(durable)) } return js.subscribe(subj, _EMPTY_, nil, mch, true, true, opts) } func processConsInfo(info *ConsumerInfo, userCfg *ConsumerConfig, isPullMode bool, subj, queue string) (string, error) { ccfg := &info.Config // Make sure this new subject matches or is a subset. if ccfg.FilterSubject != _EMPTY_ && subj != ccfg.FilterSubject { return _EMPTY_, ErrSubjectMismatch } // Prevent binding a subscription against incompatible consumer types. if isPullMode && ccfg.DeliverSubject != _EMPTY_ { return _EMPTY_, ErrPullSubscribeToPushConsumer } else if !isPullMode && ccfg.DeliverSubject == _EMPTY_ { return _EMPTY_, ErrPullSubscribeRequired } // If pull mode, nothing else to check here. if isPullMode { return _EMPTY_, checkConfig(ccfg, userCfg) } // At this point, we know the user wants push mode, and the JS consumer is // really push mode. dg := info.Config.DeliverGroup if dg == _EMPTY_ { // Prevent an user from attempting to create a queue subscription on // a JS consumer that was not created with a deliver group. if queue != _EMPTY_ { return _EMPTY_, errors.New("cannot create a queue subscription for a consumer without a deliver group") } else if info.PushBound { // Need to reject a non queue subscription to a non queue consumer // if the consumer is already bound. return _EMPTY_, errors.New("consumer is already bound to a subscription") } } else { // If the JS consumer has a deliver group, we need to fail a non queue // subscription attempt: if queue == _EMPTY_ { return _EMPTY_, fmt.Errorf("cannot create a subscription for a consumer with a deliver group %q", dg) } else if queue != dg { // Here the user's queue group name does not match the one associated // with the JS consumer. 
return _EMPTY_, fmt.Errorf("cannot create a queue subscription %q for a consumer with a deliver group %q", queue, dg) } } if err := checkConfig(ccfg, userCfg); err != nil { return _EMPTY_, err } return ccfg.DeliverSubject, nil } func checkConfig(s, u *ConsumerConfig) error { makeErr := func(fieldName string, usrVal, srvVal any) error { return fmt.Errorf("nats: configuration requests %s to be %v, but consumer's value is %v", fieldName, usrVal, srvVal) } if u.Durable != _EMPTY_ && u.Durable != s.Durable { return makeErr("durable", u.Durable, s.Durable) } if u.Description != _EMPTY_ && u.Description != s.Description { return makeErr("description", u.Description, s.Description) } if u.DeliverPolicy != deliverPolicyNotSet && u.DeliverPolicy != s.DeliverPolicy { return makeErr("deliver policy", u.DeliverPolicy, s.DeliverPolicy) } if u.OptStartSeq > 0 && u.OptStartSeq != s.OptStartSeq { return makeErr("optional start sequence", u.OptStartSeq, s.OptStartSeq) } if u.OptStartTime != nil && !u.OptStartTime.IsZero() && !(*u.OptStartTime).Equal(*s.OptStartTime) { return makeErr("optional start time", u.OptStartTime, s.OptStartTime) } if u.AckPolicy != ackPolicyNotSet && u.AckPolicy != s.AckPolicy { return makeErr("ack policy", u.AckPolicy, s.AckPolicy) } if u.AckWait > 0 && u.AckWait != s.AckWait { return makeErr("ack wait", u.AckWait, s.AckWait) } if u.MaxDeliver > 0 && u.MaxDeliver != s.MaxDeliver { return makeErr("max deliver", u.MaxDeliver, s.MaxDeliver) } if u.ReplayPolicy != replayPolicyNotSet && u.ReplayPolicy != s.ReplayPolicy { return makeErr("replay policy", u.ReplayPolicy, s.ReplayPolicy) } if u.RateLimit > 0 && u.RateLimit != s.RateLimit { return makeErr("rate limit", u.RateLimit, s.RateLimit) } if u.SampleFrequency != _EMPTY_ && u.SampleFrequency != s.SampleFrequency { return makeErr("sample frequency", u.SampleFrequency, s.SampleFrequency) } if u.MaxWaiting > 0 && u.MaxWaiting != s.MaxWaiting { return makeErr("max waiting", u.MaxWaiting, s.MaxWaiting) } if 
u.MaxAckPending > 0 && u.MaxAckPending != s.MaxAckPending { return makeErr("max ack pending", u.MaxAckPending, s.MaxAckPending) } // For flow control, we want to fail if the user explicit wanted it, but // it is not set in the existing consumer. If it is not asked by the user, // the library still handles it and so no reason to fail. if u.FlowControl && !s.FlowControl { return makeErr("flow control", u.FlowControl, s.FlowControl) } if u.Heartbeat > 0 && u.Heartbeat != s.Heartbeat { return makeErr("heartbeat", u.Heartbeat, s.Heartbeat) } if u.Replicas > 0 && u.Replicas != s.Replicas { return makeErr("replicas", u.Replicas, s.Replicas) } if u.MemoryStorage && !s.MemoryStorage { return makeErr("memory storage", u.MemoryStorage, s.MemoryStorage) } return nil } func (js *js) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg, isSync, isPullMode bool, opts []SubOpt) (*Subscription, error) { cfg := ConsumerConfig{ DeliverPolicy: deliverPolicyNotSet, AckPolicy: ackPolicyNotSet, ReplayPolicy: replayPolicyNotSet, } o := subOpts{cfg: &cfg} if len(opts) > 0 { for _, opt := range opts { if opt == nil { continue } if err := opt.configureSubscribe(&o); err != nil { return nil, err } } } // If no stream name is specified, the subject cannot be empty. if subj == _EMPTY_ && o.stream == _EMPTY_ { return nil, errors.New("nats: subject required") } // Note that these may change based on the consumer info response we may get. hasHeartbeats := o.cfg.Heartbeat > 0 hasFC := o.cfg.FlowControl // Some checks for pull subscribers if isPullMode { // No deliver subject should be provided if o.cfg.DeliverSubject != _EMPTY_ { return nil, ErrPullSubscribeToPushConsumer } } // Some check/setting specific to queue subs if queue != _EMPTY_ { // Queue subscriber cannot have HB or FC (since messages will be randomly dispatched // to members). We may in the future have a separate NATS subscription that all members // would subscribe to and server would send on. 
if o.cfg.Heartbeat > 0 || o.cfg.FlowControl { // Not making this a public ErrXXX in case we allow in the future. return nil, errors.New("nats: queue subscription doesn't support idle heartbeat nor flow control") } // If this is a queue subscription and no consumer nor durable name was specified, // then we will use the queue name as a durable name. if o.consumer == _EMPTY_ && o.cfg.Durable == _EMPTY_ { if err := checkConsumerName(queue); err != nil { return nil, err } o.cfg.Durable = queue } } var ( err error shouldCreate bool info *ConsumerInfo deliver string stream = o.stream consumer = o.consumer isDurable = o.cfg.Durable != _EMPTY_ consumerBound = o.bound ctx = o.ctx skipCInfo = o.skipCInfo notFoundErr bool lookupErr bool nc = js.nc nms string hbi time.Duration ccreq *createConsumerRequest // In case we need to hold onto it for ordered consumers. maxap int ) // Do some quick checks here for ordered consumers. We do these here instead of spread out // in the individual SubOpts. if o.ordered { // Make sure we are not durable. if isDurable { return nil, errors.New("nats: durable can not be set for an ordered consumer") } // Check ack policy. if o.cfg.AckPolicy != ackPolicyNotSet { return nil, errors.New("nats: ack policy can not be set for an ordered consumer") } // Check max deliver. if o.cfg.MaxDeliver != 1 && o.cfg.MaxDeliver != 0 { return nil, errors.New("nats: max deliver can not be set for an ordered consumer") } // No deliver subject, we pick our own. if o.cfg.DeliverSubject != _EMPTY_ { return nil, errors.New("nats: deliver subject can not be set for an ordered consumer") } // Queue groups not allowed. if queue != _EMPTY_ { return nil, errors.New("nats: queues not be set for an ordered consumer") } // Check for bound consumers. if consumer != _EMPTY_ { return nil, errors.New("nats: can not bind existing consumer for an ordered consumer") } // Check for pull mode. 
if isPullMode { return nil, errors.New("nats: can not use pull mode for an ordered consumer") } // Setup how we need it to be here. o.cfg.FlowControl = true o.cfg.AckPolicy = AckNonePolicy o.cfg.MaxDeliver = 1 o.cfg.AckWait = 22 * time.Hour // Just set to something known, not utilized. // Force R1 and MemoryStorage for these. o.cfg.Replicas = 1 o.cfg.MemoryStorage = true if !hasHeartbeats { o.cfg.Heartbeat = orderedHeartbeatsInterval } hasFC, hasHeartbeats = true, true o.mack = true // To avoid auto-ack wrapping call below. hbi = o.cfg.Heartbeat } // In case a consumer has not been set explicitly, then the // durable name will be used as the consumer name. if consumer == _EMPTY_ { consumer = o.cfg.Durable } // Find the stream mapped to the subject if not bound to a stream already. if stream == _EMPTY_ { stream, err = js.StreamNameBySubject(subj) if err != nil { return nil, err } } // With an explicit durable name, we can lookup the consumer first // to which it should be attaching to. // If SkipConsumerLookup was used, do not call consumer info. if consumer != _EMPTY_ && !o.skipCInfo { info, err = js.ConsumerInfo(stream, consumer) notFoundErr = errors.Is(err, ErrConsumerNotFound) lookupErr = err == ErrJetStreamNotEnabled || errors.Is(err, ErrTimeout) || errors.Is(err, context.DeadlineExceeded) } switch { case info != nil: deliver, err = processConsInfo(info, o.cfg, isPullMode, subj, queue) if err != nil { return nil, err } icfg := &info.Config hasFC, hbi = icfg.FlowControl, icfg.Heartbeat hasHeartbeats = hbi > 0 maxap = icfg.MaxAckPending case (err != nil && !notFoundErr) || (notFoundErr && consumerBound): // If the consumer is being bound and we got an error on pull subscribe then allow the error. if !(isPullMode && lookupErr && consumerBound) { return nil, err } case skipCInfo: // When skipping consumer info, need to rely on the manually passed sub options // to match the expected behavior from the subscription. 
hasFC, hbi = o.cfg.FlowControl, o.cfg.Heartbeat hasHeartbeats = hbi > 0 maxap = o.cfg.MaxAckPending deliver = o.cfg.DeliverSubject if consumerBound { break } // When not bound to a consumer already, proceed to create. fallthrough default: // Attempt to create consumer if not found nor using Bind. shouldCreate = true if o.cfg.DeliverSubject != _EMPTY_ { deliver = o.cfg.DeliverSubject } else if !isPullMode { deliver = nc.NewInbox() cfg.DeliverSubject = deliver } // Do filtering always, server will clear as needed. cfg.FilterSubject = subj // Pass the queue to the consumer config if queue != _EMPTY_ { cfg.DeliverGroup = queue } // If not set, default to deliver all if cfg.DeliverPolicy == deliverPolicyNotSet { cfg.DeliverPolicy = DeliverAllPolicy } // If not set, default to ack explicit. if cfg.AckPolicy == ackPolicyNotSet { cfg.AckPolicy = AckExplicitPolicy } // If not set, default to instant if cfg.ReplayPolicy == replayPolicyNotSet { cfg.ReplayPolicy = ReplayInstantPolicy } // If we have acks at all and the MaxAckPending is not set go ahead // and set to the internal max for channel based consumers if cfg.MaxAckPending == 0 && ch != nil && cfg.AckPolicy != AckNonePolicy { cfg.MaxAckPending = cap(ch) } // Create request here. ccreq = &createConsumerRequest{ Stream: stream, Config: &cfg, } hbi = cfg.Heartbeat } if isPullMode { nms = fmt.Sprintf(js.apiSubj(apiRequestNextT), stream, consumer) deliver = nc.NewInbox() // for pull consumers, create a wildcard subscription to differentiate pull requests deliver += ".*" } // In case this has a context, then create a child context that // is possible to cancel via unsubscribe / drain. 
var cancel func() if ctx != nil { ctx, cancel = context.WithCancel(ctx) } jsi := &jsSub{ js: js, stream: stream, consumer: consumer, deliver: deliver, hbi: hbi, ordered: o.ordered, ccreq: ccreq, dseq: 1, pull: isPullMode, nms: nms, psubj: subj, cancel: cancel, ackNone: o.cfg.AckPolicy == AckNonePolicy, ctx: o.ctx, } // Auto acknowledge unless manual ack is set or policy is set to AckNonePolicy if cb != nil && !o.mack && o.cfg.AckPolicy != AckNonePolicy { ocb := cb cb = func(m *Msg) { ocb(m); m.Ack() } } sub, err := nc.subscribe(deliver, queue, cb, ch, nil, isSync, jsi) if err != nil { return nil, err } // If we fail and we had the sub we need to cleanup, but can't just do a straight Unsubscribe or Drain. // We need to clear the jsi so we do not remove any durables etc. cleanUpSub := func() { if sub != nil { sub.mu.Lock() sub.jsi = nil sub.mu.Unlock() sub.Unsubscribe() } } // If we are creating or updating let's process that request. consName := o.cfg.Name if shouldCreate { if cfg.Durable != "" { consName = cfg.Durable } else if consName == "" { consName = getHash(nuid.Next()) } var info *ConsumerInfo if o.ctx != nil { info, err = js.upsertConsumer(stream, consName, ccreq.Config, Context(o.ctx)) } else { info, err = js.upsertConsumer(stream, consName, ccreq.Config) } if err != nil { var apiErr *APIError if ok := errors.As(err, &apiErr); !ok { cleanUpSub() return nil, err } if consumer == _EMPTY_ || (apiErr.ErrorCode != JSErrCodeConsumerAlreadyExists && apiErr.ErrorCode != JSErrCodeConsumerNameExists) { cleanUpSub() if errors.Is(apiErr, ErrStreamNotFound) { return nil, ErrStreamNotFound } return nil, err } // We will not be using this sub here if we were push based. 
if !isPullMode { cleanUpSub() } info, err = js.ConsumerInfo(stream, consumer) if err != nil { return nil, err } deliver, err = processConsInfo(info, o.cfg, isPullMode, subj, queue) if err != nil { return nil, err } if !isPullMode { // We can't reuse the channel, so if one was passed, we need to create a new one. if isSync { ch = make(chan *Msg, cap(ch)) } else if ch != nil { // User provided (ChanSubscription), simply try to drain it. for done := false; !done; { select { case <-ch: default: done = true } } } jsi.deliver = deliver jsi.hbi = info.Config.Heartbeat // Recreate the subscription here. sub, err = nc.subscribe(jsi.deliver, queue, cb, ch, nil, isSync, jsi) if err != nil { return nil, err } hasFC = info.Config.FlowControl hasHeartbeats = info.Config.Heartbeat > 0 } } else { // Since the library created the JS consumer, it will delete it on Unsubscribe()/Drain() sub.mu.Lock() sub.jsi.dc = true sub.jsi.pending = info.NumPending + info.Delivered.Consumer // If this is an ephemeral, we did not have a consumer name, we get it from the info // after the AddConsumer returns. if consumer == _EMPTY_ { sub.jsi.consumer = info.Name if isPullMode { sub.jsi.nms = fmt.Sprintf(js.apiSubj(apiRequestNextT), stream, info.Name) } } sub.mu.Unlock() } // Capture max ack pending from the info response here which covers both // success and failure followed by consumer lookup. maxap = info.Config.MaxAckPending } // If maxap is greater than the default sub's pending limit, use that. if maxap > DefaultSubPendingMsgsLimit { // For bytes limit, use the min of maxp*1MB or DefaultSubPendingBytesLimit bl := maxap * 1024 * 1024 if bl < DefaultSubPendingBytesLimit { bl = DefaultSubPendingBytesLimit } if err := sub.SetPendingLimits(maxap, bl); err != nil { return nil, err } } // Do heartbeats last if needed. 
if hasHeartbeats { sub.scheduleHeartbeatCheck() } // For ChanSubscriptions, if we know that there is flow control, we will // start a go routine that evaluates the number of delivered messages // and process flow control. if sub.Type() == ChanSubscription && hasFC { sub.chanSubcheckForFlowControlResponse() } // Wait for context to get canceled if there is one. if ctx != nil { go func() { <-ctx.Done() sub.Unsubscribe() }() } return sub, nil } // InitialConsumerPending returns the number of messages pending to be // delivered to the consumer when the subscription was created. func (sub *Subscription) InitialConsumerPending() (uint64, error) { sub.mu.Lock() defer sub.mu.Unlock() if sub.jsi == nil || sub.jsi.consumer == _EMPTY_ { return 0, fmt.Errorf("%w: not a JetStream subscription", ErrTypeSubscription) } return sub.jsi.pending, nil } // This long-lived routine is used per ChanSubscription to check // on the number of delivered messages and check for flow control response. func (sub *Subscription) chanSubcheckForFlowControlResponse() { sub.mu.Lock() // We don't use defer since if we need to send an RC reply, we need // to do it outside the sub's lock. So doing explicit unlock... if sub.closed { sub.mu.Unlock() return } var fcReply string var nc *Conn jsi := sub.jsi if jsi.csfct == nil { jsi.csfct = time.AfterFunc(chanSubFCCheckInterval, sub.chanSubcheckForFlowControlResponse) } else { fcReply = sub.checkForFlowControlResponse() nc = sub.conn // Do the reset here under the lock, it's ok... jsi.csfct.Reset(chanSubFCCheckInterval) } sub.mu.Unlock() // This call will return an error (which we don't care here) // if nc is nil or fcReply is empty. nc.Publish(fcReply, nil) } // ErrConsumerSequenceMismatch represents an error from a consumer // that received a Heartbeat including sequence different to the // one expected from the view of the client. 
type ErrConsumerSequenceMismatch struct {
	// StreamResumeSequence is the stream sequence from where the consumer
	// should resume consuming from the stream.
	StreamResumeSequence uint64

	// ConsumerSequence is the sequence of the consumer that is behind.
	ConsumerSequence uint64

	// LastConsumerSequence is the sequence of the consumer when the heartbeat
	// was received.
	LastConsumerSequence uint64
}

func (ecs *ErrConsumerSequenceMismatch) Error() string {
	return fmt.Sprintf("nats: sequence mismatch for consumer at sequence %d (%d sequences behind), should restart consumer from stream sequence %d",
		ecs.ConsumerSequence,
		ecs.LastConsumerSequence-ecs.ConsumerSequence,
		ecs.StreamResumeSequence,
	)
}

// isJSControlMessage will return true if this is an empty control status message
// and indicate what type of control message it is, say jsCtrlHB or jsCtrlFC
func isJSControlMessage(msg *Msg) (bool, int) {
	// Control messages carry no payload and a specific status header.
	if len(msg.Data) > 0 || msg.Header.Get(statusHdr) != controlMsg {
		return false, 0
	}
	val := msg.Header.Get(descrHdr)
	if strings.HasPrefix(val, "Idle") {
		return true, jsCtrlHB
	}
	if strings.HasPrefix(val, "Flow") {
		return true, jsCtrlFC
	}
	return true, 0
}

// Keeps track of the incoming message's reply subject so that the consumer's
// state (deliver sequence, etc..) can be checked against heartbeats.
// We will also bump the incoming data message sequence that is used in FC cases.
// Runs under the subscription lock
func (sub *Subscription) trackSequences(reply string) {
	// For flow control, keep track of incoming message sequence.
	sub.jsi.fciseq++
	sub.jsi.cmeta = reply
}

// Check to make sure messages are arriving in order.
// Returns true if the sub had to be replaced. Will cause upper layers to return.
// The caller has verified that sub.jsi != nil and that this is not a control message.
// Lock should be held.
func (sub *Subscription) checkOrderedMsgs(m *Msg) bool {
	// Ignore msgs with no reply like HBs and flow control, they are handled elsewhere.
	if m.Reply == _EMPTY_ {
		return false
	}

	// Normal message here.
	// The reply subject carries the ack metadata (stream/consumer sequences).
	tokens, err := parser.GetMetadataFields(m.Reply)
	if err != nil {
		return false
	}
	sseq, dseq := parser.ParseNum(tokens[parser.AckStreamSeqTokenPos]), parser.ParseNum(tokens[parser.AckConsumerSeqTokenPos])

	jsi := sub.jsi
	if dseq != jsi.dseq {
		// Gap detected: recreate the ordered consumer from the next stream seq.
		sub.resetOrderedConsumer(jsi.sseq + 1)
		return true
	}
	// Update our tracking here.
	jsi.dseq, jsi.sseq = dseq+1, sseq
	return false
}

// Update and replace sid.
// Lock should be held on entry but will be unlocked to prevent lock inversion.
func (sub *Subscription) applyNewSID() (osid int64) {
	nc := sub.conn
	sub.mu.Unlock()

	nc.subsMu.Lock()
	osid = sub.sid
	delete(nc.subs, osid)
	// Place new one.
	nc.ssid++
	nsid := nc.ssid
	nc.subs[nsid] = sub
	nc.subsMu.Unlock()

	sub.mu.Lock()
	sub.sid = nsid
	return osid
}

// We are here if we have detected a gap with an ordered consumer.
// We will create a new consumer and rewire the low level subscription.
// Lock should be held.
func (sub *Subscription) resetOrderedConsumer(sseq uint64) {
	nc := sub.conn
	if sub.jsi == nil || nc == nil || sub.closed {
		return
	}
	var maxStr string
	// If there was an AUTO_UNSUB done, we need to adjust the new value
	// to send after the SUB for the new sid.
	if sub.max > 0 {
		if sub.jsi.fciseq < sub.max {
			adjustedMax := sub.max - sub.jsi.fciseq
			maxStr = strconv.Itoa(int(adjustedMax))
		} else {
			// We are already at the max, so we should just unsub the
			// existing sub and be done
			go func(sid int64) {
				nc.mu.Lock()
				nc.bw.appendString(fmt.Sprintf(unsubProto, sid, _EMPTY_))
				nc.kickFlusher()
				nc.mu.Unlock()
			}(sub.sid)
			return
		}
	}
	// Quick unsubscribe. Since we know this is a simple push subscriber we do in place.
	osid := sub.applyNewSID()

	// Grab new inbox.
	newDeliver := nc.NewInbox()
	sub.Subject = newDeliver

	// Snapshot the new sid under sub lock.
	nsid := sub.sid

	// We are still in the low level readLoop for the connection so we need
	// to spin a go routine to try to create the new consumer.
	go func() {
		// Unsubscribe and subscribe with new inbox and sid.
		// Remap a new low level sub into this sub since its client accessible.
		// This is done here in this go routine to prevent lock inversion.
		nc.mu.Lock()
		nc.bw.appendString(fmt.Sprintf(unsubProto, osid, _EMPTY_))
		nc.bw.appendString(fmt.Sprintf(subProto, newDeliver, _EMPTY_, nsid))
		if maxStr != _EMPTY_ {
			nc.bw.appendString(fmt.Sprintf(unsubProto, nsid, maxStr))
		}
		nc.kickFlusher()
		nc.mu.Unlock()

		// pushErr surfaces the failure to the async error callback and
		// tears down the subscription.
		pushErr := func(err error) {
			nc.handleConsumerSequenceMismatch(sub, fmt.Errorf("%w: recreating ordered consumer", err))
			nc.unsubscribe(sub, 0, true)
		}

		sub.mu.Lock()
		jsi := sub.jsi
		// Reset some items in jsi.
		jsi.dseq = 1
		jsi.cmeta = _EMPTY_
		jsi.fcr, jsi.fcd = _EMPTY_, 0
		jsi.deliver = newDeliver
		// Reset consumer request for starting policy.
		cfg := jsi.ccreq.Config
		cfg.DeliverSubject = newDeliver
		cfg.DeliverPolicy = DeliverByStartSequencePolicy
		cfg.OptStartSeq = sseq
		// In case the consumer was created with a start time, we need to clear it
		// since we are now using a start sequence.
		cfg.OptStartTime = nil
		js := jsi.js
		sub.mu.Unlock()

		// NOTE(review): the lock is released and immediately re-acquired here;
		// looks like an artifact of an earlier refactor — confirm intent.
		sub.mu.Lock()
		// Attempt to delete the existing consumer.
		// We don't wait for the response since even if it's unsuccessful,
		// inactivity threshold will kick in and delete it.
		if jsi.consumer != _EMPTY_ {
			go js.DeleteConsumer(jsi.stream, jsi.consumer)
		}
		jsi.consumer = ""
		sub.mu.Unlock()

		consName := getHash(nuid.Next())
		var cinfo *ConsumerInfo
		var err error
		if js.opts.ctx != nil {
			cinfo, err = js.upsertConsumer(jsi.stream, consName, cfg, Context(js.opts.ctx))
		} else {
			cinfo, err = js.upsertConsumer(jsi.stream, consName, cfg)
		}
		if err != nil {
			var apiErr *APIError
			if errors.Is(err, ErrJetStreamNotEnabled) || errors.Is(err, ErrTimeout) || errors.Is(err, context.DeadlineExceeded) {
				// if creating consumer failed, retry
				return
			} else if errors.As(err, &apiErr) && apiErr.ErrorCode == JSErrCodeInsufficientResourcesErr {
				// retry for insufficient resources, as it may mean that client is connected to a running
				// server in cluster while the server hosting R1 JetStream resources is restarting
				return
			} else if errors.As(err, &apiErr) && apiErr.ErrorCode == JSErrCodeJetStreamNotAvailable {
				// retry if JetStream meta leader is temporarily unavailable
				return
			}
			pushErr(err)
			return
		}
		sub.mu.Lock()
		jsi.consumer = cinfo.Name
		sub.mu.Unlock()
	}()
}

// For jetstream subscriptions, returns the number of delivered messages.
// For ChanSubscription, this value is computed based on the known number
// of messages added to the channel minus the current size of that channel.
// Lock held on entry
func (sub *Subscription) getJSDelivered() uint64 {
	if sub.typ == ChanSubscription {
		return sub.jsi.fciseq - uint64(len(sub.mch))
	}
	return sub.delivered
}

// checkForFlowControlResponse will check to see if we should send a flow control response
// based on the subscription current delivered index and the target.
// Runs under subscription lock
func (sub *Subscription) checkForFlowControlResponse() string {
	// Caller has verified that there is a sub.jsi and fc
	jsi := sub.jsi
	jsi.active = true
	if sub.getJSDelivered() >= jsi.fcd {
		// Target reached: return the pending FC reply subject and clear it.
		fcr := jsi.fcr
		jsi.fcr, jsi.fcd = _EMPTY_, 0
		return fcr
	}
	return _EMPTY_
}

// Record an inbound flow control message.
// Runs under subscription lock
func (sub *Subscription) scheduleFlowControlResponse(reply string) {
	// Remember the reply subject and the delivered count at which the
	// flow control response should be sent.
	sub.jsi.fcr, sub.jsi.fcd = reply, sub.jsi.fciseq
}

// Checks for activity from our consumer.
// If we do not think we are active send an async error.
func (sub *Subscription) activityCheck() {
	sub.mu.Lock()
	jsi := sub.jsi
	if jsi == nil || sub.closed {
		sub.mu.Unlock()
		return
	}

	// Snapshot the flag, re-arm the heartbeat timer, and reset for the
	// next interval while still holding the lock.
	active := jsi.active
	jsi.hbc.Reset(jsi.hbi * hbcThresh)
	jsi.active = false
	nc := sub.conn
	sub.mu.Unlock()

	if !active {
		if !jsi.ordered || nc.Status() != CONNECTED {
			// Non-ordered consumer (or disconnected): report inactivity
			// through the async error callback.
			nc.mu.Lock()
			if errCB := nc.Opts.AsyncErrorCB; errCB != nil {
				nc.ach.push(func() { errCB(nc, sub, ErrConsumerNotActive) })
			}
			nc.mu.Unlock()
			return
		}
		// Ordered consumers are recreated transparently from the next
		// expected stream sequence.
		sub.mu.Lock()
		sub.resetOrderedConsumer(jsi.sseq + 1)
		sub.mu.Unlock()
	}
}

// scheduleHeartbeatCheck sets up the timer check to make sure we are active
// or receiving idle heartbeats..
func (sub *Subscription) scheduleHeartbeatCheck() {
	sub.mu.Lock()
	defer sub.mu.Unlock()

	jsi := sub.jsi
	if jsi == nil {
		return
	}

	if jsi.hbc == nil {
		jsi.hbc = time.AfterFunc(jsi.hbi*hbcThresh, sub.activityCheck)
	} else {
		jsi.hbc.Reset(jsi.hbi * hbcThresh)
	}
}

// handleConsumerSequenceMismatch will send an async error that can be used to restart a push based consumer.
func (nc *Conn) handleConsumerSequenceMismatch(sub *Subscription, err error) {
	nc.mu.Lock()
	errCB := nc.Opts.AsyncErrorCB
	if errCB != nil {
		nc.ach.push(func() { errCB(nc, sub, err) })
	}
	nc.mu.Unlock()
}

// checkForSequenceMismatch will make sure we have not missed any messages since last seen.
func (nc *Conn) checkForSequenceMismatch(msg *Msg, s *Subscription, jsi *jsSub) {
	// Process heartbeat received, get latest control metadata if present.
	s.mu.Lock()
	ctrl, ordered := jsi.cmeta, jsi.ordered
	jsi.active = true
	s.mu.Unlock()

	if ctrl == _EMPTY_ {
		return
	}

	tokens, err := parser.GetMetadataFields(ctrl)
	if err != nil {
		return
	}

	// Consumer sequence.
	var ldseq string
	dseq := tokens[parser.AckConsumerSeqTokenPos]
	hdr := msg.Header[lastConsumerSeqHdr]
	if len(hdr) == 1 {
		ldseq = hdr[0]
	}

	// Detect consumer sequence mismatch and whether
	// should restart the consumer.
	if ldseq != dseq {
		// Dispatch async error including details such as
		// from where the consumer could be restarted.
		sseq := parser.ParseNum(tokens[parser.AckStreamSeqTokenPos])
		if ordered {
			// Ordered consumers recover automatically by recreating the
			// consumer at the next stream sequence.
			s.mu.Lock()
			s.resetOrderedConsumer(jsi.sseq + 1)
			s.mu.Unlock()
		} else {
			ecs := &ErrConsumerSequenceMismatch{
				StreamResumeSequence: uint64(sseq),
				ConsumerSequence:     parser.ParseNum(dseq),
				LastConsumerSequence: parser.ParseNum(ldseq),
			}
			nc.handleConsumerSequenceMismatch(s, ecs)
		}
	}
}

// streamRequest is the request body for stream name lookups by subject.
type streamRequest struct {
	Subject string `json:"subject,omitempty"`
}

// streamNamesResponse is the paged API response listing stream names.
type streamNamesResponse struct {
	apiResponse
	apiPaged
	Streams []string `json:"streams"`
}

// subOpts accumulates the effective options of a Subscribe call.
type subOpts struct {
	// For attaching.
	stream, consumer string
	// For creating or updating.
	cfg *ConsumerConfig
	// For binding a subscription to a consumer without creating it.
	bound bool
	// For manual ack
	mack bool
	// For an ordered consumer.
	ordered bool
	ctx     context.Context

	// To disable calling ConsumerInfo
	skipCInfo bool
}

// SkipConsumerLookup will omit looking up consumer when [Bind], [Durable]
// or [ConsumerName] are provided.
//
// NOTE: This setting may cause an existing consumer to be overwritten. Also,
// because consumer lookup is skipped, all consumer options like AckPolicy,
// DeliverSubject etc. need to be provided even if consumer already exists.
func SkipConsumerLookup() SubOpt {
	return subOptFn(func(opts *subOpts) error {
		opts.skipCInfo = true
		return nil
	})
}

// OrderedConsumer will create a FIFO direct/ephemeral consumer for in order delivery of messages.
// There are no redeliveries and no acks, and flow control and heartbeats will be added but
// will be taken care of without additional client code.
func OrderedConsumer() SubOpt { return subOptFn(func(opts *subOpts) error { opts.ordered = true return nil }) } // ManualAck disables auto ack functionality for async subscriptions. func ManualAck() SubOpt { return subOptFn(func(opts *subOpts) error { opts.mack = true return nil }) } // Description will set the description for the created consumer. func Description(description string) SubOpt { return subOptFn(func(opts *subOpts) error { opts.cfg.Description = description return nil }) } // Durable defines the consumer name for JetStream durable subscribers. // This function will return ErrInvalidConsumerName if the name contains // any dot ".". func Durable(consumer string) SubOpt { return subOptFn(func(opts *subOpts) error { if opts.cfg.Durable != _EMPTY_ { return errors.New("nats: option Durable set more than once") } if opts.consumer != _EMPTY_ && opts.consumer != consumer { return fmt.Errorf("nats: duplicate consumer names (%s and %s)", opts.consumer, consumer) } if err := checkConsumerName(consumer); err != nil { return err } opts.cfg.Durable = consumer return nil }) } // DeliverAll will configure a Consumer to receive all the // messages from a Stream. func DeliverAll() SubOpt { return subOptFn(func(opts *subOpts) error { opts.cfg.DeliverPolicy = DeliverAllPolicy return nil }) } // DeliverLast configures a Consumer to receive messages // starting with the latest one. func DeliverLast() SubOpt { return subOptFn(func(opts *subOpts) error { opts.cfg.DeliverPolicy = DeliverLastPolicy return nil }) } // DeliverLastPerSubject configures a Consumer to receive messages // starting with the latest one for each filtered subject. func DeliverLastPerSubject() SubOpt { return subOptFn(func(opts *subOpts) error { opts.cfg.DeliverPolicy = DeliverLastPerSubjectPolicy return nil }) } // DeliverNew configures a Consumer to receive messages // published after the subscription. 
func DeliverNew() SubOpt { return subOptFn(func(opts *subOpts) error { opts.cfg.DeliverPolicy = DeliverNewPolicy return nil }) } // StartSequence configures a Consumer to receive // messages from a start sequence. func StartSequence(seq uint64) SubOpt { return subOptFn(func(opts *subOpts) error { opts.cfg.DeliverPolicy = DeliverByStartSequencePolicy opts.cfg.OptStartSeq = seq return nil }) } // StartTime configures a Consumer to receive // messages from a start time. func StartTime(startTime time.Time) SubOpt { return subOptFn(func(opts *subOpts) error { opts.cfg.DeliverPolicy = DeliverByStartTimePolicy opts.cfg.OptStartTime = &startTime return nil }) } // AckNone requires no acks for delivered messages. func AckNone() SubOpt { return subOptFn(func(opts *subOpts) error { opts.cfg.AckPolicy = AckNonePolicy return nil }) } // AckAll when acking a sequence number, this implicitly acks all sequences // below this one as well. func AckAll() SubOpt { return subOptFn(func(opts *subOpts) error { opts.cfg.AckPolicy = AckAllPolicy return nil }) } // AckExplicit requires ack or nack for all messages. func AckExplicit() SubOpt { return subOptFn(func(opts *subOpts) error { opts.cfg.AckPolicy = AckExplicitPolicy return nil }) } // MaxDeliver sets the number of redeliveries for a message. func MaxDeliver(n int) SubOpt { return subOptFn(func(opts *subOpts) error { opts.cfg.MaxDeliver = n return nil }) } // MaxAckPending sets the number of outstanding acks that are allowed before // message delivery is halted. func MaxAckPending(n int) SubOpt { return subOptFn(func(opts *subOpts) error { opts.cfg.MaxAckPending = n return nil }) } // ReplayOriginal replays the messages at the original speed. func ReplayOriginal() SubOpt { return subOptFn(func(opts *subOpts) error { opts.cfg.ReplayPolicy = ReplayOriginalPolicy return nil }) } // ReplayInstant replays the messages as fast as possible. 
func ReplayInstant() SubOpt { return subOptFn(func(opts *subOpts) error { opts.cfg.ReplayPolicy = ReplayInstantPolicy return nil }) } // RateLimit is the Bits per sec rate limit applied to a push consumer. func RateLimit(n uint64) SubOpt { return subOptFn(func(opts *subOpts) error { opts.cfg.RateLimit = n return nil }) } // BackOff is an array of time durations that represent the time to delay based on delivery count. func BackOff(backOff []time.Duration) SubOpt { return subOptFn(func(opts *subOpts) error { opts.cfg.BackOff = backOff return nil }) } // BindStream binds a consumer to a stream explicitly based on a name. // When a stream name is not specified, the library uses the subscribe // subject as a way to find the stream name. It is done by making a request // to the server to get list of stream names that have a filter for this // subject. If the returned list contains a single stream, then this // stream name will be used, otherwise the `ErrNoMatchingStream` is returned. // To avoid the stream lookup, provide the stream name with this function. // See also `Bind()`. func BindStream(stream string) SubOpt { return subOptFn(func(opts *subOpts) error { if opts.stream != _EMPTY_ && opts.stream != stream { return fmt.Errorf("nats: duplicate stream name (%s and %s)", opts.stream, stream) } opts.stream = stream return nil }) } // Bind binds a subscription to an existing consumer from a stream without attempting to create. // The first argument is the stream name and the second argument will be the consumer name. func Bind(stream, consumer string) SubOpt { return subOptFn(func(opts *subOpts) error { if stream == _EMPTY_ { return ErrStreamNameRequired } if consumer == _EMPTY_ { return ErrConsumerNameRequired } // In case of pull subscribers, the durable name is a required parameter // so check that they are not different. 
if opts.cfg.Durable != _EMPTY_ && opts.cfg.Durable != consumer { return fmt.Errorf("nats: duplicate consumer names (%s and %s)", opts.cfg.Durable, consumer) } if opts.stream != _EMPTY_ && opts.stream != stream { return fmt.Errorf("nats: duplicate stream name (%s and %s)", opts.stream, stream) } opts.stream = stream opts.consumer = consumer opts.bound = true return nil }) } // EnableFlowControl enables flow control for a push based consumer. func EnableFlowControl() SubOpt { return subOptFn(func(opts *subOpts) error { opts.cfg.FlowControl = true return nil }) } // IdleHeartbeat enables push based consumers to have idle heartbeats delivered. // For pull consumers, idle heartbeat has to be set on each [Fetch] call. func IdleHeartbeat(duration time.Duration) SubOpt { return subOptFn(func(opts *subOpts) error { opts.cfg.Heartbeat = duration return nil }) } // DeliverSubject specifies the JetStream consumer deliver subject. // // This option is used only in situations where the consumer does not exist // and a creation request is sent to the server. If not provided, an inbox // will be selected. // If a consumer exists, then the NATS subscription will be created on // the JetStream consumer's DeliverSubject, not necessarily this subject. func DeliverSubject(subject string) SubOpt { return subOptFn(func(opts *subOpts) error { opts.cfg.DeliverSubject = subject return nil }) } // HeadersOnly() will instruct the consumer to only deliver headers and no payloads. func HeadersOnly() SubOpt { return subOptFn(func(opts *subOpts) error { opts.cfg.HeadersOnly = true return nil }) } // MaxRequestBatch sets the maximum pull consumer batch size that a Fetch() // can request. func MaxRequestBatch(max int) SubOpt { return subOptFn(func(opts *subOpts) error { opts.cfg.MaxRequestBatch = max return nil }) } // MaxRequestExpires sets the maximum pull consumer request expiration that a // Fetch() can request (using the Fetch's timeout value). 
func MaxRequestExpires(max time.Duration) SubOpt { return subOptFn(func(opts *subOpts) error { opts.cfg.MaxRequestExpires = max return nil }) } // MaxRequesMaxBytes sets the maximum pull consumer request bytes that a // Fetch() can receive. func MaxRequestMaxBytes(bytes int) SubOpt { return subOptFn(func(opts *subOpts) error { opts.cfg.MaxRequestMaxBytes = bytes return nil }) } // InactiveThreshold indicates how long the server should keep a consumer // after detecting a lack of activity. In NATS Server 2.8.4 and earlier, this // option only applies to ephemeral consumers. In NATS Server 2.9.0 and later, // this option applies to both ephemeral and durable consumers, allowing durable // consumers to also be deleted automatically after the inactivity threshold has // passed. func InactiveThreshold(threshold time.Duration) SubOpt { return subOptFn(func(opts *subOpts) error { if threshold < 0 { return fmt.Errorf("invalid InactiveThreshold value (%v), needs to be greater or equal to 0", threshold) } opts.cfg.InactiveThreshold = threshold return nil }) } // ConsumerReplicas sets the number of replica count for a consumer. func ConsumerReplicas(replicas int) SubOpt { return subOptFn(func(opts *subOpts) error { if replicas < 1 { return fmt.Errorf("invalid ConsumerReplicas value (%v), needs to be greater than 0", replicas) } opts.cfg.Replicas = replicas return nil }) } // ConsumerMemoryStorage sets the memory storage to true for a consumer. func ConsumerMemoryStorage() SubOpt { return subOptFn(func(opts *subOpts) error { opts.cfg.MemoryStorage = true return nil }) } // ConsumerName sets the name for a consumer. func ConsumerName(name string) SubOpt { return subOptFn(func(opts *subOpts) error { opts.cfg.Name = name return nil }) } // ConsumerFilterSubjects can be used to set multiple subject filters on the consumer. // It has to be used in conjunction with [nats.BindStream] and // with empty 'subject' parameter. 
func ConsumerFilterSubjects(subjects ...string) SubOpt { return subOptFn(func(opts *subOpts) error { opts.cfg.FilterSubjects = subjects return nil }) } func (sub *Subscription) ConsumerInfo() (*ConsumerInfo, error) { sub.mu.Lock() // TODO(dlc) - Better way to mark especially if we attach. if sub.jsi == nil || sub.jsi.consumer == _EMPTY_ { sub.mu.Unlock() return nil, ErrTypeSubscription } // Consumer info lookup should fail if in direct mode. js := sub.jsi.js stream, consumer := sub.jsi.stream, sub.jsi.consumer sub.mu.Unlock() return js.getConsumerInfo(stream, consumer) } type pullOpts struct { maxBytes int ttl time.Duration ctx context.Context hb time.Duration } // PullOpt are the options that can be passed when pulling a batch of messages. type PullOpt interface { configurePull(opts *pullOpts) error } // PullMaxWaiting defines the max inflight pull requests. func PullMaxWaiting(n int) SubOpt { return subOptFn(func(opts *subOpts) error { opts.cfg.MaxWaiting = n return nil }) } type PullHeartbeat time.Duration func (h PullHeartbeat) configurePull(opts *pullOpts) error { if h <= 0 { return fmt.Errorf("%w: idle heartbeat has to be greater than 0", ErrInvalidArg) } opts.hb = time.Duration(h) return nil } // PullMaxBytes defines the max bytes allowed for a fetch request. type PullMaxBytes int func (n PullMaxBytes) configurePull(opts *pullOpts) error { opts.maxBytes = int(n) return nil } var ( // errNoMessages is an error that a Fetch request using no_wait can receive to signal // that there are no more messages available. errNoMessages = errors.New("nats: no messages") // errRequestsPending is an error that represents a sub.Fetch requests that was using // no_wait and expires time got discarded by the server. errRequestsPending = errors.New("nats: requests pending") ) // Returns if the given message is a user message or not, and if // `checkSts` is true, returns appropriate error based on the // content of the status (404, etc..) 
func checkMsg(msg *Msg, checkSts, isNoWait bool) (usrMsg bool, err error) {
	// Assume user message
	usrMsg = true

	// If payload or no header, consider this a user message
	if len(msg.Data) > 0 || len(msg.Header) == 0 {
		return
	}
	// Look for status header
	val := msg.Header.Get(statusHdr)
	// If not present, then this is considered a user message
	if val == _EMPTY_ {
		return
	}
	// At this point, this is not a user message since there is
	// no payload and a "Status" header.
	usrMsg = false

	// If we don't care about status, we are done.
	if !checkSts {
		return
	}

	// if it's a heartbeat message, report as not user msg
	if isHb, _ := isJSControlMessage(msg); isHb {
		return
	}

	// Map the status code to the appropriate sentinel error.
	switch val {
	case noResponders:
		err = ErrNoResponders
	case noMessagesSts:
		// 404 indicates that there are no messages.
		err = errNoMessages
	case reqTimeoutSts:
		// In case of a fetch request with no wait request and expires time,
		// need to skip 408 errors and retry.
		if isNoWait {
			err = errRequestsPending
		} else {
			// Older servers may send a 408 when a request in the server was expired
			// and interest is still found, which will be the case for our
			// implementation. Regardless, ignore 408 errors until receiving at least
			// one message when making requests without no_wait.
			err = ErrTimeout
		}
	case jetStream409Sts:
		// A 409 carries the detail in the description header; recognize
		// the two known cases, otherwise fall through to the generic error.
		if strings.Contains(strings.ToLower(msg.Header.Get(descrHdr)), "consumer deleted") {
			err = ErrConsumerDeleted
			break
		}

		if strings.Contains(strings.ToLower(msg.Header.Get(descrHdr)), "leadership change") {
			err = ErrConsumerLeadershipChanged
			break
		}
		fallthrough
	default:
		err = fmt.Errorf("nats: %s", msg.Header.Get(descrHdr))
	}
	return
}

// Fetch pulls a batch of messages from a stream for a pull consumer.
func (sub *Subscription) Fetch(batch int, opts ...PullOpt) ([]*Msg, error) {
	if sub == nil {
		return nil, ErrBadSubscription
	}
	if batch < 1 {
		return nil, ErrInvalidArg
	}

	var o pullOpts
	for _, opt := range opts {
		if err := opt.configurePull(&o); err != nil {
			return nil, err
		}
	}
	if o.ctx != nil && o.ttl != 0 {
		return nil, ErrContextAndTimeout
	}

	sub.mu.Lock()
	jsi := sub.jsi
	// Reject if this is not a pull subscription. Note that sub.typ is SyncSubscription,
	// so check for jsi.pull boolean instead.
	if jsi == nil || !jsi.pull {
		sub.mu.Unlock()
		return nil, ErrTypeSubscription
	}

	nc := sub.conn
	nms := sub.jsi.nms
	rply, _ := newFetchInbox(jsi.deliver)
	js := sub.jsi.js
	// pmc: there may already be messages pending in the channel from a
	// previous request; drain those first before asking the server.
	pmc := len(sub.mch) > 0

	// All fetch requests have an expiration, in case of no explicit expiration
	// then the default timeout of the JetStream context is used.
	ttl := o.ttl
	if ttl == 0 {
		ttl = js.opts.wait
	}
	sub.mu.Unlock()

	// Use the given context or setup a default one for the span
	// of the pull batch request.
	var (
		ctx    = o.ctx
		err    error
		cancel context.CancelFunc
	)
	if ctx == nil {
		ctx, cancel = context.WithTimeout(context.Background(), ttl)
	} else if _, hasDeadline := ctx.Deadline(); !hasDeadline {
		// Prevent from passing the background context which will just block
		// and cannot be canceled either.
		if octx, ok := ctx.(ContextOpt); ok && octx.Context == context.Background() {
			return nil, ErrNoDeadlineContext
		}

		// If the context did not have a deadline, then create a new child context
		// that will use the default timeout from the JS context.
		ctx, cancel = context.WithTimeout(ctx, ttl)
	} else {
		ctx, cancel = context.WithCancel(ctx)
	}
	defer cancel()

	// if heartbeat is set, validate it against the context timeout
	if o.hb > 0 {
		deadline, _ := ctx.Deadline()
		if 2*o.hb >= time.Until(deadline) {
			return nil, fmt.Errorf("%w: idle heartbeat value too large", ErrInvalidArg)
		}
	}

	// Check if context not done already before making the request.
	select {
	case <-ctx.Done():
		if o.ctx != nil { // Timeout or Cancel triggered by context object option
			err = ctx.Err()
		} else { // Timeout triggered by timeout option
			err = ErrTimeout
		}
	default:
	}
	if err != nil {
		return nil, err
	}

	var (
		msgs = make([]*Msg, 0, batch)
		msg  *Msg
	)
	// First drain any already-buffered messages without waiting.
	for pmc && len(msgs) < batch {
		// Check next msg with booleans that say that this is an internal call
		// for a pull subscribe (so don't reject it) and don't wait if there
		// are no messages.
		msg, err = sub.nextMsgWithContext(ctx, true, false)
		if err != nil {
			if errors.Is(err, errNoMessages) {
				err = nil
			}
			break
		}
		// Check msg but just to determine if this is a user message
		// or status message, however, we don't care about values of status
		// messages at this point in the Fetch() call, so checkMsg can't
		// return an error.
		if usrMsg, _ := checkMsg(msg, false, false); usrMsg {
			msgs = append(msgs, msg)
		}
	}
	var hbTimer *time.Timer
	defer func() {
		if hbTimer != nil {
			hbTimer.Stop()
		}
	}()
	// hbErr is set by the heartbeat timer callback; guarded by hbLock.
	var hbErr error
	sub.mu.Lock()
	subClosed := sub.closed || sub.draining
	sub.mu.Unlock()
	if subClosed {
		err = errors.Join(ErrBadSubscription, ErrSubscriptionClosed)
	}
	hbLock := sync.Mutex{}
	var disconnected atomic.Bool
	if err == nil && len(msgs) < batch && !subClosed {
		// For batch real size of 1, it does not make sense to set no_wait in
		// the request.
		noWait := batch-len(msgs) > 1

		var nr nextRequest

		// sendReq publishes the pull request for the remaining messages
		// and (re)arms the heartbeat timer when heartbeats are requested.
		sendReq := func() error {
			// The current deadline for the context will be used
			// to set the expires TTL for a fetch request.
			deadline, _ := ctx.Deadline()
			ttl = time.Until(deadline)

			// Check if context has already been canceled or expired.
			select {
			case <-ctx.Done():
				return ctx.Err()
			default:
			}

			// Make our request expiration a bit shorter than the current timeout.
			expiresDiff := time.Duration(float64(ttl) * 0.1)
			if expiresDiff > 5*time.Second {
				expiresDiff = 5 * time.Second
			}
			expires := ttl - expiresDiff

			nr.Batch = batch - len(msgs)
			nr.Expires = expires
			nr.NoWait = noWait
			nr.MaxBytes = o.maxBytes
			if 2*o.hb < expires {
				nr.Heartbeat = o.hb
			} else {
				nr.Heartbeat = 0
			}
			req, _ := json.Marshal(nr)
			if err := nc.PublishRequest(nms, rply, req); err != nil {
				return err
			}
			if o.hb > 0 {
				if hbTimer == nil {
					// Missing two consecutive heartbeats aborts the fetch.
					hbTimer = time.AfterFunc(2*o.hb, func() {
						hbLock.Lock()
						hbErr = ErrNoHeartbeat
						hbLock.Unlock()
						cancel()
					})
				} else {
					hbTimer.Reset(2 * o.hb)
				}
			}
			return nil
		}

		// Watch for connection status changes so a disconnect cancels the
		// in-flight fetch instead of waiting for the deadline.
		connStatusChanged := nc.StatusChanged()
		go func() {
			select {
			case <-ctx.Done():
				return
			case <-connStatusChanged:
				disconnected.Store(true)
				cancel()
				return
			}
		}()

		err = sendReq()
		for err == nil && len(msgs) < batch {
			// Ask for next message and wait if there are no messages
			msg, err = sub.nextMsgWithContext(ctx, true, true)
			if err == nil {
				if hbTimer != nil {
					hbTimer.Reset(2 * o.hb)
				}
				var usrMsg bool

				usrMsg, err = checkMsg(msg, true, noWait)
				if err == nil && usrMsg {
					msgs = append(msgs, msg)
				} else if noWait && (errors.Is(err, errNoMessages) || errors.Is(err, errRequestsPending)) && len(msgs) == 0 {
					// If we have a 404/408 for our "no_wait" request and have
					// not collected any message, then resend request to
					// wait this time.
					noWait = false
					err = sendReq()
				} else if errors.Is(err, ErrTimeout) && len(msgs) == 0 {
					// If we get a 408, we will bail if we already collected some
					// messages, otherwise ignore and go back calling nextMsg.
					err = nil
				}
			}
		}
	}
	// If there is at least a message added to msgs, then need to return OK and no error
	if err != nil && len(msgs) == 0 {
		hbLock.Lock()
		defer hbLock.Unlock()
		if hbErr != nil {
			return nil, hbErr
		}
		if disconnected.Load() {
			return nil, ErrFetchDisconnected
		}
		return nil, o.checkCtxErr(err)
	}
	return msgs, nil
}

// newFetchInbox returns subject used as reply subject when sending pull requests
// as well as request ID.
For non-wildcard subject, request ID is empty and // passed subject is not transformed func newFetchInbox(subj string) (string, string) { if !strings.HasSuffix(subj, ".*") { return subj, "" } reqID := nuid.Next() var sb strings.Builder sb.WriteString(subj[:len(subj)-1]) sb.WriteString(reqID) return sb.String(), reqID } func subjectMatchesReqID(subject, reqID string) bool { subjectParts := strings.Split(subject, ".") if len(subjectParts) < 2 { return false } return subjectParts[len(subjectParts)-1] == reqID } // MessageBatch provides methods to retrieve messages consumed using [Subscribe.FetchBatch]. type MessageBatch interface { // Messages returns a channel on which messages will be published. Messages() <-chan *Msg // Error returns an error encountered when fetching messages. Error() error // Done signals end of execution. Done() <-chan struct{} } type messageBatch struct { sync.Mutex msgs chan *Msg err error done chan struct{} } func (mb *messageBatch) Messages() <-chan *Msg { mb.Lock() defer mb.Unlock() return mb.msgs } func (mb *messageBatch) Error() error { mb.Lock() defer mb.Unlock() return mb.err } func (mb *messageBatch) Done() <-chan struct{} { mb.Lock() defer mb.Unlock() return mb.done } // FetchBatch pulls a batch of messages from a stream for a pull consumer. // Unlike [Subscription.Fetch], it is non blocking and returns [MessageBatch], // allowing to retrieve incoming messages from a channel. // The returned channel is always closed after all messages for a batch have been // delivered by the server - it is safe to iterate over it using range. // // To avoid using default JetStream timeout as fetch expiry time, use [nats.MaxWait] // or [nats.Context] (with deadline set). // // This method will not return error in case of pull request expiry (even if there are no messages). // Any other error encountered when receiving messages will cause FetchBatch to stop receiving new messages. 
func (sub *Subscription) FetchBatch(batch int, opts ...PullOpt) (MessageBatch, error) {
	if sub == nil {
		return nil, ErrBadSubscription
	}
	if batch < 1 {
		return nil, ErrInvalidArg
	}

	var o pullOpts
	for _, opt := range opts {
		if err := opt.configurePull(&o); err != nil {
			return nil, err
		}
	}
	if o.ctx != nil && o.ttl != 0 {
		return nil, ErrContextAndTimeout
	}
	sub.mu.Lock()
	jsi := sub.jsi
	// Reject if this is not a pull subscription. Note that sub.typ is SyncSubscription,
	// so check for jsi.pull boolean instead.
	if jsi == nil || !jsi.pull {
		sub.mu.Unlock()
		return nil, ErrTypeSubscription
	}

	nc := sub.conn
	nms := sub.jsi.nms
	rply, reqID := newFetchInbox(sub.jsi.deliver)
	js := sub.jsi.js
	// pmc: messages may already be buffered from a previous request;
	// drain those first.
	pmc := len(sub.mch) > 0

	// All fetch requests have an expiration, in case of no explicit expiration
	// then the default timeout of the JetStream context is used.
	ttl := o.ttl
	if ttl == 0 {
		ttl = js.opts.wait
	}
	sub.mu.Unlock()

	// Use the given context or setup a default one for the span
	// of the pull batch request.
	var (
		ctx           = o.ctx
		cancel        context.CancelFunc
		cancelContext = true
	)
	if ctx == nil {
		ctx, cancel = context.WithTimeout(context.Background(), ttl)
	} else if _, hasDeadline := ctx.Deadline(); !hasDeadline {
		// Prevent from passing the background context which will just block
		// and cannot be canceled either.
		if octx, ok := ctx.(ContextOpt); ok && octx.Context == context.Background() {
			return nil, ErrNoDeadlineContext
		}

		// If the context did not have a deadline, then create a new child context
		// that will use the default timeout from the JS context.
		ctx, cancel = context.WithTimeout(ctx, ttl)
	} else {
		ctx, cancel = context.WithCancel(ctx)
	}
	defer func() {
		// only cancel the context here if we are sure the fetching goroutine has not been started yet
		if cancelContext {
			cancel()
		}
	}()

	// if heartbeat is set, validate it against the context timeout
	if o.hb > 0 {
		deadline, _ := ctx.Deadline()
		if 2*o.hb >= time.Until(deadline) {
			return nil, fmt.Errorf("%w: idle heartbeat value too large", ErrInvalidArg)
		}
	}

	// Check if context not done already before making the request.
	select {
	case <-ctx.Done():
		if o.ctx != nil { // Timeout or Cancel triggered by context object option
			return nil, ctx.Err()
		} else { // Timeout triggered by timeout option
			return nil, ErrTimeout
		}
	default:
	}

	result := &messageBatch{
		msgs: make(chan *Msg, batch),
		done: make(chan struct{}, 1),
	}
	var msg *Msg
	// First drain any already-buffered messages without waiting.
	for pmc && len(result.msgs) < batch {
		// Check next msg with booleans that say that this is an internal call
		// for a pull subscribe (so don't reject it) and don't wait if there
		// are no messages.
		msg, err := sub.nextMsgWithContext(ctx, true, false)
		if err != nil {
			if errors.Is(err, errNoMessages) {
				err = nil
			}
			result.err = err
			break
		}
		// Check msg but just to determine if this is a user message
		// or status message, however, we don't care about values of status
		// messages at this point in the Fetch() call, so checkMsg can't
		// return an error.
		if usrMsg, _ := checkMsg(msg, false, false); usrMsg {
			result.msgs <- msg
		}
	}
	sub.mu.Lock()
	subClosed := sub.closed || sub.draining
	sub.mu.Unlock()
	// If the batch was satisfied from buffered messages (or the sub is
	// unusable), finish synchronously without a server round trip.
	if len(result.msgs) == batch || result.err != nil || subClosed {
		close(result.msgs)
		if subClosed && len(result.msgs) == 0 {
			return nil, errors.Join(ErrBadSubscription, ErrSubscriptionClosed)
		}
		result.done <- struct{}{}
		return result, nil
	}

	deadline, _ := ctx.Deadline()
	ttl = time.Until(deadline)

	// Make our request expiration a bit shorter than the current timeout.
	expiresDiff := time.Duration(float64(ttl) * 0.1)
	if expiresDiff > 5*time.Second {
		expiresDiff = 5 * time.Second
	}
	expires := ttl - expiresDiff

	// Watch for connection status changes so a disconnect cancels the
	// in-flight fetch instead of waiting for the deadline.
	connStatusChanged := nc.StatusChanged()
	var disconnected atomic.Bool
	go func() {
		select {
		case <-ctx.Done():
			return
		case <-connStatusChanged:
			disconnected.Store(true)
			cancel()
			return
		}
	}()

	requestBatch := batch - len(result.msgs)
	req := nextRequest{
		Expires:   expires,
		Batch:     requestBatch,
		MaxBytes:  o.maxBytes,
		Heartbeat: o.hb,
	}
	reqJSON, err := json.Marshal(req)
	if err != nil {
		close(result.msgs)
		result.done <- struct{}{}
		result.err = err
		return result, nil
	}
	if err := nc.PublishRequest(nms, rply, reqJSON); err != nil {
		if len(result.msgs) == 0 {
			return nil, err
		}
		close(result.msgs)
		result.done <- struct{}{}
		result.err = err
		return result, nil
	}
	var hbTimer *time.Timer
	defer func() {
		if hbTimer != nil {
			hbTimer.Stop()
		}
	}()
	// hbErr is set by the heartbeat timer callback; guarded by result's mutex.
	var hbErr error
	if o.hb > 0 {
		// Missing two consecutive heartbeats aborts the fetch.
		hbTimer = time.AfterFunc(2*o.hb, func() {
			result.Lock()
			hbErr = ErrNoHeartbeat
			result.Unlock()
			cancel()
		})
	}
	// From here on, the fetching goroutine owns cancellation of ctx.
	cancelContext = false
	go func() {
		defer cancel()
		var requestMsgs int
		for requestMsgs < requestBatch {
			// Ask for next message and wait if there are no messages
			msg, err = sub.nextMsgWithContext(ctx, true, true)
			if err != nil {
				break
			}
			if hbTimer != nil {
				hbTimer.Reset(2 * o.hb)
			}
			var usrMsg bool

			usrMsg, err = checkMsg(msg, true, false)
			if err != nil {
				if errors.Is(err, ErrTimeout) {
					if reqID != "" && !subjectMatchesReqID(msg.Subject, reqID) {
						// ignore timeout message from server if it comes from a different pull request
						continue
					}
					err = nil
				}
				break
			}
			if usrMsg {
				result.Lock()
				result.msgs <- msg
				result.Unlock()
				requestMsgs++
			}
		}
		if err != nil {
			result.Lock()
			if hbErr != nil {
				result.err = hbErr
			} else if disconnected.Load() {
				result.err = ErrFetchDisconnected
			} else {
				result.err = o.checkCtxErr(err)
			}
			result.Unlock()
		}
		close(result.msgs)
		result.Lock()
		result.done <- struct{}{}
		result.Unlock()
	}()

	return result, nil
}

// checkCtxErr is used to determine whether
ErrTimeout should be returned in case of context timeout func (o *pullOpts) checkCtxErr(err error) error { if o.ctx == nil && errors.Is(err, context.DeadlineExceeded) { return ErrTimeout } return err } func (js *js) getConsumerInfo(stream, consumer string) (*ConsumerInfo, error) { ctx, cancel := context.WithTimeout(context.Background(), js.opts.wait) defer cancel() return js.getConsumerInfoContext(ctx, stream, consumer) } func (js *js) getConsumerInfoContext(ctx context.Context, stream, consumer string) (*ConsumerInfo, error) { ccInfoSubj := fmt.Sprintf(apiConsumerInfoT, stream, consumer) resp, err := js.apiRequestWithContext(ctx, js.apiSubj(ccInfoSubj), nil) if err != nil { if errors.Is(err, ErrNoResponders) { err = ErrJetStreamNotEnabled } return nil, err } var info consumerResponse if err := json.Unmarshal(resp.Data, &info); err != nil { return nil, err } if info.Error != nil { if errors.Is(info.Error, ErrConsumerNotFound) { return nil, ErrConsumerNotFound } if errors.Is(info.Error, ErrStreamNotFound) { return nil, ErrStreamNotFound } return nil, info.Error } if info.Error == nil && info.ConsumerInfo == nil { return nil, ErrConsumerNotFound } return info.ConsumerInfo, nil } // a RequestWithContext with tracing via TraceCB func (js *js) apiRequestWithContext(ctx context.Context, subj string, data []byte) (*Msg, error) { if js.opts.shouldTrace { ctrace := js.opts.ctrace if ctrace.RequestSent != nil { ctrace.RequestSent(subj, data) } } resp, err := js.nc.RequestWithContext(ctx, subj, data) if err != nil { return nil, err } if js.opts.shouldTrace { ctrace := js.opts.ctrace if ctrace.RequestSent != nil { ctrace.ResponseReceived(subj, resp.Data, resp.Header) } } return resp, nil } func (m *Msg) checkReply() error { if m == nil || m.Sub == nil { return ErrMsgNotBound } if m.Reply == _EMPTY_ { return ErrMsgNoReply } return nil } // ackReply handles all acks. Will do the right thing for pull and sync mode. 
// It ensures that an ack is only sent a single time, regardless of
// how many times it is being called to avoid duplicated acks.
func (m *Msg) ackReply(ackType []byte, sync bool, opts ...AckOpt) error {
	var o ackOpts
	for _, opt := range opts {
		if err := opt.configureAck(&o); err != nil {
			return err
		}
	}
	// The message must be bound to a subscription and have a reply subject.
	if err := m.checkReply(); err != nil {
		return err
	}
	var ackNone bool
	var js *js
	// Snapshot connection and JetStream state under the subscription lock.
	sub := m.Sub
	sub.mu.Lock()
	nc := sub.conn
	if jsi := sub.jsi; jsi != nil {
		js = jsi.js
		ackNone = jsi.ackNone
	}
	sub.mu.Unlock()
	// Skip if already acked.
	if atomic.LoadUint32(&m.ackd) == 1 {
		return ErrMsgAlreadyAckd
	}
	if ackNone {
		return ErrCantAckIfConsumerAckNone
	}
	usesCtx := o.ctx != nil
	usesWait := o.ttl > 0
	// Only allow either AckWait or Context option to set the timeout.
	if usesWait && usesCtx {
		return ErrContextAndTimeout
	}
	// A context or explicit wait implies a synchronous ack.
	sync = sync || usesCtx || usesWait
	ctx := o.ctx
	wait := defaultRequestWait
	if usesWait {
		wait = o.ttl
	} else if js != nil {
		wait = js.opts.wait
	}
	var body []byte
	var err error
	// This will be > 0 only when called from NakWithDelay()
	if o.nakDelay > 0 {
		body = []byte(fmt.Sprintf("%s {\"delay\": %d}", ackType, o.nakDelay.Nanoseconds()))
	} else {
		body = ackType
	}
	if sync {
		if usesCtx {
			_, err = nc.RequestWithContext(ctx, m.Reply, body)
		} else {
			_, err = nc.Request(m.Reply, body, wait)
		}
	} else {
		err = nc.Publish(m.Reply, body)
	}
	// Mark that the message has been acked unless it is ackProgress
	// which can be sent many times.
	if err == nil && !bytes.Equal(ackType, ackProgress) {
		atomic.StoreUint32(&m.ackd, 1)
	}
	return err
}

// Ack acknowledges a message. This tells the server that the message was
// successfully processed and it can move on to the next message.
func (m *Msg) Ack(opts ...AckOpt) error {
	return m.ackReply(ackAck, false, opts...)
}

// AckSync is the synchronous version of Ack. This indicates successful message
// processing.
func (m *Msg) AckSync(opts ...AckOpt) error {
	return m.ackReply(ackAck, true, opts...)
}

// Nak negatively acknowledges a message. This tells the server to redeliver
// the message. You can configure the number of redeliveries by passing
// nats.MaxDeliver when you Subscribe. The default is infinite redeliveries.
func (m *Msg) Nak(opts ...AckOpt) error {
	return m.ackReply(ackNak, false, opts...)
}

// NakWithDelay negatively acknowledges a message. This tells the server to
// redeliver the message after the given `delay` duration. You can configure
// the number of redeliveries by passing nats.MaxDeliver when you Subscribe.
// The default is infinite redeliveries.
func (m *Msg) NakWithDelay(delay time.Duration, opts ...AckOpt) error {
	if delay > 0 {
		opts = append(opts, nakDelay(delay))
	}
	return m.ackReply(ackNak, false, opts...)
}

// Term tells the server to not redeliver this message, regardless of the value
// of nats.MaxDeliver.
func (m *Msg) Term(opts ...AckOpt) error {
	return m.ackReply(ackTerm, false, opts...)
}

// InProgress tells the server that this message is being worked on. It resets
// the redelivery timer on the server.
func (m *Msg) InProgress(opts ...AckOpt) error {
	return m.ackReply(ackProgress, false, opts...)
}

// MsgMetadata is the JetStream metadata associated with received messages.
type MsgMetadata struct {
	// Sequence holds the stream and consumer sequence pair for this message.
	Sequence SequencePair
	// NumDelivered is the number of times this message was delivered.
	NumDelivered uint64
	// NumPending is the number of messages still pending for the consumer.
	NumPending uint64
	// Timestamp is the time the message was originally stored in the stream.
	Timestamp time.Time
	// Stream is the name of the stream the message belongs to.
	Stream string
	// Consumer is the name of the consumer that delivered the message.
	Consumer string
	// Domain is the JetStream domain the message originated from, if any.
	Domain string
}

// Metadata retrieves the metadata from a JetStream message. This method will
// return an error for non-JetStream Msgs.
func (m *Msg) Metadata() (*MsgMetadata, error) { if err := m.checkReply(); err != nil { return nil, err } tokens, err := parser.GetMetadataFields(m.Reply) if err != nil { return nil, err } meta := &MsgMetadata{ Domain: tokens[parser.AckDomainTokenPos], NumDelivered: parser.ParseNum(tokens[parser.AckNumDeliveredTokenPos]), NumPending: parser.ParseNum(tokens[parser.AckNumPendingTokenPos]), Timestamp: time.Unix(0, int64(parser.ParseNum(tokens[parser.AckTimestampSeqTokenPos]))), Stream: tokens[parser.AckStreamTokenPos], Consumer: tokens[parser.AckConsumerTokenPos], } meta.Sequence.Stream = parser.ParseNum(tokens[parser.AckStreamSeqTokenPos]) meta.Sequence.Consumer = parser.ParseNum(tokens[parser.AckConsumerSeqTokenPos]) return meta, nil } // AckPolicy determines how the consumer should acknowledge delivered messages. type AckPolicy int const ( // AckNonePolicy requires no acks for delivered messages. AckNonePolicy AckPolicy = iota // AckAllPolicy when acking a sequence number, this implicitly acks all // sequences below this one as well. AckAllPolicy // AckExplicitPolicy requires ack or nack for all messages. 
AckExplicitPolicy // For configuration mismatch check ackPolicyNotSet = 99 ) func jsonString(s string) string { return "\"" + s + "\"" } func (p *AckPolicy) UnmarshalJSON(data []byte) error { switch string(data) { case jsonString("none"): *p = AckNonePolicy case jsonString("all"): *p = AckAllPolicy case jsonString("explicit"): *p = AckExplicitPolicy default: return fmt.Errorf("nats: can not unmarshal %q", data) } return nil } func (p AckPolicy) MarshalJSON() ([]byte, error) { switch p { case AckNonePolicy: return json.Marshal("none") case AckAllPolicy: return json.Marshal("all") case AckExplicitPolicy: return json.Marshal("explicit") default: return nil, fmt.Errorf("nats: unknown acknowledgement policy %v", p) } } func (p AckPolicy) String() string { switch p { case AckNonePolicy: return "AckNone" case AckAllPolicy: return "AckAll" case AckExplicitPolicy: return "AckExplicit" case ackPolicyNotSet: return "Not Initialized" default: return "Unknown AckPolicy" } } // ReplayPolicy determines how the consumer should replay messages it already has queued in the stream. type ReplayPolicy int const ( // ReplayInstantPolicy will replay messages as fast as possible. ReplayInstantPolicy ReplayPolicy = iota // ReplayOriginalPolicy will maintain the same timing as the messages were received. 
ReplayOriginalPolicy // For configuration mismatch check replayPolicyNotSet = 99 ) func (p *ReplayPolicy) UnmarshalJSON(data []byte) error { switch string(data) { case jsonString("instant"): *p = ReplayInstantPolicy case jsonString("original"): *p = ReplayOriginalPolicy default: return fmt.Errorf("nats: can not unmarshal %q", data) } return nil } func (p ReplayPolicy) MarshalJSON() ([]byte, error) { switch p { case ReplayOriginalPolicy: return json.Marshal("original") case ReplayInstantPolicy: return json.Marshal("instant") default: return nil, fmt.Errorf("nats: unknown replay policy %v", p) } } var ( ackAck = []byte("+ACK") ackNak = []byte("-NAK") ackProgress = []byte("+WPI") ackTerm = []byte("+TERM") ) // DeliverPolicy determines how the consumer should select the first message to deliver. type DeliverPolicy int const ( // DeliverAllPolicy starts delivering messages from the very beginning of a // stream. This is the default. DeliverAllPolicy DeliverPolicy = iota // DeliverLastPolicy will start the consumer with the last sequence // received. DeliverLastPolicy // DeliverNewPolicy will only deliver new messages that are sent after the // consumer is created. DeliverNewPolicy // DeliverByStartSequencePolicy will deliver messages starting from a given // sequence. DeliverByStartSequencePolicy // DeliverByStartTimePolicy will deliver messages starting from a given // time. DeliverByStartTimePolicy // DeliverLastPerSubjectPolicy will start the consumer with the last message // for all subjects received. 
DeliverLastPerSubjectPolicy // For configuration mismatch check deliverPolicyNotSet = 99 ) func (p *DeliverPolicy) UnmarshalJSON(data []byte) error { switch string(data) { case jsonString("all"), jsonString("undefined"): *p = DeliverAllPolicy case jsonString("last"): *p = DeliverLastPolicy case jsonString("new"): *p = DeliverNewPolicy case jsonString("by_start_sequence"): *p = DeliverByStartSequencePolicy case jsonString("by_start_time"): *p = DeliverByStartTimePolicy case jsonString("last_per_subject"): *p = DeliverLastPerSubjectPolicy } return nil } func (p DeliverPolicy) MarshalJSON() ([]byte, error) { switch p { case DeliverAllPolicy: return json.Marshal("all") case DeliverLastPolicy: return json.Marshal("last") case DeliverNewPolicy: return json.Marshal("new") case DeliverByStartSequencePolicy: return json.Marshal("by_start_sequence") case DeliverByStartTimePolicy: return json.Marshal("by_start_time") case DeliverLastPerSubjectPolicy: return json.Marshal("last_per_subject") default: return nil, fmt.Errorf("nats: unknown deliver policy %v", p) } } // RetentionPolicy determines how messages in a set are retained. type RetentionPolicy int const ( // LimitsPolicy (default) means that messages are retained until any given limit is reached. // This could be one of MaxMsgs, MaxBytes, or MaxAge. LimitsPolicy RetentionPolicy = iota // InterestPolicy specifies that when all known observables have acknowledged a message it can be removed. InterestPolicy // WorkQueuePolicy specifies that when the first worker or subscriber acknowledges the message it can be removed. WorkQueuePolicy ) // DiscardPolicy determines how to proceed when limits of messages or bytes are // reached. type DiscardPolicy int const ( // DiscardOld will remove older messages to return to the limits. This is // the default. DiscardOld DiscardPolicy = iota //DiscardNew will fail to store new messages. 
DiscardNew ) const ( limitsPolicyString = "limits" interestPolicyString = "interest" workQueuePolicyString = "workqueue" ) func (rp RetentionPolicy) String() string { switch rp { case LimitsPolicy: return "Limits" case InterestPolicy: return "Interest" case WorkQueuePolicy: return "WorkQueue" default: return "Unknown Retention Policy" } } func (rp RetentionPolicy) MarshalJSON() ([]byte, error) { switch rp { case LimitsPolicy: return json.Marshal(limitsPolicyString) case InterestPolicy: return json.Marshal(interestPolicyString) case WorkQueuePolicy: return json.Marshal(workQueuePolicyString) default: return nil, fmt.Errorf("nats: can not marshal %v", rp) } } func (rp *RetentionPolicy) UnmarshalJSON(data []byte) error { switch string(data) { case jsonString(limitsPolicyString): *rp = LimitsPolicy case jsonString(interestPolicyString): *rp = InterestPolicy case jsonString(workQueuePolicyString): *rp = WorkQueuePolicy default: return fmt.Errorf("nats: can not unmarshal %q", data) } return nil } func (dp DiscardPolicy) String() string { switch dp { case DiscardOld: return "DiscardOld" case DiscardNew: return "DiscardNew" default: return "Unknown Discard Policy" } } func (dp DiscardPolicy) MarshalJSON() ([]byte, error) { switch dp { case DiscardOld: return json.Marshal("old") case DiscardNew: return json.Marshal("new") default: return nil, fmt.Errorf("nats: can not marshal %v", dp) } } func (dp *DiscardPolicy) UnmarshalJSON(data []byte) error { switch strings.ToLower(string(data)) { case jsonString("old"): *dp = DiscardOld case jsonString("new"): *dp = DiscardNew default: return fmt.Errorf("nats: can not unmarshal %q", data) } return nil } // StorageType determines how messages are stored for retention. type StorageType int const ( // FileStorage specifies on disk storage. It's the default. FileStorage StorageType = iota // MemoryStorage specifies in memory only. 
MemoryStorage ) const ( memoryStorageString = "memory" fileStorageString = "file" ) func (st StorageType) String() string { switch st { case MemoryStorage: return "Memory" case FileStorage: return "File" default: return "Unknown Storage Type" } } func (st StorageType) MarshalJSON() ([]byte, error) { switch st { case MemoryStorage: return json.Marshal(memoryStorageString) case FileStorage: return json.Marshal(fileStorageString) default: return nil, fmt.Errorf("nats: can not marshal %v", st) } } func (st *StorageType) UnmarshalJSON(data []byte) error { switch string(data) { case jsonString(memoryStorageString): *st = MemoryStorage case jsonString(fileStorageString): *st = FileStorage default: return fmt.Errorf("nats: can not unmarshal %q", data) } return nil } type StoreCompression uint8 const ( NoCompression StoreCompression = iota S2Compression ) func (alg StoreCompression) String() string { switch alg { case NoCompression: return "None" case S2Compression: return "S2" default: return "Unknown StoreCompression" } } func (alg StoreCompression) MarshalJSON() ([]byte, error) { var str string switch alg { case S2Compression: str = "s2" case NoCompression: str = "none" default: return nil, errors.New("unknown compression algorithm") } return json.Marshal(str) } func (alg *StoreCompression) UnmarshalJSON(b []byte) error { var str string if err := json.Unmarshal(b, &str); err != nil { return err } switch str { case "s2": *alg = S2Compression case "none": *alg = NoCompression default: return errors.New("unknown compression algorithm") } return nil } // Length of our hash used for named consumers. const nameHashLen = 8 // Computes a hash for the given `name`. 
func getHash(name string) string { sha := sha256.New() sha.Write([]byte(name)) b := sha.Sum(nil) for i := 0; i < nameHashLen; i++ { b[i] = rdigits[int(b[i]%base)] } return string(b[:nameHashLen]) } nats.go-1.41.0/js_test.go000066400000000000000000000054141477351342400151660ustar00rootroot00000000000000// Copyright 2012-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package nats //////////////////////////////////////////////////////////////////////////////// // Package scoped specific tests here.. //////////////////////////////////////////////////////////////////////////////// import ( "strings" "testing" ) func TestJetStreamConvertDirectMsgResponseToMsg(t *testing.T) { // This test checks the conversion of a "direct get message" response // to a JS message based on the content of specific NATS headers. // It is very specific to the order headers retrieval is made in // convertDirectGetMsgResponseToMsg(), so it may need adjustment // if changes are made there. 
msg := NewMsg("inbox") check := func(errTxt string) { t.Helper() m, err := convertDirectGetMsgResponseToMsg("test", msg) if err == nil || !strings.Contains(err.Error(), errTxt) { t.Fatalf("Expected error contain %q, got %v", errTxt, err) } if m != nil { t.Fatalf("Expected nil message, got %v", m) } } check("should have headers") msg.Header.Set(statusHdr, noMessagesSts) check(ErrMsgNotFound.Error()) msg.Header.Set(statusHdr, reqTimeoutSts) check("unable to get message") msg.Header.Set(descrHdr, "some error text") check("some error text") msg.Header.Del(statusHdr) msg.Header.Del(descrHdr) msg.Header.Set("some", "header") check("missing stream") msg.Header.Set(JSStream, "test") check("missing sequence") msg.Header.Set(JSSequence, "abc") check("invalid sequence") msg.Header.Set(JSSequence, "1") check("missing timestamp") msg.Header.Set(JSTimeStamp, "aaaaaaaaa bbbbbbbbbbbb cccccccccc ddddddddddd eeeeeeeeee ffffff") check("invalid timestamp") msg.Header.Set(JSTimeStamp, "2006-01-02 15:04:05.999999999 +0000 UTC") check("missing subject") msg.Header.Set(JSSubject, "foo") r, err := convertDirectGetMsgResponseToMsg("test", msg) if err != nil { t.Fatalf("Error during convert: %v", err) } if r.Subject != "foo" { t.Fatalf("Expected subject to be 'foo', got %q", r.Subject) } if r.Sequence != 1 { t.Fatalf("Expected sequence to be 1, got %v", r.Sequence) } if r.Time.UnixNano() != 0xFC4A4D639917BFF { t.Fatalf("Invalid timestamp: %v", r.Time.UnixNano()) } if r.Header.Get("some") != "header" { t.Fatalf("Wrong header: %v", r.Header) } } nats.go-1.41.0/jserrors.go000066400000000000000000000321421477351342400153620ustar00rootroot00000000000000// Copyright 2020-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package nats import ( "errors" "fmt" ) var ( // API errors // ErrJetStreamNotEnabled is an error returned when JetStream is not enabled for an account. // // Note: This error will not be returned in clustered mode, even if each // server in the cluster does not have JetStream enabled. In clustered mode, // requests will time out instead. ErrJetStreamNotEnabled JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeJetStreamNotEnabled, Description: "jetstream not enabled", Code: 503}} // ErrJetStreamNotEnabledForAccount is an error returned when JetStream is not enabled for an account. ErrJetStreamNotEnabledForAccount JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeJetStreamNotEnabledForAccount, Description: "jetstream not enabled for account", Code: 503}} // ErrStreamNotFound is an error returned when stream with given name does not exist. ErrStreamNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamNotFound, Description: "stream not found", Code: 404}} // ErrStreamNameAlreadyInUse is returned when a stream with given name already exists and has a different configuration. ErrStreamNameAlreadyInUse JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamNameInUse, Description: "stream name already in use", Code: 400}} // ErrStreamSubjectTransformNotSupported is returned when the connected nats-server version does not support setting // the stream subject transform. If this error is returned when executing AddStream(), the stream with invalid // configuration was already created in the server. 
ErrStreamSubjectTransformNotSupported JetStreamError = &jsError{message: "stream subject transformation not supported by nats-server"} // ErrStreamSourceSubjectTransformNotSupported is returned when the connected nats-server version does not support setting // the stream source subject transform. If this error is returned when executing AddStream(), the stream with invalid // configuration was already created in the server. ErrStreamSourceSubjectTransformNotSupported JetStreamError = &jsError{message: "stream subject transformation not supported by nats-server"} // ErrStreamSourceNotSupported is returned when the connected nats-server version does not support setting // the stream sources. If this error is returned when executing AddStream(), the stream with invalid // configuration was already created in the server. ErrStreamSourceNotSupported JetStreamError = &jsError{message: "stream sourcing is not supported by nats-server"} // ErrStreamSourceMultipleSubjectTransformsNotSupported is returned when the connected nats-server version does not support setting // the stream sources. If this error is returned when executing AddStream(), the stream with invalid // configuration was already created in the server. ErrStreamSourceMultipleSubjectTransformsNotSupported JetStreamError = &jsError{message: "stream sourcing with multiple subject transforms not supported by nats-server"} // ErrConsumerNotFound is an error returned when consumer with given name does not exist. ErrConsumerNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerNotFound, Description: "consumer not found", Code: 404}} // ErrMsgNotFound is returned when message with provided sequence number does npt exist. ErrMsgNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeMessageNotFound, Description: "message not found", Code: 404}} // ErrBadRequest is returned when invalid request is sent to JetStream API. 
ErrBadRequest JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeBadRequest, Description: "bad request", Code: 400}} // ErrDuplicateFilterSubjects is returned when both FilterSubject and FilterSubjects are specified when creating consumer. ErrDuplicateFilterSubjects JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeDuplicateFilterSubjects, Description: "consumer cannot have both FilterSubject and FilterSubjects specified", Code: 500}} // ErrDuplicateFilterSubjects is returned when filter subjects overlap when creating consumer. ErrOverlappingFilterSubjects JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeOverlappingFilterSubjects, Description: "consumer subject filters cannot overlap", Code: 500}} // ErrEmptyFilter is returned when a filter in FilterSubjects is empty. ErrEmptyFilter JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerEmptyFilter, Description: "consumer filter in FilterSubjects cannot be empty", Code: 500}} // Client errors // ErrConsumerNameAlreadyInUse is an error returned when consumer with given name already exists. ErrConsumerNameAlreadyInUse JetStreamError = &jsError{message: "consumer name already in use"} // ErrConsumerNotActive is an error returned when consumer is not active. ErrConsumerNotActive JetStreamError = &jsError{message: "consumer not active"} // ErrInvalidJSAck is returned when JetStream ack from message publish is invalid. ErrInvalidJSAck JetStreamError = &jsError{message: "invalid jetstream publish response"} // ErrStreamConfigRequired is returned when empty stream configuration is supplied to add/update stream. ErrStreamConfigRequired JetStreamError = &jsError{message: "stream configuration is required"} // ErrStreamNameRequired is returned when the provided stream name is empty. ErrStreamNameRequired JetStreamError = &jsError{message: "stream name is required"} // ErrConsumerNameRequired is returned when the provided consumer durable name is empty. 
ErrConsumerNameRequired JetStreamError = &jsError{message: "consumer name is required"} // ErrConsumerMultipleFilterSubjectsNotSupported is returned when the connected nats-server version does not support setting // multiple filter subjects with filter_subjects field. If this error is returned when executing AddConsumer(), the consumer with invalid // configuration was already created in the server. ErrConsumerMultipleFilterSubjectsNotSupported JetStreamError = &jsError{message: "multiple consumer filter subjects not supported by nats-server"} // ErrConsumerConfigRequired is returned when empty consumer consuguration is supplied to add/update consumer. ErrConsumerConfigRequired JetStreamError = &jsError{message: "consumer configuration is required"} // ErrPullSubscribeToPushConsumer is returned when attempting to use PullSubscribe on push consumer. ErrPullSubscribeToPushConsumer JetStreamError = &jsError{message: "cannot pull subscribe to push based consumer"} // ErrPullSubscribeRequired is returned when attempting to use subscribe methods not suitable for pull consumers for pull consumers. ErrPullSubscribeRequired JetStreamError = &jsError{message: "must use pull subscribe to bind to pull based consumer"} // ErrMsgAlreadyAckd is returned when attempting to acknowledge message more than once. ErrMsgAlreadyAckd JetStreamError = &jsError{message: "message was already acknowledged"} // ErrNoStreamResponse is returned when there is no response from stream (e.g. no responders error). ErrNoStreamResponse JetStreamError = &jsError{message: "no response from stream"} // ErrNotJSMessage is returned when attempting to get metadata from non JetStream message . ErrNotJSMessage JetStreamError = &jsError{message: "not a jetstream message"} // ErrInvalidStreamName is returned when the provided stream name is invalid (contains '.' or ' '). 
ErrInvalidStreamName JetStreamError = &jsError{message: "invalid stream name"} // ErrInvalidConsumerName is returned when the provided consumer name is invalid (contains '.' or ' '). ErrInvalidConsumerName JetStreamError = &jsError{message: "invalid consumer name"} // ErrInvalidFilterSubject is returned when the provided filter subject is invalid. ErrInvalidFilterSubject JetStreamError = &jsError{message: "invalid filter subject"} // ErrNoMatchingStream is returned when stream lookup by subject is unsuccessful. ErrNoMatchingStream JetStreamError = &jsError{message: "no stream matches subject"} // ErrSubjectMismatch is returned when the provided subject does not match consumer's filter subject. ErrSubjectMismatch JetStreamError = &jsError{message: "subject does not match consumer"} // ErrContextAndTimeout is returned when attempting to use both context and timeout. ErrContextAndTimeout JetStreamError = &jsError{message: "context and timeout can not both be set"} // ErrCantAckIfConsumerAckNone is returned when attempting to ack a message for consumer with AckNone policy set. ErrCantAckIfConsumerAckNone JetStreamError = &jsError{message: "cannot acknowledge a message for a consumer with AckNone policy"} // ErrConsumerDeleted is returned when attempting to send pull request to a consumer which does not exist ErrConsumerDeleted JetStreamError = &jsError{message: "consumer deleted"} // ErrConsumerLeadershipChanged is returned when pending requests are no longer valid after leadership has changed ErrConsumerLeadershipChanged JetStreamError = &jsError{message: "Leadership Changed"} // ErrNoHeartbeat is returned when no heartbeat is received from server when sending requests with pull consumer. 
ErrNoHeartbeat JetStreamError = &jsError{message: "no heartbeat received"} // ErrSubscriptionClosed is returned when attempting to send pull request to a closed subscription ErrSubscriptionClosed JetStreamError = &jsError{message: "subscription closed"} // ErrJetStreamPublisherClosed is returned for each unfinished ack future when JetStream.Cleanup is called. ErrJetStreamPublisherClosed JetStreamError = &jsError{message: "jetstream context closed"} // Deprecated: ErrInvalidDurableName is no longer returned and will be removed in future releases. // Use ErrInvalidConsumerName instead. ErrInvalidDurableName = errors.New("nats: invalid durable name") // ErrAsyncPublishTimeout is returned when waiting for ack on async publish ErrAsyncPublishTimeout JetStreamError = &jsError{message: "timeout waiting for ack"} // ErrTooManyStalledMsgs is returned when too many outstanding async // messages are waiting for ack. ErrTooManyStalledMsgs JetStreamError = &jsError{message: "stalled with too many outstanding async published messages"} // ErrFetchDisconnected is returned when the connection to the server is lost // while waiting for messages to be delivered on PullSubscribe. 
ErrFetchDisconnected = &jsError{message: "disconnected during fetch"} ) // Error code represents JetStream error codes returned by the API type ErrorCode uint16 const ( JSErrCodeJetStreamNotEnabledForAccount ErrorCode = 10039 JSErrCodeJetStreamNotEnabled ErrorCode = 10076 JSErrCodeInsufficientResourcesErr ErrorCode = 10023 JSErrCodeJetStreamNotAvailable ErrorCode = 10008 JSErrCodeStreamNotFound ErrorCode = 10059 JSErrCodeStreamNameInUse ErrorCode = 10058 JSErrCodeConsumerNotFound ErrorCode = 10014 JSErrCodeConsumerNameExists ErrorCode = 10013 JSErrCodeConsumerAlreadyExists ErrorCode = 10105 JSErrCodeDuplicateFilterSubjects ErrorCode = 10136 JSErrCodeOverlappingFilterSubjects ErrorCode = 10138 JSErrCodeConsumerEmptyFilter ErrorCode = 10139 JSErrCodeMessageNotFound ErrorCode = 10037 JSErrCodeBadRequest ErrorCode = 10003 JSStreamInvalidConfig ErrorCode = 10052 JSErrCodeStreamWrongLastSequence ErrorCode = 10071 ) // APIError is included in all API responses if there was an error. type APIError struct { Code int `json:"code"` ErrorCode ErrorCode `json:"err_code"` Description string `json:"description,omitempty"` } // Error prints the JetStream API error code and description func (e *APIError) Error() string { return fmt.Sprintf("nats: %s", e.Description) } // APIError implements the JetStreamError interface. func (e *APIError) APIError() *APIError { return e } // Is matches against an APIError. func (e *APIError) Is(err error) bool { if e == nil { return false } // Extract internal APIError to match against. var aerr *APIError ok := errors.As(err, &aerr) if !ok { return ok } return e.ErrorCode == aerr.ErrorCode } // JetStreamError is an error result that happens when using JetStream. 
// In case of client-side error, `APIError()` returns nil type JetStreamError interface { APIError() *APIError error } type jsError struct { apiErr *APIError message string } func (err *jsError) APIError() *APIError { return err.apiErr } func (err *jsError) Error() string { if err.apiErr != nil && err.apiErr.Description != "" { return err.apiErr.Error() } return fmt.Sprintf("nats: %s", err.message) } func (err *jsError) Unwrap() error { // Allow matching to embedded APIError in case there is one. if err.apiErr == nil { return nil } return err.apiErr } nats.go-1.41.0/jsm.go000066400000000000000000001430171477351342400143060ustar00rootroot00000000000000// Copyright 2021-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package nats import ( "context" "encoding/json" "errors" "fmt" "strconv" "strings" "time" ) // JetStreamManager manages JetStream Streams and Consumers. type JetStreamManager interface { // AddStream creates a stream. AddStream(cfg *StreamConfig, opts ...JSOpt) (*StreamInfo, error) // UpdateStream updates a stream. UpdateStream(cfg *StreamConfig, opts ...JSOpt) (*StreamInfo, error) // DeleteStream deletes a stream. DeleteStream(name string, opts ...JSOpt) error // StreamInfo retrieves information from a stream. StreamInfo(stream string, opts ...JSOpt) (*StreamInfo, error) // PurgeStream purges a stream messages. PurgeStream(name string, opts ...JSOpt) error // StreamsInfo can be used to retrieve a list of StreamInfo objects. 
// Deprecated: Use Streams() instead. StreamsInfo(opts ...JSOpt) <-chan *StreamInfo // Streams can be used to retrieve a list of StreamInfo objects. Streams(opts ...JSOpt) <-chan *StreamInfo // StreamNames is used to retrieve a list of Stream names. StreamNames(opts ...JSOpt) <-chan string // GetMsg retrieves a raw stream message stored in JetStream by sequence number. // Use options nats.DirectGet() or nats.DirectGetNext() to trigger retrieval // directly from a distributed group of servers (leader and replicas). // The stream must have been created/updated with the AllowDirect boolean. GetMsg(name string, seq uint64, opts ...JSOpt) (*RawStreamMsg, error) // GetLastMsg retrieves the last raw stream message stored in JetStream by subject. // Use option nats.DirectGet() to trigger retrieval // directly from a distributed group of servers (leader and replicas). // The stream must have been created/updated with the AllowDirect boolean. GetLastMsg(name, subject string, opts ...JSOpt) (*RawStreamMsg, error) // DeleteMsg deletes a message from a stream. The message is marked as erased, but its value is not overwritten. DeleteMsg(name string, seq uint64, opts ...JSOpt) error // SecureDeleteMsg deletes a message from a stream. The deleted message is overwritten with random data // As a result, this operation is slower than DeleteMsg() SecureDeleteMsg(name string, seq uint64, opts ...JSOpt) error // AddConsumer adds a consumer to a stream. // If the consumer already exists, and the configuration is the same, it // will return the existing consumer. // If the consumer already exists, and the configuration is different, it // will return ErrConsumerNameAlreadyInUse. AddConsumer(stream string, cfg *ConsumerConfig, opts ...JSOpt) (*ConsumerInfo, error) // UpdateConsumer updates an existing consumer. UpdateConsumer(stream string, cfg *ConsumerConfig, opts ...JSOpt) (*ConsumerInfo, error) // DeleteConsumer deletes a consumer. 
DeleteConsumer(stream, consumer string, opts ...JSOpt) error // ConsumerInfo retrieves information of a consumer from a stream. ConsumerInfo(stream, name string, opts ...JSOpt) (*ConsumerInfo, error) // ConsumersInfo is used to retrieve a list of ConsumerInfo objects. // Deprecated: Use Consumers() instead. ConsumersInfo(stream string, opts ...JSOpt) <-chan *ConsumerInfo // Consumers is used to retrieve a list of ConsumerInfo objects. Consumers(stream string, opts ...JSOpt) <-chan *ConsumerInfo // ConsumerNames is used to retrieve a list of Consumer names. ConsumerNames(stream string, opts ...JSOpt) <-chan string // AccountInfo retrieves info about the JetStream usage from an account. AccountInfo(opts ...JSOpt) (*AccountInfo, error) // StreamNameBySubject returns a stream matching given subject. StreamNameBySubject(string, ...JSOpt) (string, error) } // StreamConfig will determine the properties for a stream. // There are sensible defaults for most. If no subjects are // given the name will be used as the only subject. type StreamConfig struct { // Name is the name of the stream. It is required and must be unique // across the JetStream account. // // Name Names cannot contain whitespace, ., *, >, path separators // (forward or backwards slash), and non-printable characters. Name string `json:"name"` // Description is an optional description of the stream. Description string `json:"description,omitempty"` // Subjects is a list of subjects that the stream is listening on. // Wildcards are supported. Subjects cannot be set if the stream is // created as a mirror. Subjects []string `json:"subjects,omitempty"` // Retention defines the message retention policy for the stream. // Defaults to LimitsPolicy. Retention RetentionPolicy `json:"retention"` // MaxConsumers specifies the maximum number of consumers allowed for // the stream. MaxConsumers int `json:"max_consumers"` // MaxMsgs is the maximum number of messages the stream will store. 
// After reaching the limit, stream adheres to the discard policy. // If not set, server default is -1 (unlimited). MaxMsgs int64 `json:"max_msgs"` // MaxBytes is the maximum total size of messages the stream will store. // After reaching the limit, stream adheres to the discard policy. // If not set, server default is -1 (unlimited). MaxBytes int64 `json:"max_bytes"` // Discard defines the policy for handling messages when the stream // reaches its limits in terms of number of messages or total bytes. Discard DiscardPolicy `json:"discard"` // DiscardNewPerSubject is a flag to enable discarding new messages per // subject when limits are reached. Requires DiscardPolicy to be // DiscardNew and the MaxMsgsPerSubject to be set. DiscardNewPerSubject bool `json:"discard_new_per_subject,omitempty"` // MaxAge is the maximum age of messages that the stream will retain. MaxAge time.Duration `json:"max_age"` // MaxMsgsPerSubject is the maximum number of messages per subject that // the stream will retain. MaxMsgsPerSubject int64 `json:"max_msgs_per_subject"` // MaxMsgSize is the maximum size of any single message in the stream. MaxMsgSize int32 `json:"max_msg_size,omitempty"` // Storage specifies the type of storage backend used for the stream // (file or memory). Storage StorageType `json:"storage"` // Replicas is the number of stream replicas in clustered JetStream. // Defaults to 1, maximum is 5. Replicas int `json:"num_replicas"` // NoAck is a flag to disable acknowledging messages received by this // stream. // // If set to true, publish methods from the JetStream client will not // work as expected, since they rely on acknowledgements. Core NATS // publish methods should be used instead. Note that this will make // message delivery less reliable. NoAck bool `json:"no_ack,omitempty"` // Duplicates is the window within which to track duplicate messages. // If not set, server default is 2 minutes. 
Duplicates time.Duration `json:"duplicate_window,omitempty"` // Placement is used to declare where the stream should be placed via // tags and/or an explicit cluster name. Placement *Placement `json:"placement,omitempty"` // Mirror defines the configuration for mirroring another stream. Mirror *StreamSource `json:"mirror,omitempty"` // Sources is a list of other streams this stream sources messages from. Sources []*StreamSource `json:"sources,omitempty"` // Sealed streams do not allow messages to be published or deleted via limits or API, // sealed streams can not be unsealed via configuration update. Can only // be set on already created streams via the Update API. Sealed bool `json:"sealed,omitempty"` // DenyDelete restricts the ability to delete messages from a stream via // the API. Defaults to false. DenyDelete bool `json:"deny_delete,omitempty"` // DenyPurge restricts the ability to purge messages from a stream via // the API. Defaults to false. DenyPurge bool `json:"deny_purge,omitempty"` // AllowRollup allows the use of the Nats-Rollup header to replace all // contents of a stream, or subject in a stream, with a single new // message. AllowRollup bool `json:"allow_rollup_hdrs,omitempty"` // Compression specifies the message storage compression algorithm. // Defaults to NoCompression. Compression StoreCompression `json:"compression"` // FirstSeq is the initial sequence number of the first message in the // stream. FirstSeq uint64 `json:"first_seq,omitempty"` // SubjectTransform allows applying a transformation to matching // messages' subjects. SubjectTransform *SubjectTransformConfig `json:"subject_transform,omitempty"` // RePublish allows immediate republishing a message to the configured // subject after it's stored. RePublish *RePublish `json:"republish,omitempty"` // AllowDirect enables direct access to individual messages using direct // get API. Defaults to false. 
AllowDirect bool `json:"allow_direct"` // MirrorDirect enables direct access to individual messages from the // origin stream using direct get API. Defaults to false. MirrorDirect bool `json:"mirror_direct"` // ConsumerLimits defines limits of certain values that consumers can // set, defaults for those who don't set these settings ConsumerLimits StreamConsumerLimits `json:"consumer_limits,omitempty"` // Metadata is a set of application-defined key-value pairs for // associating metadata on the stream. This feature requires nats-server // v2.10.0 or later. Metadata map[string]string `json:"metadata,omitempty"` // Template identifies the template that manages the Stream. Deprecated: // This feature is no longer supported. Template string `json:"template_owner,omitempty"` // AllowMsgTTL allows header initiated per-message TTLs. // This feature requires nats-server v2.11.0 or later. AllowMsgTTL bool `json:"allow_msg_ttl"` // Enables and sets a duration for adding server markers for delete, purge and max age limits. // This feature requires nats-server v2.11.0 or later. SubjectDeleteMarkerTTL time.Duration `json:"subject_delete_marker_ttl,omitempty"` } // SubjectTransformConfig is for applying a subject transform (to matching messages) before doing anything else when a new message is received. type SubjectTransformConfig struct { Source string `json:"src,omitempty"` Destination string `json:"dest"` } // RePublish is for republishing messages once committed to a stream. The original // subject cis remapped from the subject pattern to the destination pattern. type RePublish struct { Source string `json:"src,omitempty"` Destination string `json:"dest"` HeadersOnly bool `json:"headers_only,omitempty"` } // Placement is used to guide placement of streams in clustered JetStream. type Placement struct { Cluster string `json:"cluster"` Tags []string `json:"tags,omitempty"` } // StreamSource dictates how streams can source from other streams. 
type StreamSource struct { Name string `json:"name"` OptStartSeq uint64 `json:"opt_start_seq,omitempty"` OptStartTime *time.Time `json:"opt_start_time,omitempty"` FilterSubject string `json:"filter_subject,omitempty"` SubjectTransforms []SubjectTransformConfig `json:"subject_transforms,omitempty"` External *ExternalStream `json:"external,omitempty"` Domain string `json:"-"` } // ExternalStream allows you to qualify access to a stream source in another // account. type ExternalStream struct { APIPrefix string `json:"api"` DeliverPrefix string `json:"deliver,omitempty"` } // StreamConsumerLimits are the limits for a consumer on a stream. // These can be overridden on a per consumer basis. type StreamConsumerLimits struct { InactiveThreshold time.Duration `json:"inactive_threshold,omitempty"` MaxAckPending int `json:"max_ack_pending,omitempty"` } // Helper for copying when we do not want to change user's version. func (ss *StreamSource) copy() *StreamSource { nss := *ss // Check pointers if ss.OptStartTime != nil { t := *ss.OptStartTime nss.OptStartTime = &t } if ss.External != nil { ext := *ss.External nss.External = &ext } return &nss } // If we have a Domain, convert to the appropriate ext.APIPrefix. // This will change the stream source, so should be a copy passed in. func (ss *StreamSource) convertDomain() error { if ss.Domain == _EMPTY_ { return nil } if ss.External != nil { // These should be mutually exclusive. // TODO(dlc) - Make generic? 
return errors.New("nats: domain and external are both set") } ss.External = &ExternalStream{APIPrefix: fmt.Sprintf(jsExtDomainT, ss.Domain)} return nil } // apiResponse is a standard response from the JetStream JSON API type apiResponse struct { Type string `json:"type"` Error *APIError `json:"error,omitempty"` } // apiPaged includes variables used to create paged responses from the JSON API type apiPaged struct { Total int `json:"total"` Offset int `json:"offset"` Limit int `json:"limit"` } // apiPagedRequest includes parameters allowing specific pages to be requested // from APIs responding with apiPaged. type apiPagedRequest struct { Offset int `json:"offset,omitempty"` } // AccountInfo contains info about the JetStream usage from the current account. type AccountInfo struct { Tier Domain string `json:"domain"` API APIStats `json:"api"` Tiers map[string]Tier `json:"tiers"` } type Tier struct { Memory uint64 `json:"memory"` Store uint64 `json:"storage"` ReservedMemory uint64 `json:"reserved_memory"` ReservedStore uint64 `json:"reserved_storage"` Streams int `json:"streams"` Consumers int `json:"consumers"` Limits AccountLimits `json:"limits"` } // APIStats reports on API calls to JetStream for this account. type APIStats struct { Total uint64 `json:"total"` Errors uint64 `json:"errors"` } // AccountLimits includes the JetStream limits of the current account. type AccountLimits struct { MaxMemory int64 `json:"max_memory"` MaxStore int64 `json:"max_storage"` MaxStreams int `json:"max_streams"` MaxConsumers int `json:"max_consumers"` MaxAckPending int `json:"max_ack_pending"` MemoryMaxStreamBytes int64 `json:"memory_max_stream_bytes"` StoreMaxStreamBytes int64 `json:"storage_max_stream_bytes"` MaxBytesRequired bool `json:"max_bytes_required"` } type accountInfoResponse struct { apiResponse AccountInfo } // AccountInfo fetches account information from the server, containing details // about the account associated with this JetStream connection. 
If account is // not enabled for JetStream, ErrJetStreamNotEnabledForAccount is returned. // // If the server does not have JetStream enabled, ErrJetStreamNotEnabled is // returned (for a single server setup). For clustered topologies, AccountInfo // will time out. func (js *js) AccountInfo(opts ...JSOpt) (*AccountInfo, error) { o, cancel, err := getJSContextOpts(js.opts, opts...) if err != nil { return nil, err } if cancel != nil { defer cancel() } resp, err := js.apiRequestWithContext(o.ctx, js.apiSubj(apiAccountInfo), nil) if err != nil { // todo maybe nats server should never have no responder on this subject and always respond if they know there is no js to be had if errors.Is(err, ErrNoResponders) { err = ErrJetStreamNotEnabled } return nil, err } var info accountInfoResponse if err := json.Unmarshal(resp.Data, &info); err != nil { return nil, err } if info.Error != nil { // Internally checks based on error code instead of description match. if errors.Is(info.Error, ErrJetStreamNotEnabledForAccount) { return nil, ErrJetStreamNotEnabledForAccount } return nil, info.Error } return &info.AccountInfo, nil } type createConsumerRequest struct { Stream string `json:"stream_name"` Config *ConsumerConfig `json:"config"` } type consumerResponse struct { apiResponse *ConsumerInfo } // AddConsumer adds a consumer to a stream. // If the consumer already exists, and the configuration is the same, it // will return the existing consumer. // If the consumer already exists, and the configuration is different, it // will return ErrConsumerNameAlreadyInUse. func (js *js) AddConsumer(stream string, cfg *ConsumerConfig, opts ...JSOpt) (*ConsumerInfo, error) { if cfg == nil { cfg = &ConsumerConfig{} } consumerName := cfg.Name if consumerName == _EMPTY_ { consumerName = cfg.Durable } if consumerName != _EMPTY_ { consInfo, err := js.ConsumerInfo(stream, consumerName, opts...) 
if err != nil && !errors.Is(err, ErrConsumerNotFound) && !errors.Is(err, ErrStreamNotFound) { return nil, err } if consInfo != nil { sameConfig := checkConfig(&consInfo.Config, cfg) if sameConfig != nil { return nil, fmt.Errorf("%w: creating consumer %q on stream %q", ErrConsumerNameAlreadyInUse, consumerName, stream) } else { return consInfo, nil } } } return js.upsertConsumer(stream, consumerName, cfg, opts...) } func (js *js) UpdateConsumer(stream string, cfg *ConsumerConfig, opts ...JSOpt) (*ConsumerInfo, error) { if cfg == nil { return nil, ErrConsumerConfigRequired } consumerName := cfg.Name if consumerName == _EMPTY_ { consumerName = cfg.Durable } if consumerName == _EMPTY_ { return nil, ErrConsumerNameRequired } return js.upsertConsumer(stream, consumerName, cfg, opts...) } func (js *js) upsertConsumer(stream, consumerName string, cfg *ConsumerConfig, opts ...JSOpt) (*ConsumerInfo, error) { if err := checkStreamName(stream); err != nil { return nil, err } o, cancel, err := getJSContextOpts(js.opts, opts...) 
if err != nil { return nil, err } if cancel != nil { defer cancel() } req, err := json.Marshal(&createConsumerRequest{Stream: stream, Config: cfg}) if err != nil { return nil, err } var ccSubj string if consumerName == _EMPTY_ { // if consumer name is empty (neither Durable nor Name is set), use the legacy ephemeral endpoint ccSubj = fmt.Sprintf(apiLegacyConsumerCreateT, stream) } else if err := checkConsumerName(consumerName); err != nil { return nil, err } else if js.nc.serverMinVersion(2, 9, 0) { if cfg.Durable != "" && js.opts.featureFlags.useDurableConsumerCreate { // if user set the useDurableConsumerCreate flag, use the legacy DURABLE.CREATE endpoint ccSubj = fmt.Sprintf(apiDurableCreateT, stream, consumerName) } else if cfg.FilterSubject == _EMPTY_ || cfg.FilterSubject == ">" { // if filter subject is empty or ">", use the endpoint without filter subject ccSubj = fmt.Sprintf(apiConsumerCreateT, stream, consumerName) } else { // safeguard against passing invalid filter subject in request subject if cfg.FilterSubject[0] == '.' || cfg.FilterSubject[len(cfg.FilterSubject)-1] == '.' 
{ return nil, fmt.Errorf("%w: %q", ErrInvalidFilterSubject, cfg.FilterSubject) } // if filter subject is not empty, use the endpoint with filter subject ccSubj = fmt.Sprintf(apiConsumerCreateWithFilterSubjectT, stream, consumerName, cfg.FilterSubject) } } else { if cfg.Durable != "" { // if Durable is set, use the DURABLE.CREATE endpoint ccSubj = fmt.Sprintf(apiDurableCreateT, stream, consumerName) } else { // if Durable is not set, use the legacy ephemeral endpoint ccSubj = fmt.Sprintf(apiLegacyConsumerCreateT, stream) } } resp, err := js.apiRequestWithContext(o.ctx, js.apiSubj(ccSubj), req) if err != nil { if errors.Is(err, ErrNoResponders) { err = ErrJetStreamNotEnabled } return nil, err } var info consumerResponse err = json.Unmarshal(resp.Data, &info) if err != nil { return nil, err } if info.Error != nil { if errors.Is(info.Error, ErrStreamNotFound) { return nil, ErrStreamNotFound } if errors.Is(info.Error, ErrConsumerNotFound) { return nil, ErrConsumerNotFound } return nil, info.Error } // check whether multiple filter subjects (if used) are reflected in the returned ConsumerInfo if len(cfg.FilterSubjects) != 0 && len(info.Config.FilterSubjects) == 0 { return nil, ErrConsumerMultipleFilterSubjectsNotSupported } return info.ConsumerInfo, nil } // consumerDeleteResponse is the response for a Consumer delete request. type consumerDeleteResponse struct { apiResponse Success bool `json:"success,omitempty"` } func checkStreamName(stream string) error { if stream == _EMPTY_ { return ErrStreamNameRequired } if strings.ContainsAny(stream, ". ") { return ErrInvalidStreamName } return nil } // Check that the consumer name is not empty and is valid (does not contain "." and " "). // Additional consumer name validation is done in nats-server. 
// Returns ErrConsumerNameRequired if consumer name is empty, ErrInvalidConsumerName is invalid, otherwise nil func checkConsumerName(consumer string) error { if consumer == _EMPTY_ { return ErrConsumerNameRequired } if strings.ContainsAny(consumer, ". ") { return ErrInvalidConsumerName } return nil } // DeleteConsumer deletes a Consumer. func (js *js) DeleteConsumer(stream, consumer string, opts ...JSOpt) error { if err := checkStreamName(stream); err != nil { return err } if err := checkConsumerName(consumer); err != nil { return err } o, cancel, err := getJSContextOpts(js.opts, opts...) if err != nil { return err } if cancel != nil { defer cancel() } dcSubj := js.apiSubj(fmt.Sprintf(apiConsumerDeleteT, stream, consumer)) r, err := js.apiRequestWithContext(o.ctx, dcSubj, nil) if err != nil { return err } var resp consumerDeleteResponse if err := json.Unmarshal(r.Data, &resp); err != nil { return err } if resp.Error != nil { if errors.Is(resp.Error, ErrConsumerNotFound) { return ErrConsumerNotFound } return resp.Error } return nil } // ConsumerInfo returns information about a Consumer. func (js *js) ConsumerInfo(stream, consumer string, opts ...JSOpt) (*ConsumerInfo, error) { if err := checkStreamName(stream); err != nil { return nil, err } if err := checkConsumerName(consumer); err != nil { return nil, err } o, cancel, err := getJSContextOpts(js.opts, opts...) if err != nil { return nil, err } if cancel != nil { defer cancel() } return js.getConsumerInfoContext(o.ctx, stream, consumer) } // consumerLister fetches pages of ConsumerInfo objects. This object is not // safe to use for multiple threads. type consumerLister struct { stream string js *js err error offset int page []*ConsumerInfo pageInfo *apiPaged } // consumersRequest is the type used for Consumers requests. type consumersRequest struct { apiPagedRequest } // consumerListResponse is the response for a Consumers List request. 
type consumerListResponse struct { apiResponse apiPaged Consumers []*ConsumerInfo `json:"consumers"` } // Next fetches the next ConsumerInfo page. func (c *consumerLister) Next() bool { if c.err != nil { return false } if err := checkStreamName(c.stream); err != nil { c.err = err return false } if c.pageInfo != nil && c.offset >= c.pageInfo.Total { return false } req, err := json.Marshal(consumersRequest{ apiPagedRequest: apiPagedRequest{Offset: c.offset}, }) if err != nil { c.err = err return false } var cancel context.CancelFunc ctx := c.js.opts.ctx if ctx == nil { ctx, cancel = context.WithTimeout(context.Background(), c.js.opts.wait) defer cancel() } clSubj := c.js.apiSubj(fmt.Sprintf(apiConsumerListT, c.stream)) r, err := c.js.apiRequestWithContext(ctx, clSubj, req) if err != nil { c.err = err return false } var resp consumerListResponse if err := json.Unmarshal(r.Data, &resp); err != nil { c.err = err return false } if resp.Error != nil { c.err = resp.Error return false } c.pageInfo = &resp.apiPaged c.page = resp.Consumers c.offset += len(c.page) return true } // Page returns the current ConsumerInfo page. func (c *consumerLister) Page() []*ConsumerInfo { return c.page } // Err returns any errors found while fetching pages. func (c *consumerLister) Err() error { return c.err } // Consumers is used to retrieve a list of ConsumerInfo objects. func (jsc *js) Consumers(stream string, opts ...JSOpt) <-chan *ConsumerInfo { o, cancel, err := getJSContextOpts(jsc.opts, opts...) if err != nil { return nil } ch := make(chan *ConsumerInfo) l := &consumerLister{js: &js{nc: jsc.nc, opts: o}, stream: stream} go func() { if cancel != nil { defer cancel() } defer close(ch) for l.Next() { for _, info := range l.Page() { select { case ch <- info: case <-o.ctx.Done(): return } } } }() return ch } // ConsumersInfo is used to retrieve a list of ConsumerInfo objects. // Deprecated: Use Consumers() instead. 
func (jsc *js) ConsumersInfo(stream string, opts ...JSOpt) <-chan *ConsumerInfo { return jsc.Consumers(stream, opts...) } type consumerNamesLister struct { stream string js *js err error offset int page []string pageInfo *apiPaged } // consumerNamesListResponse is the response for a Consumers Names List request. type consumerNamesListResponse struct { apiResponse apiPaged Consumers []string `json:"consumers"` } // Next fetches the next consumer names page. func (c *consumerNamesLister) Next() bool { if c.err != nil { return false } if err := checkStreamName(c.stream); err != nil { c.err = err return false } if c.pageInfo != nil && c.offset >= c.pageInfo.Total { return false } var cancel context.CancelFunc ctx := c.js.opts.ctx if ctx == nil { ctx, cancel = context.WithTimeout(context.Background(), c.js.opts.wait) defer cancel() } req, err := json.Marshal(consumersRequest{ apiPagedRequest: apiPagedRequest{Offset: c.offset}, }) if err != nil { c.err = err return false } clSubj := c.js.apiSubj(fmt.Sprintf(apiConsumerNamesT, c.stream)) r, err := c.js.apiRequestWithContext(ctx, clSubj, req) if err != nil { c.err = err return false } var resp consumerNamesListResponse if err := json.Unmarshal(r.Data, &resp); err != nil { c.err = err return false } if resp.Error != nil { c.err = resp.Error return false } c.pageInfo = &resp.apiPaged c.page = resp.Consumers c.offset += len(c.page) return true } // Page returns the current ConsumerInfo page. func (c *consumerNamesLister) Page() []string { return c.page } // Err returns any errors found while fetching pages. func (c *consumerNamesLister) Err() error { return c.err } // ConsumerNames is used to retrieve a list of Consumer names. func (jsc *js) ConsumerNames(stream string, opts ...JSOpt) <-chan string { o, cancel, err := getJSContextOpts(jsc.opts, opts...) 
if err != nil { return nil } ch := make(chan string) l := &consumerNamesLister{stream: stream, js: &js{nc: jsc.nc, opts: o}} go func() { if cancel != nil { defer cancel() } defer close(ch) for l.Next() { for _, info := range l.Page() { select { case ch <- info: case <-o.ctx.Done(): return } } } }() return ch } // streamCreateResponse stream creation. type streamCreateResponse struct { apiResponse *StreamInfo } func (js *js) AddStream(cfg *StreamConfig, opts ...JSOpt) (*StreamInfo, error) { if cfg == nil { return nil, ErrStreamConfigRequired } if err := checkStreamName(cfg.Name); err != nil { return nil, err } o, cancel, err := getJSContextOpts(js.opts, opts...) if err != nil { return nil, err } if cancel != nil { defer cancel() } // In case we need to change anything, copy so we do not change the caller's version. ncfg := *cfg // If we have a mirror and an external domain, convert to ext.APIPrefix. if cfg.Mirror != nil && cfg.Mirror.Domain != _EMPTY_ { // Copy so we do not change the caller's version. ncfg.Mirror = ncfg.Mirror.copy() if err := ncfg.Mirror.convertDomain(); err != nil { return nil, err } } // Check sources for the same. if len(ncfg.Sources) > 0 { ncfg.Sources = append([]*StreamSource(nil), ncfg.Sources...) 
for i, ss := range ncfg.Sources { if ss.Domain != _EMPTY_ { ncfg.Sources[i] = ss.copy() if err := ncfg.Sources[i].convertDomain(); err != nil { return nil, err } } } } req, err := json.Marshal(&ncfg) if err != nil { return nil, err } csSubj := js.apiSubj(fmt.Sprintf(apiStreamCreateT, cfg.Name)) r, err := js.apiRequestWithContext(o.ctx, csSubj, req) if err != nil { return nil, err } var resp streamCreateResponse if err := json.Unmarshal(r.Data, &resp); err != nil { return nil, err } if resp.Error != nil { if errors.Is(resp.Error, ErrStreamNameAlreadyInUse) { return nil, ErrStreamNameAlreadyInUse } return nil, resp.Error } // check that input subject transform (if used) is reflected in the returned ConsumerInfo if cfg.SubjectTransform != nil && resp.StreamInfo.Config.SubjectTransform == nil { return nil, ErrStreamSubjectTransformNotSupported } if len(cfg.Sources) != 0 { if len(cfg.Sources) != len(resp.Config.Sources) { return nil, ErrStreamSourceNotSupported } for i := range cfg.Sources { if len(cfg.Sources[i].SubjectTransforms) != 0 && len(resp.Sources[i].SubjectTransforms) == 0 { return nil, ErrStreamSourceMultipleSubjectTransformsNotSupported } } } return resp.StreamInfo, nil } type ( // StreamInfoRequest contains additional option to return StreamInfoRequest struct { apiPagedRequest // DeletedDetails when true includes information about deleted messages DeletedDetails bool `json:"deleted_details,omitempty"` // SubjectsFilter when set, returns information on the matched subjects SubjectsFilter string `json:"subjects_filter,omitempty"` } streamInfoResponse = struct { apiResponse apiPaged *StreamInfo } ) func (js *js) StreamInfo(stream string, opts ...JSOpt) (*StreamInfo, error) { if err := checkStreamName(stream); err != nil { return nil, err } o, cancel, err := getJSContextOpts(js.opts, opts...) 
if err != nil { return nil, err } if cancel != nil { defer cancel() } var i int var subjectMessagesMap map[string]uint64 var req []byte var requestPayload bool var siOpts StreamInfoRequest if o.streamInfoOpts != nil { requestPayload = true siOpts = *o.streamInfoOpts } for { if requestPayload { siOpts.Offset = i if req, err = json.Marshal(&siOpts); err != nil { return nil, err } } siSubj := js.apiSubj(fmt.Sprintf(apiStreamInfoT, stream)) r, err := js.apiRequestWithContext(o.ctx, siSubj, req) if err != nil { return nil, err } var resp streamInfoResponse if err := json.Unmarshal(r.Data, &resp); err != nil { return nil, err } if resp.Error != nil { if errors.Is(resp.Error, ErrStreamNotFound) { return nil, ErrStreamNotFound } return nil, resp.Error } var total int // for backwards compatibility if resp.Total != 0 { total = resp.Total } else { total = len(resp.State.Subjects) } if requestPayload && len(resp.StreamInfo.State.Subjects) > 0 { if subjectMessagesMap == nil { subjectMessagesMap = make(map[string]uint64, total) } for k, j := range resp.State.Subjects { subjectMessagesMap[k] = j i++ } } if i >= total { if requestPayload { resp.StreamInfo.State.Subjects = subjectMessagesMap } return resp.StreamInfo, nil } } } // StreamInfo shows config and current state for this stream. type StreamInfo struct { Config StreamConfig `json:"config"` Created time.Time `json:"created"` State StreamState `json:"state"` Cluster *ClusterInfo `json:"cluster,omitempty"` Mirror *StreamSourceInfo `json:"mirror,omitempty"` Sources []*StreamSourceInfo `json:"sources,omitempty"` Alternates []*StreamAlternate `json:"alternates,omitempty"` } // StreamAlternate is an alternate stream represented by a mirror. type StreamAlternate struct { Name string `json:"name"` Domain string `json:"domain,omitempty"` Cluster string `json:"cluster"` } // StreamSourceInfo shows information about an upstream stream source. 
type StreamSourceInfo struct { Name string `json:"name"` Lag uint64 `json:"lag"` Active time.Duration `json:"active"` External *ExternalStream `json:"external"` Error *APIError `json:"error"` FilterSubject string `json:"filter_subject,omitempty"` SubjectTransforms []SubjectTransformConfig `json:"subject_transforms,omitempty"` } // StreamState is information about the given stream. type StreamState struct { Msgs uint64 `json:"messages"` Bytes uint64 `json:"bytes"` FirstSeq uint64 `json:"first_seq"` FirstTime time.Time `json:"first_ts"` LastSeq uint64 `json:"last_seq"` LastTime time.Time `json:"last_ts"` Consumers int `json:"consumer_count"` Deleted []uint64 `json:"deleted"` NumDeleted int `json:"num_deleted"` NumSubjects uint64 `json:"num_subjects"` Subjects map[string]uint64 `json:"subjects"` } // ClusterInfo shows information about the underlying set of servers // that make up the stream or consumer. type ClusterInfo struct { Name string `json:"name,omitempty"` Leader string `json:"leader,omitempty"` Replicas []*PeerInfo `json:"replicas,omitempty"` } // PeerInfo shows information about all the peers in the cluster that // are supporting the stream or consumer. type PeerInfo struct { Name string `json:"name"` Current bool `json:"current"` Offline bool `json:"offline,omitempty"` Active time.Duration `json:"active"` Lag uint64 `json:"lag,omitempty"` } // UpdateStream updates a Stream. func (js *js) UpdateStream(cfg *StreamConfig, opts ...JSOpt) (*StreamInfo, error) { if cfg == nil { return nil, ErrStreamConfigRequired } if err := checkStreamName(cfg.Name); err != nil { return nil, err } o, cancel, err := getJSContextOpts(js.opts, opts...) 
if err != nil { return nil, err } if cancel != nil { defer cancel() } req, err := json.Marshal(cfg) if err != nil { return nil, err } usSubj := js.apiSubj(fmt.Sprintf(apiStreamUpdateT, cfg.Name)) r, err := js.apiRequestWithContext(o.ctx, usSubj, req) if err != nil { return nil, err } var resp streamInfoResponse if err := json.Unmarshal(r.Data, &resp); err != nil { return nil, err } if resp.Error != nil { if errors.Is(resp.Error, ErrStreamNotFound) { return nil, ErrStreamNotFound } return nil, resp.Error } // check that input subject transform (if used) is reflected in the returned StreamInfo if cfg.SubjectTransform != nil && resp.StreamInfo.Config.SubjectTransform == nil { return nil, ErrStreamSubjectTransformNotSupported } if len(cfg.Sources) != 0 { if len(cfg.Sources) != len(resp.Config.Sources) { return nil, ErrStreamSourceNotSupported } for i := range cfg.Sources { if len(cfg.Sources[i].SubjectTransforms) != 0 && len(resp.Sources[i].SubjectTransforms) == 0 { return nil, ErrStreamSourceMultipleSubjectTransformsNotSupported } } } return resp.StreamInfo, nil } // streamDeleteResponse is the response for a Stream delete request. type streamDeleteResponse struct { apiResponse Success bool `json:"success,omitempty"` } // DeleteStream deletes a Stream. func (js *js) DeleteStream(name string, opts ...JSOpt) error { if err := checkStreamName(name); err != nil { return err } o, cancel, err := getJSContextOpts(js.opts, opts...) 
if err != nil { return err } if cancel != nil { defer cancel() } dsSubj := js.apiSubj(fmt.Sprintf(apiStreamDeleteT, name)) r, err := js.apiRequestWithContext(o.ctx, dsSubj, nil) if err != nil { return err } var resp streamDeleteResponse if err := json.Unmarshal(r.Data, &resp); err != nil { return err } if resp.Error != nil { if errors.Is(resp.Error, ErrStreamNotFound) { return ErrStreamNotFound } return resp.Error } return nil } type apiMsgGetRequest struct { Seq uint64 `json:"seq,omitempty"` LastFor string `json:"last_by_subj,omitempty"` NextFor string `json:"next_by_subj,omitempty"` } // RawStreamMsg is a raw message stored in JetStream. type RawStreamMsg struct { Subject string Sequence uint64 Header Header Data []byte Time time.Time } // storedMsg is a raw message stored in JetStream. type storedMsg struct { Subject string `json:"subject"` Sequence uint64 `json:"seq"` Header []byte `json:"hdrs,omitempty"` Data []byte `json:"data,omitempty"` Time time.Time `json:"time"` } // apiMsgGetResponse is the response for a Stream get request. type apiMsgGetResponse struct { apiResponse Message *storedMsg `json:"message,omitempty"` } // GetLastMsg retrieves the last raw stream message stored in JetStream by subject. func (js *js) GetLastMsg(name, subject string, opts ...JSOpt) (*RawStreamMsg, error) { return js.getMsg(name, &apiMsgGetRequest{LastFor: subject}, opts...) } // GetMsg retrieves a raw stream message stored in JetStream by sequence number. func (js *js) GetMsg(name string, seq uint64, opts ...JSOpt) (*RawStreamMsg, error) { return js.getMsg(name, &apiMsgGetRequest{Seq: seq}, opts...) } // Low level getMsg func (js *js) getMsg(name string, mreq *apiMsgGetRequest, opts ...JSOpt) (*RawStreamMsg, error) { o, cancel, err := getJSContextOpts(js.opts, opts...) 
if err != nil { return nil, err } if cancel != nil { defer cancel() } if err := checkStreamName(name); err != nil { return nil, err } var apiSubj string if o.directGet && mreq.LastFor != _EMPTY_ { apiSubj = apiDirectMsgGetLastBySubjectT dsSubj := js.apiSubj(fmt.Sprintf(apiSubj, name, mreq.LastFor)) r, err := js.apiRequestWithContext(o.ctx, dsSubj, nil) if err != nil { return nil, err } return convertDirectGetMsgResponseToMsg(name, r) } if o.directGet { apiSubj = apiDirectMsgGetT mreq.NextFor = o.directNextFor } else { apiSubj = apiMsgGetT } req, err := json.Marshal(mreq) if err != nil { return nil, err } dsSubj := js.apiSubj(fmt.Sprintf(apiSubj, name)) r, err := js.apiRequestWithContext(o.ctx, dsSubj, req) if err != nil { return nil, err } if o.directGet { return convertDirectGetMsgResponseToMsg(name, r) } var resp apiMsgGetResponse if err := json.Unmarshal(r.Data, &resp); err != nil { return nil, err } if resp.Error != nil { if errors.Is(resp.Error, ErrMsgNotFound) { return nil, ErrMsgNotFound } if errors.Is(resp.Error, ErrStreamNotFound) { return nil, ErrStreamNotFound } return nil, resp.Error } msg := resp.Message var hdr Header if len(msg.Header) > 0 { hdr, err = DecodeHeadersMsg(msg.Header) if err != nil { return nil, err } } return &RawStreamMsg{ Subject: msg.Subject, Sequence: msg.Sequence, Header: hdr, Data: msg.Data, Time: msg.Time, }, nil } func convertDirectGetMsgResponseToMsg(name string, r *Msg) (*RawStreamMsg, error) { // Check for 404/408. We would get a no-payload message and a "Status" header if len(r.Data) == 0 { val := r.Header.Get(statusHdr) if val != _EMPTY_ { switch val { case noMessagesSts: return nil, ErrMsgNotFound default: desc := r.Header.Get(descrHdr) if desc == _EMPTY_ { desc = "unable to get message" } return nil, fmt.Errorf("nats: %s", desc) } } } // Check for headers that give us the required information to // reconstruct the message. 
if len(r.Header) == 0 { return nil, errors.New("nats: response should have headers") } stream := r.Header.Get(JSStream) if stream == _EMPTY_ { return nil, errors.New("nats: missing stream header") } // Mirrors can now answer direct gets, so removing check for name equality. // TODO(dlc) - We could have server also have a header with origin and check that? seqStr := r.Header.Get(JSSequence) if seqStr == _EMPTY_ { return nil, errors.New("nats: missing sequence header") } seq, err := strconv.ParseUint(seqStr, 10, 64) if err != nil { return nil, fmt.Errorf("nats: invalid sequence header '%s': %v", seqStr, err) } timeStr := r.Header.Get(JSTimeStamp) if timeStr == _EMPTY_ { return nil, errors.New("nats: missing timestamp header") } // Temporary code: the server in main branch is sending with format // "2006-01-02 15:04:05.999999999 +0000 UTC", but will be changed // to use format RFC3339Nano. Because of server test deps/cycle, // support both until the server PR lands. tm, err := time.Parse(time.RFC3339Nano, timeStr) if err != nil { tm, err = time.Parse("2006-01-02 15:04:05.999999999 +0000 UTC", timeStr) if err != nil { return nil, fmt.Errorf("nats: invalid timestamp header '%s': %v", timeStr, err) } } subj := r.Header.Get(JSSubject) if subj == _EMPTY_ { return nil, errors.New("nats: missing subject header") } return &RawStreamMsg{ Subject: subj, Sequence: seq, Header: r.Header, Data: r.Data, Time: tm, }, nil } type msgDeleteRequest struct { Seq uint64 `json:"seq"` NoErase bool `json:"no_erase,omitempty"` } // msgDeleteResponse is the response for a Stream delete request. type msgDeleteResponse struct { apiResponse Success bool `json:"success,omitempty"` } // DeleteMsg deletes a message from a stream. // The message is marked as erased, but not overwritten func (js *js) DeleteMsg(name string, seq uint64, opts ...JSOpt) error { o, cancel, err := getJSContextOpts(js.opts, opts...) 
if err != nil { return err } if cancel != nil { defer cancel() } return js.deleteMsg(o.ctx, name, &msgDeleteRequest{Seq: seq, NoErase: true}) } // SecureDeleteMsg deletes a message from a stream. The deleted message is overwritten with random data // As a result, this operation is slower than DeleteMsg() func (js *js) SecureDeleteMsg(name string, seq uint64, opts ...JSOpt) error { o, cancel, err := getJSContextOpts(js.opts, opts...) if err != nil { return err } if cancel != nil { defer cancel() } return js.deleteMsg(o.ctx, name, &msgDeleteRequest{Seq: seq}) } func (js *js) deleteMsg(ctx context.Context, stream string, req *msgDeleteRequest) error { if err := checkStreamName(stream); err != nil { return err } reqJSON, err := json.Marshal(req) if err != nil { return err } dsSubj := js.apiSubj(fmt.Sprintf(apiMsgDeleteT, stream)) r, err := js.apiRequestWithContext(ctx, dsSubj, reqJSON) if err != nil { return err } var resp msgDeleteResponse if err := json.Unmarshal(r.Data, &resp); err != nil { return err } if resp.Error != nil { return resp.Error } return nil } // StreamPurgeRequest is optional request information to the purge API. type StreamPurgeRequest struct { // Purge up to but not including sequence. Sequence uint64 `json:"seq,omitempty"` // Subject to match against messages for the purge command. Subject string `json:"filter,omitempty"` // Number of messages to keep. Keep uint64 `json:"keep,omitempty"` } type streamPurgeResponse struct { apiResponse Success bool `json:"success,omitempty"` Purged uint64 `json:"purged"` } // PurgeStream purges messages on a Stream. 
func (js *js) PurgeStream(stream string, opts ...JSOpt) error { if err := checkStreamName(stream); err != nil { return err } var req *StreamPurgeRequest var ok bool for _, opt := range opts { // For PurgeStream, only request body opt is relevant if req, ok = opt.(*StreamPurgeRequest); ok { break } } return js.purgeStream(stream, req) } func (js *js) purgeStream(stream string, req *StreamPurgeRequest, opts ...JSOpt) error { o, cancel, err := getJSContextOpts(js.opts, opts...) if err != nil { return err } if cancel != nil { defer cancel() } var b []byte if req != nil { if b, err = json.Marshal(req); err != nil { return err } } psSubj := js.apiSubj(fmt.Sprintf(apiStreamPurgeT, stream)) r, err := js.apiRequestWithContext(o.ctx, psSubj, b) if err != nil { return err } var resp streamPurgeResponse if err := json.Unmarshal(r.Data, &resp); err != nil { return err } if resp.Error != nil { if errors.Is(resp.Error, ErrBadRequest) { return fmt.Errorf("%w: %s", ErrBadRequest, "invalid purge request body") } return resp.Error } return nil } // streamLister fetches pages of StreamInfo objects. This object is not safe // to use for multiple threads. type streamLister struct { js *js page []*StreamInfo err error offset int pageInfo *apiPaged } // streamListResponse list of detailed stream information. // A nil request is valid and means all streams. type streamListResponse struct { apiResponse apiPaged Streams []*StreamInfo `json:"streams"` } // streamNamesRequest is used for Stream Name requests. type streamNamesRequest struct { apiPagedRequest // These are filters that can be applied to the list. Subject string `json:"subject,omitempty"` } // Next fetches the next StreamInfo page. 
func (s *streamLister) Next() bool { if s.err != nil { return false } if s.pageInfo != nil && s.offset >= s.pageInfo.Total { return false } req, err := json.Marshal(streamNamesRequest{ apiPagedRequest: apiPagedRequest{Offset: s.offset}, Subject: s.js.opts.streamListSubject, }) if err != nil { s.err = err return false } var cancel context.CancelFunc ctx := s.js.opts.ctx if ctx == nil { ctx, cancel = context.WithTimeout(context.Background(), s.js.opts.wait) defer cancel() } slSubj := s.js.apiSubj(apiStreamListT) r, err := s.js.apiRequestWithContext(ctx, slSubj, req) if err != nil { s.err = err return false } var resp streamListResponse if err := json.Unmarshal(r.Data, &resp); err != nil { s.err = err return false } if resp.Error != nil { s.err = resp.Error return false } s.pageInfo = &resp.apiPaged s.page = resp.Streams s.offset += len(s.page) return true } // Page returns the current StreamInfo page. func (s *streamLister) Page() []*StreamInfo { return s.page } // Err returns any errors found while fetching pages. func (s *streamLister) Err() error { return s.err } // Streams can be used to retrieve a list of StreamInfo objects. func (jsc *js) Streams(opts ...JSOpt) <-chan *StreamInfo { o, cancel, err := getJSContextOpts(jsc.opts, opts...) if err != nil { return nil } ch := make(chan *StreamInfo) l := &streamLister{js: &js{nc: jsc.nc, opts: o}} go func() { if cancel != nil { defer cancel() } defer close(ch) for l.Next() { for _, info := range l.Page() { select { case ch <- info: case <-o.ctx.Done(): return } } } }() return ch } // StreamsInfo can be used to retrieve a list of StreamInfo objects. // Deprecated: Use Streams() instead. func (jsc *js) StreamsInfo(opts ...JSOpt) <-chan *StreamInfo { return jsc.Streams(opts...) } type streamNamesLister struct { js *js err error offset int page []string pageInfo *apiPaged } // Next fetches the next stream names page. 
func (l *streamNamesLister) Next() bool { if l.err != nil { return false } if l.pageInfo != nil && l.offset >= l.pageInfo.Total { return false } var cancel context.CancelFunc ctx := l.js.opts.ctx if ctx == nil { ctx, cancel = context.WithTimeout(context.Background(), l.js.opts.wait) defer cancel() } req, err := json.Marshal(streamNamesRequest{ apiPagedRequest: apiPagedRequest{Offset: l.offset}, Subject: l.js.opts.streamListSubject, }) if err != nil { l.err = err return false } r, err := l.js.apiRequestWithContext(ctx, l.js.apiSubj(apiStreams), req) if err != nil { l.err = err return false } var resp streamNamesResponse if err := json.Unmarshal(r.Data, &resp); err != nil { l.err = err return false } if resp.Error != nil { l.err = resp.Error return false } l.pageInfo = &resp.apiPaged l.page = resp.Streams l.offset += len(l.page) return true } // Page returns the current ConsumerInfo page. func (l *streamNamesLister) Page() []string { return l.page } // Err returns any errors found while fetching pages. func (l *streamNamesLister) Err() error { return l.err } // StreamNames is used to retrieve a list of Stream names. func (jsc *js) StreamNames(opts ...JSOpt) <-chan string { o, cancel, err := getJSContextOpts(jsc.opts, opts...) if err != nil { return nil } ch := make(chan string) l := &streamNamesLister{js: &js{nc: jsc.nc, opts: o}} go func() { if cancel != nil { defer cancel() } defer close(ch) for l.Next() { for _, info := range l.Page() { select { case ch <- info: case <-o.ctx.Done(): return } } } }() return ch } // StreamNameBySubject returns a stream name that matches the subject. func (jsc *js) StreamNameBySubject(subj string, opts ...JSOpt) (string, error) { o, cancel, err := getJSContextOpts(jsc.opts, opts...) 
if err != nil { return "", err } if cancel != nil { defer cancel() } var slr streamNamesResponse req := &streamRequest{subj} j, err := json.Marshal(req) if err != nil { return _EMPTY_, err } resp, err := jsc.apiRequestWithContext(o.ctx, jsc.apiSubj(apiStreams), j) if err != nil { if errors.Is(err, ErrNoResponders) { err = ErrJetStreamNotEnabled } return _EMPTY_, err } if err := json.Unmarshal(resp.Data, &slr); err != nil { return _EMPTY_, err } if slr.Error != nil || len(slr.Streams) != 1 { return _EMPTY_, ErrNoMatchingStream } return slr.Streams[0], nil } func getJSContextOpts(defs *jsOpts, opts ...JSOpt) (*jsOpts, context.CancelFunc, error) { var o jsOpts for _, opt := range opts { if err := opt.configureJSContext(&o); err != nil { return nil, nil, err } } // Check for option collisions. Right now just timeout and context. if o.ctx != nil && o.wait != 0 { return nil, nil, ErrContextAndTimeout } if o.wait == 0 && o.ctx == nil { o.wait = defs.wait } var cancel context.CancelFunc if o.ctx == nil && o.wait > 0 { o.ctx, cancel = context.WithTimeout(context.Background(), o.wait) } if o.pre == _EMPTY_ { o.pre = defs.pre } return &o, cancel, nil } nats.go-1.41.0/kv.go000066400000000000000000001036121477351342400141320ustar00rootroot00000000000000// Copyright 2021-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package nats import ( "context" "errors" "fmt" "reflect" "regexp" "strconv" "strings" "sync" "time" "github.com/nats-io/nats.go/internal/parser" ) // KeyValueManager is used to manage KeyValue stores. type KeyValueManager interface { // KeyValue will lookup and bind to an existing KeyValue store. KeyValue(bucket string) (KeyValue, error) // CreateKeyValue will create a KeyValue store with the following configuration. CreateKeyValue(cfg *KeyValueConfig) (KeyValue, error) // DeleteKeyValue will delete this KeyValue store (JetStream stream). DeleteKeyValue(bucket string) error // KeyValueStoreNames is used to retrieve a list of key value store names KeyValueStoreNames() <-chan string // KeyValueStores is used to retrieve a list of key value store statuses KeyValueStores() <-chan KeyValueStatus } // KeyValue contains methods to operate on a KeyValue store. type KeyValue interface { // Get returns the latest value for the key. Get(key string) (entry KeyValueEntry, err error) // GetRevision returns a specific revision value for the key. GetRevision(key string, revision uint64) (entry KeyValueEntry, err error) // Put will place the new value for the key into the store. Put(key string, value []byte) (revision uint64, err error) // PutString will place the string for the key into the store. PutString(key string, value string) (revision uint64, err error) // Create will add the key/value pair iff it does not exist. Create(key string, value []byte) (revision uint64, err error) // Update will update the value iff the latest revision matches. // Update also resets the TTL associated with the key (if any). Update(key string, value []byte, last uint64) (revision uint64, err error) // Delete will place a delete marker and leave all revisions. Delete(key string, opts ...DeleteOpt) error // Purge will place a delete marker and remove all previous revisions. 
Purge(key string, opts ...DeleteOpt) error // Watch for any updates to keys that match the keys argument which could include wildcards. // Watch will send a nil entry when it has received all initial values. Watch(keys string, opts ...WatchOpt) (KeyWatcher, error) // WatchAll will invoke the callback for all updates. WatchAll(opts ...WatchOpt) (KeyWatcher, error) // WatchFiltered will watch for any updates to keys that match the keys // argument. It can be configured with the same options as Watch. WatchFiltered(keys []string, opts ...WatchOpt) (KeyWatcher, error) // Keys will return all keys. // Deprecated: Use ListKeys instead to avoid memory issues. Keys(opts ...WatchOpt) ([]string, error) // ListKeys will return all keys in a channel. ListKeys(opts ...WatchOpt) (KeyLister, error) // History will return all historical values for the key. History(key string, opts ...WatchOpt) ([]KeyValueEntry, error) // Bucket returns the current bucket name. Bucket() string // PurgeDeletes will remove all current delete markers. PurgeDeletes(opts ...PurgeOpt) error // Status retrieves the status and configuration of a bucket Status() (KeyValueStatus, error) } // KeyValueStatus is run-time status about a Key-Value bucket type KeyValueStatus interface { // Bucket the name of the bucket Bucket() string // Values is how many messages are in the bucket, including historical values Values() uint64 // History returns the configured history kept per key History() int64 // TTL is how long the bucket keeps values for TTL() time.Duration // BackingStore indicates what technology is used for storage of the bucket BackingStore() string // Bytes returns the size in bytes of the bucket Bytes() uint64 // IsCompressed indicates if the data is compressed on disk IsCompressed() bool } // KeyWatcher is what is returned when doing a watch. type KeyWatcher interface { // Context returns watcher context optionally provided by nats.Context option. 
Context() context.Context // Updates returns a channel to read any updates to entries. Updates() <-chan KeyValueEntry // Stop will stop this watcher. Stop() error } // KeyLister is used to retrieve a list of key value store keys type KeyLister interface { Keys() <-chan string Stop() error } type WatchOpt interface { configureWatcher(opts *watchOpts) error } // For nats.Context() support. func (ctx ContextOpt) configureWatcher(opts *watchOpts) error { opts.ctx = ctx return nil } type watchOpts struct { ctx context.Context // Do not send delete markers to the update channel. ignoreDeletes bool // Include all history per subject, not just last one. includeHistory bool // Include only updates for keys. updatesOnly bool // retrieve only the meta data of the entry metaOnly bool } type watchOptFn func(opts *watchOpts) error func (opt watchOptFn) configureWatcher(opts *watchOpts) error { return opt(opts) } // IncludeHistory instructs the key watcher to include historical values as well. func IncludeHistory() WatchOpt { return watchOptFn(func(opts *watchOpts) error { if opts.updatesOnly { return errors.New("nats: include history can not be used with updates only") } opts.includeHistory = true return nil }) } // UpdatesOnly instructs the key watcher to only include updates on values (without latest values when started). func UpdatesOnly() WatchOpt { return watchOptFn(func(opts *watchOpts) error { if opts.includeHistory { return errors.New("nats: updates only can not be used with include history") } opts.updatesOnly = true return nil }) } // IgnoreDeletes will have the key watcher not pass any deleted keys. 
func IgnoreDeletes() WatchOpt { return watchOptFn(func(opts *watchOpts) error { opts.ignoreDeletes = true return nil }) } // MetaOnly instructs the key watcher to retrieve only the entry meta data, not the entry value func MetaOnly() WatchOpt { return watchOptFn(func(opts *watchOpts) error { opts.metaOnly = true return nil }) } type PurgeOpt interface { configurePurge(opts *purgeOpts) error } type purgeOpts struct { dmthr time.Duration // Delete markers threshold ctx context.Context } // DeleteMarkersOlderThan indicates that delete or purge markers older than that // will be deleted as part of PurgeDeletes() operation, otherwise, only the data // will be removed but markers that are recent will be kept. // Note that if no option is specified, the default is 30 minutes. You can set // this option to a negative value to instruct to always remove the markers, // regardless of their age. type DeleteMarkersOlderThan time.Duration func (ttl DeleteMarkersOlderThan) configurePurge(opts *purgeOpts) error { opts.dmthr = time.Duration(ttl) return nil } // For nats.Context() support. func (ctx ContextOpt) configurePurge(opts *purgeOpts) error { opts.ctx = ctx return nil } type DeleteOpt interface { configureDelete(opts *deleteOpts) error } type deleteOpts struct { // Remove all previous revisions. purge bool // Delete only if the latest revision matches. revision uint64 } type deleteOptFn func(opts *deleteOpts) error func (opt deleteOptFn) configureDelete(opts *deleteOpts) error { return opt(opts) } // LastRevision deletes if the latest revision matches. func LastRevision(revision uint64) DeleteOpt { return deleteOptFn(func(opts *deleteOpts) error { opts.revision = revision return nil }) } // purge removes all previous revisions. func purge() DeleteOpt { return deleteOptFn(func(opts *deleteOpts) error { opts.purge = true return nil }) } // KeyValueConfig is for configuring a KeyValue store. 
type KeyValueConfig struct { Bucket string `json:"bucket"` Description string `json:"description,omitempty"` MaxValueSize int32 `json:"max_value_size,omitempty"` History uint8 `json:"history,omitempty"` TTL time.Duration `json:"ttl,omitempty"` MaxBytes int64 `json:"max_bytes,omitempty"` Storage StorageType `json:"storage,omitempty"` Replicas int `json:"num_replicas,omitempty"` Placement *Placement `json:"placement,omitempty"` RePublish *RePublish `json:"republish,omitempty"` Mirror *StreamSource `json:"mirror,omitempty"` Sources []*StreamSource `json:"sources,omitempty"` // Enable underlying stream compression. // NOTE: Compression is supported for nats-server 2.10.0+ Compression bool `json:"compression,omitempty"` } // Used to watch all keys. const ( KeyValueMaxHistory = 64 AllKeys = ">" kvLatestRevision = 0 kvop = "KV-Operation" kvdel = "DEL" kvpurge = "PURGE" ) type KeyValueOp uint8 const ( KeyValuePut KeyValueOp = iota KeyValueDelete KeyValuePurge ) func (op KeyValueOp) String() string { switch op { case KeyValuePut: return "KeyValuePutOp" case KeyValueDelete: return "KeyValueDeleteOp" case KeyValuePurge: return "KeyValuePurgeOp" default: return "Unknown Operation" } } // KeyValueEntry is a retrieved entry for Get or List or Watch. type KeyValueEntry interface { // Bucket is the bucket the data was loaded from. Bucket() string // Key is the key that was retrieved. Key() string // Value is the retrieved value. Value() []byte // Revision is a unique sequence for this value. Revision() uint64 // Created is the time the data was put in the bucket. Created() time.Time // Delta is distance from the latest value. Delta() uint64 // Operation returns Put or Delete or Purge. 
Operation() KeyValueOp } // Errors var ( ErrKeyValueConfigRequired = errors.New("nats: config required") ErrInvalidBucketName = errors.New("nats: invalid bucket name") ErrInvalidKey = errors.New("nats: invalid key") ErrBucketNotFound = errors.New("nats: bucket not found") ErrBadBucket = errors.New("nats: bucket not valid key-value store") ErrKeyNotFound = errors.New("nats: key not found") ErrKeyDeleted = errors.New("nats: key was deleted") ErrHistoryToLarge = errors.New("nats: history limited to a max of 64") ErrNoKeysFound = errors.New("nats: no keys found") ) var ( ErrKeyExists JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamWrongLastSequence, Code: 400}, message: "key exists"} ) const ( kvBucketNamePre = "KV_" kvBucketNameTmpl = "KV_%s" kvSubjectsTmpl = "$KV.%s.>" kvSubjectsPreTmpl = "$KV.%s." kvSubjectsPreDomainTmpl = "%s.$KV.%s." kvNoPending = "0" ) // Regex for valid keys and buckets. var ( validBucketRe = regexp.MustCompile(`^[a-zA-Z0-9_-]+$`) validKeyRe = regexp.MustCompile(`^[-/_=\.a-zA-Z0-9]+$`) validSearchKeyRe = regexp.MustCompile(`^[-/_=\.a-zA-Z0-9*]*[>]?$`) ) // KeyValue will lookup and bind to an existing KeyValue store. func (js *js) KeyValue(bucket string) (KeyValue, error) { if !js.nc.serverMinVersion(2, 6, 2) { return nil, errors.New("nats: key-value requires at least server version 2.6.2") } if !bucketValid(bucket) { return nil, ErrInvalidBucketName } stream := fmt.Sprintf(kvBucketNameTmpl, bucket) si, err := js.StreamInfo(stream) if err != nil { if errors.Is(err, ErrStreamNotFound) { err = ErrBucketNotFound } return nil, err } // Do some quick sanity checks that this is a correctly formed stream for KV. // Max msgs per subject should be > 0. if si.Config.MaxMsgsPerSubject < 1 { return nil, ErrBadBucket } return mapStreamToKVS(js, si), nil } // CreateKeyValue will create a KeyValue store with the following configuration. 
func (js *js) CreateKeyValue(cfg *KeyValueConfig) (KeyValue, error) { if !js.nc.serverMinVersion(2, 6, 2) { return nil, errors.New("nats: key-value requires at least server version 2.6.2") } if cfg == nil { return nil, ErrKeyValueConfigRequired } if !bucketValid(cfg.Bucket) { return nil, ErrInvalidBucketName } if _, err := js.AccountInfo(); err != nil { return nil, err } // Default to 1 for history. Max is 64 for now. history := int64(1) if cfg.History > 0 { if cfg.History > KeyValueMaxHistory { return nil, ErrHistoryToLarge } history = int64(cfg.History) } replicas := cfg.Replicas if replicas == 0 { replicas = 1 } // We will set explicitly some values so that we can do comparison // if we get an "already in use" error and need to check if it is same. maxBytes := cfg.MaxBytes if maxBytes == 0 { maxBytes = -1 } maxMsgSize := cfg.MaxValueSize if maxMsgSize == 0 { maxMsgSize = -1 } // When stream's MaxAge is not set, server uses 2 minutes as the default // for the duplicate window. If MaxAge is set, and lower than 2 minutes, // then the duplicate window will be set to that. If MaxAge is greater, // we will cap the duplicate window to 2 minutes (to be consistent with // previous behavior). duplicateWindow := 2 * time.Minute if cfg.TTL > 0 && cfg.TTL < duplicateWindow { duplicateWindow = cfg.TTL } var compression StoreCompression if cfg.Compression { compression = S2Compression } scfg := &StreamConfig{ Name: fmt.Sprintf(kvBucketNameTmpl, cfg.Bucket), Description: cfg.Description, MaxMsgsPerSubject: history, MaxBytes: maxBytes, MaxAge: cfg.TTL, MaxMsgSize: maxMsgSize, Storage: cfg.Storage, Replicas: replicas, Placement: cfg.Placement, AllowRollup: true, DenyDelete: true, Duplicates: duplicateWindow, MaxMsgs: -1, MaxConsumers: -1, AllowDirect: true, RePublish: cfg.RePublish, Compression: compression, } if cfg.Mirror != nil { // Copy in case we need to make changes so we do not change caller's version. 
m := cfg.Mirror.copy() if !strings.HasPrefix(m.Name, kvBucketNamePre) { m.Name = fmt.Sprintf(kvBucketNameTmpl, m.Name) } scfg.Mirror = m scfg.MirrorDirect = true } else if len(cfg.Sources) > 0 { for _, ss := range cfg.Sources { var sourceBucketName string if strings.HasPrefix(ss.Name, kvBucketNamePre) { sourceBucketName = ss.Name[len(kvBucketNamePre):] } else { sourceBucketName = ss.Name ss.Name = fmt.Sprintf(kvBucketNameTmpl, ss.Name) } if ss.External == nil || sourceBucketName != cfg.Bucket { ss.SubjectTransforms = []SubjectTransformConfig{{Source: fmt.Sprintf(kvSubjectsTmpl, sourceBucketName), Destination: fmt.Sprintf(kvSubjectsTmpl, cfg.Bucket)}} } scfg.Sources = append(scfg.Sources, ss) } scfg.Subjects = []string{fmt.Sprintf(kvSubjectsTmpl, cfg.Bucket)} } else { scfg.Subjects = []string{fmt.Sprintf(kvSubjectsTmpl, cfg.Bucket)} } // If we are at server version 2.7.2 or above use DiscardNew. We can not use DiscardNew for 2.7.1 or below. if js.nc.serverMinVersion(2, 7, 2) { scfg.Discard = DiscardNew } si, err := js.AddStream(scfg) if err != nil { // If we have a failure to add, it could be because we have // a config change if the KV was created against a pre 2.7.2 // and we are now moving to a v2.7.2+. If that is the case // and the only difference is the discard policy, then update // the stream. // The same logic applies for KVs created pre 2.9.x and // the AllowDirect setting. if errors.Is(err, ErrStreamNameAlreadyInUse) { if si, _ = js.StreamInfo(scfg.Name); si != nil { // To compare, make the server's stream info discard // policy same than ours. si.Config.Discard = scfg.Discard // Also need to set allow direct for v2.9.x+ si.Config.AllowDirect = scfg.AllowDirect if reflect.DeepEqual(&si.Config, scfg) { si, err = js.UpdateStream(scfg) } } } if err != nil { return nil, err } } return mapStreamToKVS(js, si), nil } // DeleteKeyValue will delete this KeyValue store (JetStream stream). 
// DeleteKeyValue will delete this KeyValue store (JetStream stream).
// The bucket name must pass bucketValid; the backing stream name is
// derived via kvBucketNameTmpl.
func (js *js) DeleteKeyValue(bucket string) error {
	if !bucketValid(bucket) {
		return ErrInvalidBucketName
	}
	stream := fmt.Sprintf(kvBucketNameTmpl, bucket)
	return js.DeleteStream(stream)
}

// kvs is the KeyValue implementation backed by a JetStream stream.
type kvs struct {
	name   string // bucket name
	stream string // backing stream name (KV_<bucket>)
	pre    string // subject prefix for keys in this bucket
	putPre string // alternate publish prefix (set for mirrored buckets)
	js     *js
	// If true, it means that APIPrefix/Domain was set in the context
	// and we need to add something to some of our high level protocols
	// (such as Put, etc..)
	useJSPfx bool
	// To know if we can use the stream direct get API
	useDirect bool
}

// Underlying entry.
type kve struct {
	bucket   string
	key      string
	value    []byte
	revision uint64
	delta    uint64
	created  time.Time
	op       KeyValueOp
}

// Accessors implementing the KeyValueEntry interface.
func (e *kve) Bucket() string        { return e.bucket }
func (e *kve) Key() string           { return e.key }
func (e *kve) Value() []byte         { return e.value }
func (e *kve) Revision() uint64      { return e.revision }
func (e *kve) Created() time.Time    { return e.created }
func (e *kve) Delta() uint64         { return e.delta }
func (e *kve) Operation() KeyValueOp { return e.op }

// bucketValid reports whether bucket is a non-empty, valid bucket name
// per validBucketRe.
func bucketValid(bucket string) bool {
	if len(bucket) == 0 {
		return false
	}
	return validBucketRe.MatchString(bucket)
}

// keyValid reports whether key is a non-empty key that neither starts
// nor ends with '.' and matches validKeyRe.
func keyValid(key string) bool {
	if len(key) == 0 || key[0] == '.' || key[len(key)-1] == '.' {
		return false
	}
	return validKeyRe.MatchString(key)
}

// searchKeyValid is like keyValid but allows wildcard search patterns
// (validated by validSearchKeyRe).
func searchKeyValid(key string) bool {
	if len(key) == 0 || key[0] == '.' || key[len(key)-1] == '.' {
		return false
	}
	return validSearchKeyRe.MatchString(key)
}

// Get returns the latest value for the key.
// A delete/purge tombstone is surfaced as ErrKeyNotFound rather than
// ErrKeyDeleted.
func (kv *kvs) Get(key string) (KeyValueEntry, error) {
	e, err := kv.get(key, kvLatestRevision)
	if err != nil {
		if errors.Is(err, ErrKeyDeleted) {
			return nil, ErrKeyNotFound
		}
		return nil, err
	}

	return e, nil
}

// GetRevision returns a specific revision value for the key.
// GetRevision returns a specific revision value for the key.
// As with Get, a tombstone at that revision is reported as ErrKeyNotFound.
func (kv *kvs) GetRevision(key string, revision uint64) (KeyValueEntry, error) {
	e, err := kv.get(key, revision)
	if err != nil {
		if errors.Is(err, ErrKeyDeleted) {
			return nil, ErrKeyNotFound
		}
		return nil, err
	}

	return e, nil
}

// get fetches the entry for key at the given revision (kvLatestRevision
// for the latest). It returns ErrKeyDeleted (with the entry populated)
// when the stored message is a DEL/PURGE marker, and ErrKeyNotFound when
// the message does not exist or the revision belongs to another key.
func (kv *kvs) get(key string, revision uint64) (KeyValueEntry, error) {
	if !keyValid(key) {
		return nil, ErrInvalidKey
	}

	var b strings.Builder
	b.WriteString(kv.pre)
	b.WriteString(key)

	var m *RawStreamMsg
	var err error
	// Stack-backed option slice avoids a heap allocation in the common case.
	var _opts [1]JSOpt
	opts := _opts[:0]
	if kv.useDirect {
		opts = append(opts, DirectGet())
	}

	if revision == kvLatestRevision {
		m, err = kv.js.GetLastMsg(kv.stream, b.String(), opts...)
	} else {
		m, err = kv.js.GetMsg(kv.stream, revision, opts...)
		// If a sequence was provided, just make sure that the retrieved
		// message subject matches the request.
		if err == nil && m.Subject != b.String() {
			return nil, ErrKeyNotFound
		}
	}
	if err != nil {
		if errors.Is(err, ErrMsgNotFound) {
			err = ErrKeyNotFound
		}
		return nil, err
	}

	entry := &kve{
		bucket:   kv.name,
		key:      key,
		value:    m.Data,
		revision: m.Sequence,
		created:  m.Time,
	}

	// Double check here that this is not a DEL Operation marker.
	if len(m.Header) > 0 {
		switch m.Header.Get(kvop) {
		case kvdel:
			entry.op = KeyValueDelete
			return entry, ErrKeyDeleted
		case kvpurge:
			entry.op = KeyValuePurge
			return entry, ErrKeyDeleted
		}
	}

	return entry, nil
}

// Put will place the new value for the key into the store.
// The returned revision is the stream sequence of the stored message.
func (kv *kvs) Put(key string, value []byte) (revision uint64, err error) {
	if !keyValid(key) {
		return 0, ErrInvalidKey
	}

	var b strings.Builder
	if kv.useJSPfx {
		// Prepend the JS API prefix/domain when one was configured.
		b.WriteString(kv.js.opts.pre)
	}
	if kv.putPre != _EMPTY_ {
		// Mirrored buckets publish through the origin's prefix.
		b.WriteString(kv.putPre)
	} else {
		b.WriteString(kv.pre)
	}
	b.WriteString(key)

	pa, err := kv.js.Publish(b.String(), value)
	if err != nil {
		return 0, err
	}
	return pa.Sequence, err
}

// PutString will place the string for the key into the store.
// PutString will place the string for the key into the store.
func (kv *kvs) PutString(key string, value string) (revision uint64, err error) {
	return kv.Put(key, []byte(value))
}

// Create will add the key/value pair if it does not exist.
// Returns ErrKeyExists (wrapped with the API error message) when the key
// is already present.
func (kv *kvs) Create(key string, value []byte) (revision uint64, err error) {
	// An Update with expected revision 0 only succeeds if the key is new.
	v, err := kv.Update(key, value, 0)
	if err == nil {
		return v, nil
	}

	// TODO(dlc) - Since we have tombstones for DEL ops for watchers, this could be from that
	// so we need to double check.
	if e, err := kv.get(key, kvLatestRevision); errors.Is(err, ErrKeyDeleted) {
		// Key was deleted/purged: retry the update against the tombstone's revision.
		return kv.Update(key, value, e.Revision())
	}

	// Check if the expected last subject sequence is not zero which implies
	// the key already exists.
	if errors.Is(err, ErrKeyExists) {
		jserr := ErrKeyExists.(*jsError)
		return 0, fmt.Errorf("%w: %s", err, jserr.message)
	}
	return 0, err
}

// Update will update the value if the latest revision matches.
// The expected revision is enforced server-side via the
// ExpectedLastSubjSeqHdr header.
func (kv *kvs) Update(key string, value []byte, revision uint64) (uint64, error) {
	if !keyValid(key) {
		return 0, ErrInvalidKey
	}

	var b strings.Builder
	if kv.useJSPfx {
		b.WriteString(kv.js.opts.pre)
	}
	b.WriteString(kv.pre)
	b.WriteString(key)

	m := Msg{Subject: b.String(), Header: Header{}, Data: value}
	m.Header.Set(ExpectedLastSubjSeqHdr, strconv.FormatUint(revision, 10))

	pa, err := kv.js.PublishMsg(&m)
	if err != nil {
		return 0, err
	}
	return pa.Sequence, err
}

// Delete will place a delete marker and leave all revisions.
// With the purge() option a rollup purge marker is published instead.
func (kv *kvs) Delete(key string, opts ...DeleteOpt) error {
	if !keyValid(key) {
		return ErrInvalidKey
	}

	var b strings.Builder
	if kv.useJSPfx {
		b.WriteString(kv.js.opts.pre)
	}
	if kv.putPre != _EMPTY_ {
		b.WriteString(kv.putPre)
	} else {
		b.WriteString(kv.pre)
	}
	b.WriteString(key)

	// DEL op marker. For watch functionality.
	m := NewMsg(b.String())

	var o deleteOpts
	for _, opt := range opts {
		if opt != nil {
			if err := opt.configureDelete(&o); err != nil {
				return err
			}
		}
	}

	if o.purge {
		// PURGE marker: rollup removes prior revisions of this subject.
		m.Header.Set(kvop, kvpurge)
		m.Header.Set(MsgRollup, MsgRollupSubject)
	} else {
		m.Header.Set(kvop, kvdel)
	}

	if o.revision != 0 {
		// Optional optimistic-concurrency guard on the delete itself.
		m.Header.Set(ExpectedLastSubjSeqHdr, strconv.FormatUint(o.revision, 10))
	}

	_, err := kv.js.PublishMsg(m)
	return err
}

// Purge will remove the key and all revisions.
func (kv *kvs) Purge(key string, opts ...DeleteOpt) error {
	return kv.Delete(key, append(opts, purge())...)
}

// Default age cutoff for PurgeDeletes: markers younger than this are kept.
const kvDefaultPurgeDeletesMarkerThreshold = 30 * time.Minute

// PurgeDeletes will remove all current delete markers.
// This is a maintenance option if there is a larger buildup of delete markers.
// See DeleteMarkersOlderThan() option for more information.
func (kv *kvs) PurgeDeletes(opts ...PurgeOpt) error {
	var o purgeOpts
	for _, opt := range opts {
		if opt != nil {
			if err := opt.configurePurge(&o); err != nil {
				return err
			}
		}
	}
	// Transfer possible context purge option to the watcher. This is the
	// only option that matters for the PurgeDeletes() feature.
	var wopts []WatchOpt
	if o.ctx != nil {
		wopts = append(wopts, Context(o.ctx))
	}
	watcher, err := kv.WatchAll(wopts...)
	if err != nil {
		return err
	}
	defer watcher.Stop()

	var limit time.Time
	olderThan := o.dmthr
	// Negative value is used to instruct to always remove markers, regardless
	// of age. If set to 0 (or not set), use our default value.
	if olderThan == 0 {
		olderThan = kvDefaultPurgeDeletesMarkerThreshold
	}
	if olderThan > 0 {
		limit = time.Now().Add(-olderThan)
	}

	// Collect all DEL/PURGE markers; a nil entry marks end of initial data.
	var deleteMarkers []KeyValueEntry
	for entry := range watcher.Updates() {
		if entry == nil {
			break
		}
		if op := entry.Operation(); op == KeyValueDelete || op == KeyValuePurge {
			deleteMarkers = append(deleteMarkers, entry)
		}
	}
	// Stop watcher here so as we purge we do not have the system continually updating numPending.
	watcher.Stop()

	var (
		pr StreamPurgeRequest
		b  strings.Builder
	)
	// Do actual purges here.
	for _, entry := range deleteMarkers {
		b.WriteString(kv.pre)
		b.WriteString(entry.Key())
		pr.Subject = b.String()
		pr.Keep = 0
		// Keep the marker itself when it is newer than the age cutoff.
		if olderThan > 0 && entry.Created().After(limit) {
			pr.Keep = 1
		}
		if err := kv.js.purgeStream(kv.stream, &pr); err != nil {
			return err
		}
		b.Reset()
	}
	return nil
}

// Keys() will return all keys.
// Returns ErrNoKeysFound when the bucket holds no live keys.
func (kv *kvs) Keys(opts ...WatchOpt) ([]string, error) {
	opts = append(opts, IgnoreDeletes(), MetaOnly())
	watcher, err := kv.WatchAll(opts...)
	if err != nil {
		return nil, err
	}
	defer watcher.Stop()

	var keys []string
	for entry := range watcher.Updates() {
		if entry == nil {
			break
		}
		keys = append(keys, entry.Key())
	}
	if len(keys) == 0 {
		return nil, ErrNoKeysFound
	}
	return keys, nil
}

// keyLister streams key names from an underlying watcher.
type keyLister struct {
	watcher KeyWatcher
	keys    chan string
}

// ListKeys will return all keys.
// Keys are delivered lazily over the returned KeyLister's channel; the
// channel is closed (and the watcher stopped) when the initial data set
// has been consumed.
func (kv *kvs) ListKeys(opts ...WatchOpt) (KeyLister, error) {
	opts = append(opts, IgnoreDeletes(), MetaOnly())
	watcher, err := kv.WatchAll(opts...)
	if err != nil {
		return nil, err
	}
	kl := &keyLister{watcher: watcher, keys: make(chan string, 256)}

	go func() {
		defer close(kl.keys)
		defer watcher.Stop()

		for entry := range watcher.Updates() {
			if entry == nil {
				// nil marks end of initial values.
				return
			}
			kl.keys <- entry.Key()
		}
	}()
	return kl, nil
}

// Keys returns the receive-only channel of key names.
func (kl *keyLister) Keys() <-chan string {
	return kl.keys
}

// Stop terminates the underlying watcher.
func (kl *keyLister) Stop() error {
	return kl.watcher.Stop()
}

// History will return all values for the key.
// Returns ErrKeyNotFound when no revisions exist.
func (kv *kvs) History(key string, opts ...WatchOpt) ([]KeyValueEntry, error) {
	opts = append(opts, IncludeHistory())
	watcher, err := kv.Watch(key, opts...)
	if err != nil {
		return nil, err
	}
	defer watcher.Stop()

	var entries []KeyValueEntry
	for entry := range watcher.Updates() {
		if entry == nil {
			break
		}
		entries = append(entries, entry)
	}
	if len(entries) == 0 {
		return nil, ErrKeyNotFound
	}
	return entries, nil
}

// Implementation for Watch
type watcher struct {
	mu            sync.Mutex
	updates       chan KeyValueEntry
	sub           *Subscription
	initDone      bool   // true once the initial data set has been delivered
	initPending   uint64 // number of messages pending at first delivery
	received      uint64 // messages received so far during initialization
	ctx           context.Context
	initDoneTimer *time.Timer // fires the init-done marker if messages stop arriving
}

// Context returns the context for the watcher if set.
func (w *watcher) Context() context.Context {
	if w == nil {
		return nil
	}
	return w.ctx
}

// Updates returns the interior channel.
func (w *watcher) Updates() <-chan KeyValueEntry {
	if w == nil {
		return nil
	}
	return w.updates
}

// Stop will unsubscribe from the watcher.
func (w *watcher) Stop() error {
	if w == nil {
		return nil
	}
	return w.sub.Unsubscribe()
}

// WatchAll watches all keys.
func (kv *kvs) WatchAll(opts ...WatchOpt) (KeyWatcher, error) {
	return kv.Watch(AllKeys, opts...)
}

// WatchFiltered watches the given set of key patterns on a single ordered
// consumer. An empty keys slice watches all keys. A nil entry on the
// updates channel marks the end of the initial data set.
func (kv *kvs) WatchFiltered(keys []string, opts ...WatchOpt) (KeyWatcher, error) {
	for _, key := range keys {
		if !searchKeyValid(key) {
			return nil, fmt.Errorf("%w: %s", ErrInvalidKey, "key cannot be empty and must be a valid NATS subject")
		}
	}
	var o watchOpts
	for _, opt := range opts {
		if opt != nil {
			if err := opt.configureWatcher(&o); err != nil {
				return nil, err
			}
		}
	}

	// Could be a pattern so don't check for validity as we normally do.
	for i, key := range keys {
		var b strings.Builder
		b.WriteString(kv.pre)
		b.WriteString(key)
		keys[i] = b.String()
	}

	// if no keys are provided, watch all keys
	if len(keys) == 0 {
		var b strings.Builder
		b.WriteString(kv.pre)
		b.WriteString(AllKeys)
		keys = []string{b.String()}
	}

	// We will block below on placing items on the chan. That is by design.
	w := &watcher{updates: make(chan KeyValueEntry, 256), ctx: o.ctx}

	// update converts each delivered message into a KeyValueEntry and
	// tracks initial-data-set completion via the ack metadata's pending count.
	update := func(m *Msg) {
		tokens, err := parser.GetMetadataFields(m.Reply)
		if err != nil {
			return
		}
		if len(m.Subject) <= len(kv.pre) {
			return
		}
		subj := m.Subject[len(kv.pre):]

		var op KeyValueOp
		if len(m.Header) > 0 {
			switch m.Header.Get(kvop) {
			case kvdel:
				op = KeyValueDelete
			case kvpurge:
				op = KeyValuePurge
			}
		}
		delta := parser.ParseNum(tokens[parser.AckNumPendingTokenPos])

		w.mu.Lock()
		defer w.mu.Unlock()
		if !o.ignoreDeletes || (op != KeyValueDelete && op != KeyValuePurge) {
			entry := &kve{
				bucket:   kv.name,
				key:      subj,
				value:    m.Data,
				revision: parser.ParseNum(tokens[parser.AckStreamSeqTokenPos]),
				created:  time.Unix(0, int64(parser.ParseNum(tokens[parser.AckTimestampSeqTokenPos]))),
				delta:    delta,
				op:       op,
			}
			w.updates <- entry
		}
		// Check if done and initial values.
		// Skip if UpdatesOnly() is set, since there will never be updates initially.
		if !w.initDone {
			w.received++
			// We set this on the first trip through..
			if w.initPending == 0 {
				w.initPending = delta
			}
			if w.received > w.initPending || delta == 0 {
				w.initDoneTimer.Stop()
				w.initDone = true
				w.updates <- nil
			} else if w.initDoneTimer != nil {
				w.initDoneTimer.Reset(kv.js.opts.wait)
			}
		}
	}

	// Used ordered consumer to deliver results.
	subOpts := []SubOpt{BindStream(kv.stream), OrderedConsumer()}
	if !o.includeHistory {
		subOpts = append(subOpts, DeliverLastPerSubject())
	}
	if o.updatesOnly {
		subOpts = append(subOpts, DeliverNew())
	}
	if o.metaOnly {
		subOpts = append(subOpts, HeadersOnly())
	}
	if o.ctx != nil {
		subOpts = append(subOpts, Context(o.ctx))
	}
	// Create the sub and rest of initialization under the lock.
	// We want to prevent the race between this code and the
	// update() callback.
	w.mu.Lock()
	defer w.mu.Unlock()
	var sub *Subscription
	var err error
	if len(keys) == 1 {
		sub, err = kv.js.Subscribe(keys[0], update, subOpts...)
	} else {
		subOpts = append(subOpts, ConsumerFilterSubjects(keys...))
		sub, err = kv.js.Subscribe("", update, subOpts...)
	}
	if err != nil {
		return nil, err
	}
	sub.mu.Lock()
	// If there were no pending messages at the time of the creation
	// of the consumer, send the marker.
	// Skip if UpdatesOnly() is set, since there will never be updates initially.
	if !o.updatesOnly {
		if sub.jsi != nil && sub.jsi.pending == 0 {
			w.initDone = true
			w.updates <- nil
		} else {
			// Set a timer to send the marker if we do not get any messages.
			w.initDoneTimer = time.AfterFunc(kv.js.opts.wait, func() {
				w.mu.Lock()
				defer w.mu.Unlock()
				if !w.initDone {
					w.initDone = true
					w.updates <- nil
				}
			})
		}
	} else {
		// if UpdatesOnly was used, mark initialization as complete
		w.initDone = true
	}
	// Set us up to close when the waitForMessages func returns.
	sub.pDone = func(_ string) {
		w.mu.Lock()
		defer w.mu.Unlock()
		if w.initDoneTimer != nil {
			w.initDoneTimer.Stop()
		}
		close(w.updates)
	}
	sub.mu.Unlock()

	w.sub = sub
	return w, nil
}

// Watch will fire the callback when a key that matches the keys pattern is updated.
// keys needs to be a valid NATS subject.
func (kv *kvs) Watch(keys string, opts ...WatchOpt) (KeyWatcher, error) {
	return kv.WatchFiltered([]string{keys}, opts...)
}

// Bucket returns the current bucket name (JetStream stream).
func (kv *kvs) Bucket() string {
	return kv.name
}

// KeyValueBucketStatus represents status of a Bucket, implements KeyValueStatus
type KeyValueBucketStatus struct {
	nfo    *StreamInfo
	bucket string
}

// Bucket the name of the bucket
func (s *KeyValueBucketStatus) Bucket() string { return s.bucket }

// Values is how many messages are in the bucket, including historical values
func (s *KeyValueBucketStatus) Values() uint64 { return s.nfo.State.Msgs }

// History returns the configured history kept per key
func (s *KeyValueBucketStatus) History() int64 { return s.nfo.Config.MaxMsgsPerSubject }

// TTL is how long the bucket keeps values for
func (s *KeyValueBucketStatus) TTL() time.Duration { return s.nfo.Config.MaxAge }

// BackingStore indicates what technology is used for storage of the bucket
func (s *KeyValueBucketStatus) BackingStore() string { return "JetStream" }

// StreamInfo is the stream info retrieved to create the status
func (s *KeyValueBucketStatus) StreamInfo() *StreamInfo { return s.nfo }

// Bytes is the size of the stream
func (s *KeyValueBucketStatus) Bytes() uint64 { return s.nfo.State.Bytes }

// IsCompressed indicates if the data is compressed on disk
func (s *KeyValueBucketStatus) IsCompressed() bool { return s.nfo.Config.Compression != NoCompression }

// Status retrieves the status and configuration of a bucket
func (kv *kvs) Status() (KeyValueStatus, error) {
	nfo, err := kv.js.StreamInfo(kv.stream)
	if err != nil {
		return nil, err
	}
	return &KeyValueBucketStatus{nfo: nfo, bucket: kv.name}, nil
}

// KeyValueStoreNames is used to retrieve a list of key value store names
// Names are sent over the returned channel, which is closed when the
// listing goroutine finishes paging through stream names.
func (js *js) KeyValueStoreNames() <-chan string {
	ch := make(chan string)
	l := &streamNamesLister{js: js}
	// Restrict the listing to streams whose subjects match the KV template.
	l.js.opts.streamListSubject = fmt.Sprintf(kvSubjectsTmpl, "*")
	go func() {
		defer close(ch)
		for l.Next() {
			for _, name := range l.Page() {
				// Skip streams that are not KV buckets.
				if !strings.HasPrefix(name, kvBucketNamePre) {
					continue
				}
				ch <- strings.TrimPrefix(name, kvBucketNamePre)
			}
		}
	}()

	return ch
}

// KeyValueStores
is used to retrieve a list of key value store statuses func (js *js) KeyValueStores() <-chan KeyValueStatus { ch := make(chan KeyValueStatus) l := &streamLister{js: js} l.js.opts.streamListSubject = fmt.Sprintf(kvSubjectsTmpl, "*") go func() { defer close(ch) for l.Next() { for _, info := range l.Page() { if !strings.HasPrefix(info.Config.Name, kvBucketNamePre) { continue } ch <- &KeyValueBucketStatus{nfo: info, bucket: strings.TrimPrefix(info.Config.Name, kvBucketNamePre)} } } }() return ch } func mapStreamToKVS(js *js, info *StreamInfo) *kvs { bucket := strings.TrimPrefix(info.Config.Name, kvBucketNamePre) kv := &kvs{ name: bucket, stream: info.Config.Name, pre: fmt.Sprintf(kvSubjectsPreTmpl, bucket), js: js, // Determine if we need to use the JS prefix in front of Put and Delete operations useJSPfx: js.opts.pre != defaultAPIPrefix, useDirect: info.Config.AllowDirect, } // If we are mirroring, we will have mirror direct on, so just use the mirror name // and override use if m := info.Config.Mirror; m != nil { bucket := strings.TrimPrefix(m.Name, kvBucketNamePre) if m.External != nil && m.External.APIPrefix != _EMPTY_ { kv.useJSPfx = false kv.pre = fmt.Sprintf(kvSubjectsPreTmpl, bucket) kv.putPre = fmt.Sprintf(kvSubjectsPreDomainTmpl, m.External.APIPrefix, bucket) } else { kv.putPre = fmt.Sprintf(kvSubjectsPreTmpl, bucket) } } return kv } nats.go-1.41.0/legacy_jetstream.md000066400000000000000000000033631477351342400170310ustar00rootroot00000000000000# Legacy JetStream API This is a documentation for the legacy JetStream API. 
A README for the current API can be found [here](jetstream/README.md) ## JetStream Basic Usage ```go import "github.com/nats-io/nats.go" // Connect to NATS nc, _ := nats.Connect(nats.DefaultURL) // Create JetStream Context js, _ := nc.JetStream(nats.PublishAsyncMaxPending(256)) // Simple Stream Publisher js.Publish("ORDERS.scratch", []byte("hello")) // Simple Async Stream Publisher for i := 0; i < 500; i++ { js.PublishAsync("ORDERS.scratch", []byte("hello")) } select { case <-js.PublishAsyncComplete(): case <-time.After(5 * time.Second): fmt.Println("Did not resolve in time") } // Simple Async Ephemeral Consumer js.Subscribe("ORDERS.*", func(m *nats.Msg) { fmt.Printf("Received a JetStream message: %s\n", string(m.Data)) }) // Simple Sync Durable Consumer (optional SubOpts at the end) sub, err := js.SubscribeSync("ORDERS.*", nats.Durable("MONITOR"), nats.MaxDeliver(3)) m, err := sub.NextMsg(timeout) // Simple Pull Consumer sub, err := js.PullSubscribe("ORDERS.*", "MONITOR") msgs, err := sub.Fetch(10) // Unsubscribe sub.Unsubscribe() // Drain sub.Drain() ``` ## JetStream Basic Management ```go import "github.com/nats-io/nats.go" // Connect to NATS nc, _ := nats.Connect(nats.DefaultURL) // Create JetStream Context js, _ := nc.JetStream() // Create a Stream js.AddStream(&nats.StreamConfig{ Name: "ORDERS", Subjects: []string{"ORDERS.*"}, }) // Update a Stream js.UpdateStream(&nats.StreamConfig{ Name: "ORDERS", MaxBytes: 8, }) // Create a Consumer js.AddConsumer("ORDERS", &nats.ConsumerConfig{ Durable: "MONITOR", }) // Delete Consumer js.DeleteConsumer("ORDERS", "MONITOR") // Delete Stream js.DeleteStream("ORDERS") ``` nats.go-1.41.0/micro/000077500000000000000000000000001477351342400142715ustar00rootroot00000000000000nats.go-1.41.0/micro/README.md000066400000000000000000000211501477351342400155470ustar00rootroot00000000000000# NATS micro [![GoDoc](https://pkg.go.dev/badge/github.com/nats-io/nats.go/micro.svg)](https://pkg.go.dev/github.com/nats-io/nats.go/micro) - 
[Overview](#overview)
- [Basic usage](#basic-usage)
- [Endpoints and groups](#endpoints-and-groups)
- [Discovery and Monitoring](#discovery-and-monitoring)
- [Examples](#examples)
- [Documentation](#documentation)

## Overview

The `micro` package in the NATS.go library provides a simple way to create
microservices that leverage NATS for scalability, load management and
observability.

## Basic usage

To start using the `micro` package, import it in your application:

```go
import "github.com/nats-io/nats.go/micro"
```

The core of the `micro` package is the Service. A Service aggregates endpoints
for handling application logic. Services are named and versioned. You create a
Service using the `micro.NewService()` function, passing in the NATS connection
and Service configuration.

```go
nc, _ := nats.Connect(nats.DefaultURL)

// request handler
echoHandler := func(req micro.Request) {
	req.Respond(req.Data())
}

srv, err := micro.AddService(nc, micro.Config{
	Name:    "EchoService",
	Version: "1.0.0",
	// base handler
	Endpoint: &micro.EndpointConfig{
		Subject: "svc.echo",
		Handler: micro.HandlerFunc(echoHandler),
	},
})
```

After creating the service, it can be accessed by publishing a request on
endpoint subject. For given configuration, run:

```sh
nats req svc.echo "hello!"
```

To get:

```sh
17:37:32 Sending request on "svc.echo"
17:37:32 Received with rtt 365.875µs
hello!
```

## Endpoints and groups

Base endpoint can be optionally configured on a service, but it is also
possible to add more endpoints after the service is created.

```go
srv, _ := micro.AddService(nc, config)

// endpoint will be registered under "svc.add" subject
err = srv.AddEndpoint("svc.add", micro.HandlerFunc(add))
```

In the above example `svc.add` is an endpoint name and subject. It is possible
to have a different endpoint name than the endpoint subject by using
`micro.WithEndpointSubject()` option in `AddEndpoint()`.
```go // endpoint will be registered under "svc.add" subject err = srv.AddEndpoint("Adder", micro.HandlerFunc(echoHandler), micro.WithEndpointSubject("svc.add")) ``` Endpoints can also be aggregated using groups. A group represents a common subject prefix used by all endpoints associated with it. ```go srv, _ := micro.AddService(nc, config) numbersGroup := srv.AddGroup("numbers") // endpoint will be registered under "numbers.add" subject _ = numbersGroup.AddEndpoint("add", micro.HandlerFunc(addHandler)) // endpoint will be registered under "numbers.multiply" subject _ = numbersGroup.AddEndpoint("multiply", micro.HandlerFunc(multiplyHandler)) ``` ## Customizing queue groups For each service, group and endpoint the queue group used to gather responses can be customized or disabled. If not provided a default queue group will be used (`q`). Customizing queue groups can be useful to e.g. implement fanout request pattern or hedged request pattern (to reduce tail latencies by only waiting for the first response for multiple service instances). 
Let's say we have multiple services listening on the same subject, but with different queue groups: ```go for i := 0; i < 5; i++ { srv, _ := micro.AddService(nc, micro.Config{ Name: "EchoService", Version: "1.0.0", QueueGroup: fmt.Sprintf("q-%d", i), // base handler Endpoint: µ.EndpointConfig{ Subject: "svc.echo", Handler: micro.HandlerFunc(echoHandler), }, }) } ``` In the client, we can send request to `svc.echo` to receive responses from all services registered on this subject (or wait only for the first response): ```go sub, _ := nc.SubscribeSync("rply") nc.PublishRequest("svc.echo", "rply", nil) for start := time.Now(); time.Since(start) < 5*time.Second; { msg, err := sub.NextMsg(1 * time.Second) if err != nil { break } fmt.Println("Received ", string(msg.Data)) } ``` Queue groups can be overwritten by setting them on groups and endpoints as well: ```go srv, _ := micro.AddService(nc, micro.Config{ Name: "EchoService", Version: "1.0.0", QueueGroup: "q1", }) g := srv.AddGroup("g", micro.WithGroupQueueGroup("q2")) // will be registered with queue group 'q2' from parent group g.AddEndpoint("bar", micro.HandlerFunc(func(r micro.Request) {})) // will be registered with queue group 'q3' g.AddEndpoint("bar", micro.HandlerFunc(func(r micro.Request) {}), micro.WithEndpointQueueGroup("q3")) ``` Similarly, queue groups can be disabled on service config, group and endpoint levels. If disabled, a standard NATS subscription will be created for the endpoint. ```go // disable queue group for the service srv, _ := micro.AddService(nc, micro.Config{ Name: "EchoService", Version: "1.0.0", QueueGroupDisabled: true, }) // create a group with queue group disabled srv.AddGroup("g", micro.WithEndpointQueueGroupDisabled()) // create an endpoint with queue group disabled srv.AddEndpoint("bar", micro.HandlerFunc(func(r micro.Request) {}), micro.WithEndpointQueueGroupDisabled()) ``` When disabling queue groups, same inheritance rules apply as for customizing queue groups. 
(service config -> group -> endpoint) ## Discovery and Monitoring Each service is assigned a unique ID on creation. A service instance is identified by service name and ID. Multiple services with the same name, but different IDs can be created. Each service exposes 3 endpoints when created: - PING - used for service discovery and RTT calculation - INFO - returns service configuration details (used subjects, service metadata etc.) - STATS - service statistics Each of those operations can be performed on 3 subjects: - all services: `$SRV.` - returns a response for each created service and service instance - by service name: `$SRV..` - returns a response for each service with given `service_name` - by service name and ID: `$SRV...` - returns a response for a service with given `service_name` and `service_id` For given configuration ```go nc, _ := nats.Connect("nats://localhost:4222") echoHandler := func(req micro.Request) { req.Respond(req.Data()) } config := micro.Config{ Name: "EchoService", Version: "1.0.0", Endpoint: µ.EndpointConfig{ Subject: "svc.echo", Handler: micro.HandlerFunc(echoHandler), }, } for i := 0; i < 3; i++ { srv, err := micro.AddService(nc, config) if err != nil { log.Fatal(err) } defer srv.Stop() } ``` Service IDs can be discovered by: ```sh nats req '$SRV.PING.EchoService' '' --replies=3 13:03:04 Sending request on "$SRV.PING.EchoService" 13:03:04 Received with rtt 1.302208ms {"name":"EchoService","id":"x3Yuiq7g7MoxhXdxk7i4K7","version":"1.0.0","metadata":{},"type":"io.nats.micro.v1.ping_response"} 13:03:04 Received with rtt 1.317ms {"name":"EchoService","id":"x3Yuiq7g7MoxhXdxk7i4Kt","version":"1.0.0","metadata":{},"type":"io.nats.micro.v1.ping_response"} 13:03:04 Received with rtt 1.320291ms {"name":"EchoService","id":"x3Yuiq7g7MoxhXdxk7i4Lf","version":"1.0.0","metadata":{},"type":"io.nats.micro.v1.ping_response"} ``` A specific service instance info can be retrieved: ```sh nats req '$SRV.INFO.EchoService.x3Yuiq7g7MoxhXdxk7i4K7' '' | jq 
13:04:19 Sending request on "$SRV.INFO.EchoService.x3Yuiq7g7MoxhXdxk7i4K7" 13:04:19 Received with rtt 318.875µs { "name": "EchoService", "id": "x3Yuiq7g7MoxhXdxk7i4K7", "version": "1.0.0", "metadata": {}, "type": "io.nats.micro.v1.info_response", "description": "", "endpoints": [ { "name": "default", "subject": "svc.echo", "queue_group": "q", "metadata": null } ] } ``` To get statistics for this service: ```sh nats req '$SRV.STATS.EchoService.x3Yuiq7g7MoxhXdxk7i4K7' '' | jq 13:04:46 Sending request on "$SRV.STATS.EchoService.x3Yuiq7g7MoxhXdxk7i4K7" 13:04:46 Received with rtt 678.25µs { "name": "EchoService", "id": "x3Yuiq7g7MoxhXdxk7i4K7", "version": "1.0.0", "metadata": {}, "type": "io.nats.micro.v1.stats_response", "started": "2024-09-24T11:02:55.564771Z", "endpoints": [ { "name": "default", "subject": "svc.echo", "queue_group": "q", "num_requests": 0, "num_errors": 0, "last_error": "", "processing_time": 0, "average_processing_time": 0 } ] } ``` ## Examples For more detailed examples, refer to the `./test/example_test.go` directory in this package. ## Documentation The complete documentation is available on [GoDoc](https://godoc.org/github.com/nats-io/nats.go/micro). nats.go-1.41.0/micro/example_handler_test.go000066400000000000000000000025561477351342400210170ustar00rootroot00000000000000// Copyright 2022-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package micro_test import ( "log" "strconv" "github.com/nats-io/nats.go" "github.com/nats-io/nats.go/micro" ) type rectangle struct { height int width int } // Handle is an implementation of micro.Handler used to // calculate the area of a rectangle func (r rectangle) Handle(req micro.Request) { area := r.height * r.width req.Respond([]byte(strconv.Itoa(area))) } func ExampleHandler() { nc, err := nats.Connect("127.0.0.1:4222") if err != nil { log.Fatal(err) } defer nc.Close() rec := rectangle{10, 5} config := micro.Config{ Name: "RectangleAreaService", Version: "0.1.0", Endpoint: µ.EndpointConfig{ Subject: "area.rectangle", Handler: rec, }, } svc, err := micro.AddService(nc, config) if err != nil { log.Fatal(err) } defer svc.Stop() } nats.go-1.41.0/micro/example_package_test.go000066400000000000000000000051031477351342400207640ustar00rootroot00000000000000// Copyright 2022-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package micro_test import ( "fmt" "log" "strconv" "time" "github.com/nats-io/nats.go" "github.com/nats-io/nats.go/micro" ) func Example() { nc, err := nats.Connect("127.0.0.1:4222") if err != nil { log.Fatal(err) } defer nc.Close() // endpoint handler - in this case, HandlerFunc is used, // which is a built-in implementation of Handler interface echoHandler := func(req micro.Request) { req.Respond(req.Data()) } // second endpoint incrementHandler := func(req micro.Request) { val, err := strconv.Atoi(string(req.Data())) if err != nil { req.Error("400", "request data should be a number", nil) return } responseData := val + 1 req.Respond([]byte(strconv.Itoa(responseData))) } // third endpoint multiplyHandler := func(req micro.Request) { val, err := strconv.Atoi(string(req.Data())) if err != nil { req.Error("400", "request data should be a number", nil) return } responseData := val * 2 req.Respond([]byte(strconv.Itoa(responseData))) } config := micro.Config{ Name: "IncrementService", Version: "0.1.0", Description: "Increment numbers", // base handler - for simple services with single endpoints this is sufficient Endpoint: µ.EndpointConfig{ Subject: "echo", Handler: micro.HandlerFunc(echoHandler), }, } svc, err := micro.AddService(nc, config) if err != nil { log.Fatal(err) } defer svc.Stop() // add a group to aggregate endpoints under common prefix numbers := svc.AddGroup("numbers") // register endpoints in a group err = numbers.AddEndpoint("Increment", micro.HandlerFunc(incrementHandler)) if err != nil { log.Fatal(err) } err = numbers.AddEndpoint("Multiply", micro.HandlerFunc(multiplyHandler)) if err != nil { log.Fatal(err) } // send a request to a service resp, err := nc.Request("numbers.Increment", []byte("3"), 1*time.Second) if err != nil { log.Fatal(err) } responseVal, err := strconv.Atoi(string(resp.Data)) if err != nil { log.Fatal(err) } fmt.Println(responseVal) } 
nats.go-1.41.0/micro/example_test.go000066400000000000000000000171331477351342400173170ustar00rootroot00000000000000// Copyright 2022-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package micro_test import ( "context" "fmt" "log" "reflect" "github.com/nats-io/nats.go" "github.com/nats-io/nats.go/micro" ) func ExampleAddService() { nc, err := nats.Connect("127.0.0.1:4222") if err != nil { log.Fatal(err) } defer nc.Close() echoHandler := func(req micro.Request) { req.Respond(req.Data()) } config := micro.Config{ Name: "EchoService", Version: "1.0.0", Description: "Send back what you receive", // DoneHandler can be set to customize behavior on stopping a service. DoneHandler: func(srv micro.Service) { info := srv.Info() fmt.Printf("stopped service %q with ID %q\n", info.Name, info.ID) }, // ErrorHandler can be used to customize behavior on service execution error. 
ErrorHandler: func(srv micro.Service, err *micro.NATSError) { info := srv.Info() fmt.Printf("Service %q returned an error on subject %q: %s", info.Name, err.Subject, err.Description) }, // optional base handler Endpoint: µ.EndpointConfig{ Subject: "echo", Handler: micro.HandlerFunc(echoHandler), }, } srv, err := micro.AddService(nc, config) if err != nil { log.Fatal(err) } defer srv.Stop() } func ExampleService_AddEndpoint() { nc, err := nats.Connect("127.0.0.1:4222") if err != nil { log.Fatal(err) } defer nc.Close() echoHandler := func(req micro.Request) { req.Respond(req.Data()) } config := micro.Config{ Name: "EchoService", Version: "1.0.0", } srv, err := micro.AddService(nc, config) if err != nil { log.Fatal(err) } // endpoint will be registered under "Echo" subject err = srv.AddEndpoint("Echo", micro.HandlerFunc(echoHandler)) if err != nil { log.Fatal(err) } } func ExampleWithEndpointSubject() { nc, err := nats.Connect("127.0.0.1:4222") if err != nil { log.Fatal(err) } defer nc.Close() echoHandler := func(req micro.Request) { req.Respond(req.Data()) } config := micro.Config{ Name: "EchoService", Version: "1.0.0", } srv, err := micro.AddService(nc, config) if err != nil { log.Fatal(err) } // endpoint will be registered under "service.echo" subject err = srv.AddEndpoint("Echo", micro.HandlerFunc(echoHandler), micro.WithEndpointSubject("service.echo")) if err != nil { log.Fatal(err) } } func ExampleService_AddGroup() { nc, err := nats.Connect("127.0.0.1:4222") if err != nil { log.Fatal(err) } defer nc.Close() echoHandler := func(req micro.Request) { req.Respond(req.Data()) } config := micro.Config{ Name: "EchoService", Version: "1.0.0", } srv, err := micro.AddService(nc, config) if err != nil { log.Fatal(err) } v1 := srv.AddGroup("v1") // endpoint will be registered under "v1.Echo" subject err = v1.AddEndpoint("Echo", micro.HandlerFunc(echoHandler)) if err != nil { log.Fatal(err) } } func ExampleService_Info() { nc, err := nats.Connect("127.0.0.1:4222") if err != 
nil { log.Fatal(err) } defer nc.Close() config := micro.Config{ Name: "EchoService", } srv, _ := micro.AddService(nc, config) // service info info := srv.Info() fmt.Println(info.ID) fmt.Println(info.Name) fmt.Println(info.Description) fmt.Println(info.Version) fmt.Println(info.Endpoints) } func ExampleService_Stats() { nc, err := nats.Connect("127.0.0.1:4222") if err != nil { log.Fatal(err) } defer nc.Close() config := micro.Config{ Name: "EchoService", Version: "0.1.0", Endpoint: µ.EndpointConfig{ Subject: "echo", Handler: micro.HandlerFunc(func(r micro.Request) {}), }, } srv, _ := micro.AddService(nc, config) // stats of a service instance stats := srv.Stats() fmt.Println(stats.Endpoints[0].AverageProcessingTime) fmt.Println(stats.Endpoints[0].ProcessingTime) } func ExampleService_Stop() { nc, err := nats.Connect("127.0.0.1:4222") if err != nil { log.Fatal(err) } defer nc.Close() config := micro.Config{ Name: "EchoService", Version: "0.1.0", } srv, _ := micro.AddService(nc, config) // stop a service err = srv.Stop() if err != nil { log.Fatal(err) } // stop is idempotent so multiple executions will not return an error err = srv.Stop() if err != nil { log.Fatal(err) } } func ExampleService_Stopped() { nc, err := nats.Connect("127.0.0.1:4222") if err != nil { log.Fatal(err) } defer nc.Close() config := micro.Config{ Name: "EchoService", Version: "0.1.0", } srv, _ := micro.AddService(nc, config) // stop a service err = srv.Stop() if err != nil { log.Fatal(err) } if srv.Stopped() { fmt.Println("service stopped") } } func ExampleService_Reset() { nc, err := nats.Connect("127.0.0.1:4222") if err != nil { log.Fatal(err) } defer nc.Close() config := micro.Config{ Name: "EchoService", Version: "0.1.0", } srv, _ := micro.AddService(nc, config) // reset endpoint stats on this service srv.Reset() empty := micro.Stats{ ServiceIdentity: srv.Info().ServiceIdentity, } if !reflect.DeepEqual(srv.Stats(), empty) { log.Fatal("Expected endpoint stats to be empty") } } func 
ExampleContextHandler() { nc, err := nats.Connect("127.0.0.1:4222") if err != nil { log.Fatal(err) } defer nc.Close() handler := func(ctx context.Context, req micro.Request) { select { case <-ctx.Done(): req.Error("400", "context canceled", nil) default: req.Respond([]byte("ok")) } } ctx, cancel := context.WithCancel(context.Background()) defer cancel() config := micro.Config{ Name: "EchoService", Version: "0.1.0", Endpoint: µ.EndpointConfig{ Subject: "echo", Handler: micro.ContextHandler(ctx, handler), }, } srv, _ := micro.AddService(nc, config) defer srv.Stop() } func ExampleControlSubject() { // subject used to get PING from all services subjectPINGAll, _ := micro.ControlSubject(micro.PingVerb, "", "") fmt.Println(subjectPINGAll) // subject used to get PING from services with provided name subjectPINGName, _ := micro.ControlSubject(micro.PingVerb, "CoolService", "") fmt.Println(subjectPINGName) // subject used to get PING from a service with provided name and ID subjectPINGInstance, _ := micro.ControlSubject(micro.PingVerb, "CoolService", "123") fmt.Println(subjectPINGInstance) // Output: // $SRV.PING // $SRV.PING.CoolService // $SRV.PING.CoolService.123 } func ExampleRequest_Respond() { handler := func(req micro.Request) { // respond to the request if err := req.Respond(req.Data()); err != nil { log.Fatal(err) } } fmt.Printf("%T", handler) } func ExampleRequest_RespondJSON() { type Point struct { X int `json:"x"` Y int `json:"y"` } handler := func(req micro.Request) { resp := Point{5, 10} // respond to the request // response will be serialized to {"x":5,"y":10} if err := req.RespondJSON(resp); err != nil { log.Fatal(err) } } fmt.Printf("%T", handler) } func ExampleRequest_Error() { handler := func(req micro.Request) { // respond with an error // Error sets Nats-Service-Error and Nats-Service-Error-Code headers in the response if err := req.Error("400", "bad request", []byte(`{"error": "value should be a number"}`)); err != nil { log.Fatal(err) } } 
fmt.Printf("%T", handler) } nats.go-1.41.0/micro/request.go000066400000000000000000000131161477351342400163120ustar00rootroot00000000000000// Copyright 2022-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package micro import ( "context" "encoding/json" "errors" "fmt" "github.com/nats-io/nats.go" ) type ( // Handler is used to respond to service requests. Handler interface { Handle(Request) } // HandlerFunc is a function implementing [Handler]. // It allows using a function as a request handler, without having to implement Handle // on a separate type. HandlerFunc func(Request) // Request represents service request available in the service handler. // It exposes methods to respond to the request, as well as // getting the request data and headers. Request interface { // Respond sends the response for the request. // Additional headers can be passed using [WithHeaders] option. Respond([]byte, ...RespondOpt) error // RespondJSON marshals the given response value and responds to the request. // Additional headers can be passed using [WithHeaders] option. RespondJSON(any, ...RespondOpt) error // Error prepares and publishes error response from a handler. // A response error should be set containing an error code and description. // Optionally, data can be set as response payload. Error(code, description string, data []byte, opts ...RespondOpt) error // Data returns request data. Data() []byte // Headers returns request headers. 
Headers() Headers // Subject returns underlying NATS message subject. Subject() string // Reply returns underlying NATS message reply subject. Reply() string } // Headers is a wrapper around [*nats.Header] Headers nats.Header // RespondOpt is a function used to configure [Request.Respond] and [Request.RespondJSON] methods. RespondOpt func(*nats.Msg) // request is a default implementation of Request interface request struct { msg *nats.Msg respondError error } serviceError struct { Code string `json:"code"` Description string `json:"description"` } ) var ( ErrRespond = errors.New("NATS error when sending response") ErrMarshalResponse = errors.New("marshaling response") ErrArgRequired = errors.New("argument required") ) func (fn HandlerFunc) Handle(req Request) { fn(req) } // ContextHandler is a helper function used to utilize [context.Context] // in request handlers. func ContextHandler(ctx context.Context, handler func(context.Context, Request)) Handler { return HandlerFunc(func(req Request) { handler(ctx, req) }) } // Respond sends the response for the request. // Additional headers can be passed using [WithHeaders] option. func (r *request) Respond(response []byte, opts ...RespondOpt) error { respMsg := &nats.Msg{ Data: response, } for _, opt := range opts { opt(respMsg) } if err := r.msg.RespondMsg(respMsg); err != nil { r.respondError = fmt.Errorf("%w: %s", ErrRespond, err) return r.respondError } return nil } // RespondJSON marshals the given response value and responds to the request. // Additional headers can be passed using [WithHeaders] option. func (r *request) RespondJSON(response any, opts ...RespondOpt) error { resp, err := json.Marshal(response) if err != nil { return ErrMarshalResponse } return r.Respond(resp, opts...) } // Error prepares and publishes error response from a handler. // A response error should be set containing an error code and description. // Optionally, data can be set as response payload. 
func (r *request) Error(code, description string, data []byte, opts ...RespondOpt) error { if code == "" { return fmt.Errorf("%w: error code", ErrArgRequired) } if description == "" { return fmt.Errorf("%w: description", ErrArgRequired) } response := &nats.Msg{ Header: nats.Header{ ErrorHeader: []string{description}, ErrorCodeHeader: []string{code}, }, } for _, opt := range opts { opt(response) } response.Data = data if err := r.msg.RespondMsg(response); err != nil { r.respondError = err return err } r.respondError = &serviceError{ Code: code, Description: description, } return nil } // WithHeaders can be used to configure response with custom headers. func WithHeaders(headers Headers) RespondOpt { return func(m *nats.Msg) { if m.Header == nil { m.Header = nats.Header(headers) return } for k, v := range headers { m.Header[k] = v } } } // Data returns request data. func (r *request) Data() []byte { return r.msg.Data } // Headers returns request headers. func (r *request) Headers() Headers { return Headers(r.msg.Header) } // Subject returns underlying NATS message subject. func (r *request) Subject() string { return r.msg.Subject } // Reply returns underlying NATS message reply subject. func (r *request) Reply() string { return r.msg.Reply } // Get gets the first value associated with the given key. // It is case-sensitive. func (h Headers) Get(key string) string { return nats.Header(h).Get(key) } // Values returns all values associated with the given key. // It is case-sensitive. func (h Headers) Values(key string) []string { return nats.Header(h).Values(key) } func (e *serviceError) Error() string { return fmt.Sprintf("%s:%s", e.Code, e.Description) } nats.go-1.41.0/micro/service.go000066400000000000000000000643161477351342400162720ustar00rootroot00000000000000// Copyright 2022-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package micro import ( "encoding/json" "errors" "fmt" "regexp" "strings" "sync" "time" "github.com/nats-io/nats.go" "github.com/nats-io/nuid" ) type ( // Service exposes methods to operate on a service instance. Service interface { // AddEndpoint registers endpoint with given name on a specific subject. AddEndpoint(string, Handler, ...EndpointOpt) error // AddGroup returns a Group interface, allowing for more complex endpoint topologies. // A group can be used to register endpoints with given prefix. AddGroup(string, ...GroupOpt) Group // Info returns the service info. Info() Info // Stats returns statistics for the service endpoint and all monitoring endpoints. Stats() Stats // Reset resets all statistics (for all endpoints) on a service instance. Reset() // Stop drains the endpoint subscriptions and marks the service as stopped. Stop() error // Stopped informs whether [Stop] was executed on the service. Stopped() bool } // Group allows for grouping endpoints on a service. // // Endpoints created using AddEndpoint will be grouped under common prefix (group name) // New groups can also be derived from a group using AddGroup. Group interface { // AddGroup creates a new group, prefixed by this group's prefix. AddGroup(string, ...GroupOpt) Group // AddEndpoint registers new endpoints on a service. // The endpoint's subject will be prefixed with the group prefix. 
AddEndpoint(string, Handler, ...EndpointOpt) error } EndpointOpt func(*endpointOpts) error GroupOpt func(*groupOpts) endpointOpts struct { subject string metadata map[string]string queueGroup string qgDisabled bool } groupOpts struct { queueGroup string qgDisabled bool } // ErrHandler is a function used to configure a custom error handler for a service, ErrHandler func(Service, *NATSError) // DoneHandler is a function used to configure a custom done handler for a service. DoneHandler func(Service) // StatsHandler is a function used to configure a custom STATS endpoint. // It should return a value which can be serialized to JSON. StatsHandler func(*Endpoint) any // ServiceIdentity contains fields helping to identity a service instance. ServiceIdentity struct { Name string `json:"name"` ID string `json:"id"` Version string `json:"version"` Metadata map[string]string `json:"metadata"` } // Stats is the type returned by STATS monitoring endpoint. // It contains stats of all registered endpoints. Stats struct { ServiceIdentity Type string `json:"type"` Started time.Time `json:"started"` Endpoints []*EndpointStats `json:"endpoints"` } // EndpointStats contains stats for a specific endpoint. EndpointStats struct { Name string `json:"name"` Subject string `json:"subject"` QueueGroup string `json:"queue_group"` NumRequests int `json:"num_requests"` NumErrors int `json:"num_errors"` LastError string `json:"last_error"` ProcessingTime time.Duration `json:"processing_time"` AverageProcessingTime time.Duration `json:"average_processing_time"` Data json.RawMessage `json:"data,omitempty"` } // Ping is the response type for PING monitoring endpoint. Ping struct { ServiceIdentity Type string `json:"type"` } // Info is the basic information about a service type. 
Info struct { ServiceIdentity Type string `json:"type"` Description string `json:"description"` Endpoints []EndpointInfo `json:"endpoints"` } EndpointInfo struct { Name string `json:"name"` Subject string `json:"subject"` QueueGroup string `json:"queue_group"` Metadata map[string]string `json:"metadata"` } // Endpoint manages a service endpoint. Endpoint struct { EndpointConfig Name string service *service stats EndpointStats subscription *nats.Subscription } group struct { service *service prefix string queueGroup string queueGroupDisabled bool } // Verb represents a name of the monitoring service. Verb int64 // Config is a configuration of a service. Config struct { // Name represents the name of the service. Name string `json:"name"` // Endpoint is an optional endpoint configuration. // More complex, multi-endpoint services can be configured using // Service.AddGroup and Service.AddEndpoint methods. Endpoint *EndpointConfig `json:"endpoint"` // Version is a SemVer compatible version string. Version string `json:"version"` // Description of the service. Description string `json:"description"` // Metadata annotates the service Metadata map[string]string `json:"metadata,omitempty"` // QueueGroup can be used to override the default queue group name. QueueGroup string `json:"queue_group"` // QueueGroupDisabled disables the queue group for the service. QueueGroupDisabled bool `json:"queue_group_disabled"` // StatsHandler is a user-defined custom function. // used to calculate additional service stats. StatsHandler StatsHandler // DoneHandler is invoked when all service subscription are stopped. DoneHandler DoneHandler // ErrorHandler is invoked on any nats-related service error. ErrorHandler ErrHandler } EndpointConfig struct { // Subject on which the endpoint is registered. Subject string // Handler used by the endpoint. 
Handler Handler // Metadata annotates the service Metadata map[string]string `json:"metadata,omitempty"` // QueueGroup can be used to override the default queue group name. QueueGroup string `json:"queue_group"` // QueueGroupDisabled disables the queue group for the endpoint. QueueGroupDisabled bool `json:"queue_group_disabled"` } // NATSError represents an error returned by a NATS Subscription. // It contains a subject on which the subscription failed, so that // it can be linked with a specific service endpoint. NATSError struct { Subject string Description string } // service represents a configured NATS service. // It should be created using [Add] in order to configure the appropriate NATS subscriptions // for request handler and monitoring. service struct { // Config contains a configuration of the service Config m sync.Mutex id string endpoints []*Endpoint verbSubs map[string]*nats.Subscription started time.Time nc *nats.Conn natsHandlers handlers stopped bool asyncDispatcher asyncCallbacksHandler } handlers struct { closed nats.ConnHandler asyncErr nats.ErrHandler } asyncCallbacksHandler struct { cbQueue chan func() closed bool } ) const ( // Queue Group name used across all services DefaultQueueGroup = "q" // APIPrefix is the root of all control subjects APIPrefix = "$SRV" ) // Service Error headers const ( ErrorHeader = "Nats-Service-Error" ErrorCodeHeader = "Nats-Service-Error-Code" ) // Verbs being used to set up a specific control subject. 
const ( PingVerb Verb = iota StatsVerb InfoVerb ) const ( InfoResponseType = "io.nats.micro.v1.info_response" PingResponseType = "io.nats.micro.v1.ping_response" StatsResponseType = "io.nats.micro.v1.stats_response" ) var ( // this regular expression is suggested regexp for semver validation: https://semver.org/ semVerRegexp = regexp.MustCompile(`^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`) nameRegexp = regexp.MustCompile(`^[A-Za-z0-9\-_]+$`) subjectRegexp = regexp.MustCompile(`^[^ >]*[>]?$`) ) // Common errors returned by the Service framework. var ( // ErrConfigValidation is returned when service configuration is invalid ErrConfigValidation = errors.New("validation") // ErrVerbNotSupported is returned when invalid [Verb] is used (PING, INFO, STATS) ErrVerbNotSupported = errors.New("unsupported verb") // ErrServiceNameRequired is returned when attempting to generate control subject with ID but empty name ErrServiceNameRequired = errors.New("service name is required to generate ID control subject") ) func (s Verb) String() string { switch s { case PingVerb: return "PING" case StatsVerb: return "STATS" case InfoVerb: return "INFO" default: return "" } } // AddService adds a microservice. // It will enable internal common services (PING, STATS and INFO). // Request handlers have to be registered separately using Service.AddEndpoint. // A service name, version and Endpoint configuration are required to add a service. // AddService returns a [Service] interface, allowing service management. // Each service is assigned a unique ID. 
func AddService(nc *nats.Conn, config Config) (Service, error) { if err := config.valid(); err != nil { return nil, err } if config.Metadata == nil { config.Metadata = map[string]string{} } id := nuid.Next() svc := &service{ Config: config, nc: nc, id: id, asyncDispatcher: asyncCallbacksHandler{ cbQueue: make(chan func(), 100), }, verbSubs: make(map[string]*nats.Subscription), endpoints: make([]*Endpoint, 0), } // Add connection event (closed, error) wrapper handlers. If the service has // custom callbacks, the events are queued and invoked by the same // goroutine, starting now. go svc.asyncDispatcher.run() svc.wrapConnectionEventCallbacks() if config.Endpoint != nil { opts := []EndpointOpt{WithEndpointSubject(config.Endpoint.Subject)} if config.Endpoint.Metadata != nil { opts = append(opts, WithEndpointMetadata(config.Endpoint.Metadata)) } if config.Endpoint.QueueGroup != "" { opts = append(opts, WithEndpointQueueGroup(config.Endpoint.QueueGroup)) } else if config.QueueGroup != "" { opts = append(opts, WithEndpointQueueGroup(config.QueueGroup)) } if err := svc.AddEndpoint("default", config.Endpoint.Handler, opts...); err != nil { svc.asyncDispatcher.close() return nil, err } } // Setup internal subscriptions. 
pingResponse := Ping{ ServiceIdentity: svc.serviceIdentity(), Type: PingResponseType, } handleVerb := func(verb Verb, valuef func() any) func(req Request) { return func(req Request) { response, _ := json.Marshal(valuef()) if err := req.Respond(response); err != nil { if err := req.Error("500", fmt.Sprintf("Error handling %s request: %s", verb, err), nil); err != nil && config.ErrorHandler != nil { svc.asyncDispatcher.push(func() { config.ErrorHandler(svc, &NATSError{req.Subject(), err.Error()}) }) } } } } for verb, source := range map[Verb]func() any{ InfoVerb: func() any { return svc.Info() }, PingVerb: func() any { return pingResponse }, StatsVerb: func() any { return svc.Stats() }, } { handler := handleVerb(verb, source) if err := svc.addVerbHandlers(nc, verb, handler); err != nil { svc.asyncDispatcher.close() return nil, err } } svc.started = time.Now().UTC() return svc, nil } func (s *service) AddEndpoint(name string, handler Handler, opts ...EndpointOpt) error { var options endpointOpts for _, opt := range opts { if err := opt(&options); err != nil { return err } } subject := name if options.subject != "" { subject = options.subject } queueGroup, noQueue := resolveQueueGroup(options.queueGroup, s.Config.QueueGroup, options.qgDisabled, s.Config.QueueGroupDisabled) return addEndpoint(s, name, subject, handler, options.metadata, queueGroup, noQueue) } func addEndpoint(s *service, name, subject string, handler Handler, metadata map[string]string, queueGroup string, noQueue bool) error { if !nameRegexp.MatchString(name) { return fmt.Errorf("%w: invalid endpoint name", ErrConfigValidation) } if !subjectRegexp.MatchString(subject) { return fmt.Errorf("%w: invalid endpoint subject", ErrConfigValidation) } if !subjectRegexp.MatchString(queueGroup) { return fmt.Errorf("%w: invalid endpoint queue group", ErrConfigValidation) } endpoint := &Endpoint{ service: s, EndpointConfig: EndpointConfig{ Subject: subject, Handler: handler, Metadata: metadata, QueueGroup: 
queueGroup, QueueGroupDisabled: noQueue, }, Name: name, } var sub *nats.Subscription var err error if !noQueue { sub, err = s.nc.QueueSubscribe( subject, queueGroup, func(m *nats.Msg) { s.reqHandler(endpoint, &request{msg: m}) }, ) } else { sub, err = s.nc.Subscribe( subject, func(m *nats.Msg) { s.reqHandler(endpoint, &request{msg: m}) }, ) } if err != nil { return err } s.m.Lock() endpoint.subscription = sub s.endpoints = append(s.endpoints, endpoint) endpoint.stats = EndpointStats{ Name: name, Subject: subject, QueueGroup: queueGroup, } s.m.Unlock() return nil } func (s *service) AddGroup(name string, opts ...GroupOpt) Group { var o groupOpts for _, opt := range opts { opt(&o) } queueGroup, noQueue := resolveQueueGroup(o.queueGroup, s.Config.QueueGroup, o.qgDisabled, s.Config.QueueGroupDisabled) return &group{ service: s, prefix: name, queueGroup: queueGroup, queueGroupDisabled: noQueue, } } // dispatch is responsible for calling any async callbacks func (ac *asyncCallbacksHandler) run() { for { f, ok := <-ac.cbQueue if !ok || f == nil { return } f() } } // dispatch is responsible for calling any async callbacks func (ac *asyncCallbacksHandler) push(f func()) { ac.cbQueue <- f } func (ac *asyncCallbacksHandler) close() { if ac.closed { return } close(ac.cbQueue) ac.closed = true } func (c *Config) valid() error { if !nameRegexp.MatchString(c.Name) { return fmt.Errorf("%w: service name: name should not be empty and should consist of alphanumerical characters, dashes and underscores", ErrConfigValidation) } if !semVerRegexp.MatchString(c.Version) { return fmt.Errorf("%w: version: version should not be empty should match the SemVer format", ErrConfigValidation) } if c.QueueGroup != "" && !subjectRegexp.MatchString(c.QueueGroup) { return fmt.Errorf("%w: queue group: invalid queue group name", ErrConfigValidation) } return nil } func (s *service) wrapConnectionEventCallbacks() { s.m.Lock() defer s.m.Unlock() s.natsHandlers.closed = s.nc.ClosedHandler() if 
s.natsHandlers.closed != nil { s.nc.SetClosedHandler(func(c *nats.Conn) { s.Stop() s.natsHandlers.closed(c) }) } else { s.nc.SetClosedHandler(func(c *nats.Conn) { s.Stop() }) } s.natsHandlers.asyncErr = s.nc.ErrorHandler() if s.natsHandlers.asyncErr != nil { s.nc.SetErrorHandler(func(c *nats.Conn, sub *nats.Subscription, err error) { if sub == nil { s.natsHandlers.asyncErr(c, sub, err) return } endpoint, match := s.matchSubscriptionSubject(sub.Subject) if !match { s.natsHandlers.asyncErr(c, sub, err) return } if s.Config.ErrorHandler != nil { s.Config.ErrorHandler(s, &NATSError{ Subject: sub.Subject, Description: err.Error(), }) } s.m.Lock() if endpoint != nil { endpoint.stats.NumErrors++ endpoint.stats.LastError = err.Error() } s.m.Unlock() if stopErr := s.Stop(); stopErr != nil { s.natsHandlers.asyncErr(c, sub, errors.Join(err, fmt.Errorf("stopping service: %w", stopErr))) } else { s.natsHandlers.asyncErr(c, sub, err) } }) } else { s.nc.SetErrorHandler(func(c *nats.Conn, sub *nats.Subscription, err error) { if sub == nil { return } endpoint, match := s.matchSubscriptionSubject(sub.Subject) if !match { return } if s.Config.ErrorHandler != nil { s.Config.ErrorHandler(s, &NATSError{ Subject: sub.Subject, Description: err.Error(), }) } s.m.Lock() if endpoint != nil { endpoint.stats.NumErrors++ endpoint.stats.LastError = err.Error() } s.m.Unlock() s.Stop() }) } } func unwrapConnectionEventCallbacks(nc *nats.Conn, handlers handlers) { if nc.IsClosed() { return } nc.SetClosedHandler(handlers.closed) nc.SetErrorHandler(handlers.asyncErr) } func (s *service) matchSubscriptionSubject(subj string) (*Endpoint, bool) { s.m.Lock() defer s.m.Unlock() for _, verbSub := range s.verbSubs { if verbSub.Subject == subj { return nil, true } } for _, e := range s.endpoints { if matchEndpointSubject(e.Subject, subj) { return e, true } } return nil, false } func matchEndpointSubject(endpointSubject, literalSubject string) bool { subjectTokens := strings.Split(literalSubject, ".") 
endpointTokens := strings.Split(endpointSubject, ".") if len(endpointTokens) > len(subjectTokens) { return false } for i, et := range endpointTokens { if i == len(endpointTokens)-1 && et == ">" { return true } if et != subjectTokens[i] && et != "*" { return false } } return true } // addVerbHandlers generates control handlers for a specific verb. // Each request generates 3 subscriptions, one for the general verb // affecting all services written with the framework, one that handles // all services of a particular kind, and finally a specific service instance. func (svc *service) addVerbHandlers(nc *nats.Conn, verb Verb, handler HandlerFunc) error { name := fmt.Sprintf("%s-all", verb.String()) if err := svc.addInternalHandler(nc, verb, "", "", name, handler); err != nil { return err } name = fmt.Sprintf("%s-kind", verb.String()) if err := svc.addInternalHandler(nc, verb, svc.Config.Name, "", name, handler); err != nil { return err } return svc.addInternalHandler(nc, verb, svc.Config.Name, svc.id, verb.String(), handler) } // addInternalHandler registers a control subject handler. 
func (s *service) addInternalHandler(nc *nats.Conn, verb Verb, kind, id, name string, handler HandlerFunc) error { subj, err := ControlSubject(verb, kind, id) if err != nil { if stopErr := s.Stop(); stopErr != nil { return errors.Join(err, fmt.Errorf("stopping service: %w", stopErr)) } return err } s.verbSubs[name], err = nc.Subscribe(subj, func(msg *nats.Msg) { handler(&request{msg: msg}) }) if err != nil { if stopErr := s.Stop(); stopErr != nil { return errors.Join(err, fmt.Errorf("stopping service: %w", stopErr)) } return err } return nil } // reqHandler invokes the service request handler and modifies service stats func (s *service) reqHandler(endpoint *Endpoint, req *request) { start := time.Now() endpoint.Handler.Handle(req) s.m.Lock() endpoint.stats.NumRequests++ endpoint.stats.ProcessingTime += time.Since(start) avgProcessingTime := endpoint.stats.ProcessingTime.Nanoseconds() / int64(endpoint.stats.NumRequests) endpoint.stats.AverageProcessingTime = time.Duration(avgProcessingTime) if req.respondError != nil { endpoint.stats.NumErrors++ endpoint.stats.LastError = req.respondError.Error() } s.m.Unlock() } // Stop drains the endpoint subscriptions and marks the service as stopped. 
func (s *service) Stop() error { s.m.Lock() defer s.m.Unlock() if s.stopped { return nil } for _, e := range s.endpoints { if err := e.stop(); err != nil { fmt.Println("Error stopping endpoint: ", err) return err } } var keys []string for key, sub := range s.verbSubs { keys = append(keys, key) if err := sub.Drain(); err != nil { // connection is closed so draining is not possible if errors.Is(err, nats.ErrConnectionClosed) { break } return fmt.Errorf("draining subscription for subject %q: %w", sub.Subject, err) } } for _, key := range keys { delete(s.verbSubs, key) } unwrapConnectionEventCallbacks(s.nc, s.natsHandlers) s.stopped = true if s.DoneHandler != nil { s.asyncDispatcher.push(func() { s.DoneHandler(s) }) } s.asyncDispatcher.close() return nil } func (s *service) serviceIdentity() ServiceIdentity { return ServiceIdentity{ Name: s.Config.Name, ID: s.id, Version: s.Config.Version, Metadata: s.Config.Metadata, } } // Info returns information about the service func (s *service) Info() Info { s.m.Lock() defer s.m.Unlock() endpoints := make([]EndpointInfo, 0, len(s.endpoints)) for _, e := range s.endpoints { endpoints = append(endpoints, EndpointInfo{ Name: e.Name, Subject: e.Subject, QueueGroup: e.QueueGroup, Metadata: e.Metadata, }) } return Info{ ServiceIdentity: s.serviceIdentity(), Type: InfoResponseType, Description: s.Config.Description, Endpoints: endpoints, } } // Stats returns statistics for the service endpoint and all monitoring endpoints. 
func (s *service) Stats() Stats { s.m.Lock() defer s.m.Unlock() stats := Stats{ ServiceIdentity: s.serviceIdentity(), Endpoints: make([]*EndpointStats, 0), Type: StatsResponseType, Started: s.started, } for _, endpoint := range s.endpoints { endpointStats := &EndpointStats{ Name: endpoint.stats.Name, Subject: endpoint.stats.Subject, QueueGroup: endpoint.stats.QueueGroup, NumRequests: endpoint.stats.NumRequests, NumErrors: endpoint.stats.NumErrors, LastError: endpoint.stats.LastError, ProcessingTime: endpoint.stats.ProcessingTime, AverageProcessingTime: endpoint.stats.AverageProcessingTime, } if s.StatsHandler != nil { data, _ := json.Marshal(s.StatsHandler(endpoint)) endpointStats.Data = data } stats.Endpoints = append(stats.Endpoints, endpointStats) } return stats } // Reset resets all statistics on a service instance. func (s *service) Reset() { s.m.Lock() for _, endpoint := range s.endpoints { endpoint.reset() } s.started = time.Now().UTC() s.m.Unlock() } // Stopped informs whether [Stop] was executed on the service. 
func (s *service) Stopped() bool {
	s.m.Lock()
	defer s.m.Unlock()
	return s.stopped
}

// Error implements the error interface, formatting the failing subject and
// its description.
func (e *NATSError) Error() string {
	return fmt.Sprintf("%q: %s", e.Subject, e.Description)
}

// AddEndpoint registers a request handler on this group. The endpoint subject
// is the group prefix joined with the endpoint name unless overridden via
// WithEndpointSubject.
func (g *group) AddEndpoint(name string, handler Handler, opts ...EndpointOpt) error {
	var options endpointOpts
	for _, opt := range opts {
		if err := opt(&options); err != nil {
			return err
		}
	}
	subject := name
	if options.subject != "" {
		subject = options.subject
	}
	endpointSubject := fmt.Sprintf("%s.%s", g.prefix, subject)
	if g.prefix == "" {
		endpointSubject = subject
	}
	queueGroup, noQueue := resolveQueueGroup(options.queueGroup, g.queueGroup, options.qgDisabled, g.queueGroupDisabled)
	return addEndpoint(g.service, name, endpointSubject, handler, options.metadata, queueGroup, noQueue)
}

// resolveQueueGroup picks the effective queue group: an explicit disable wins,
// then a custom group, then the parent's disable/group, and finally
// DefaultQueueGroup. The second return value reports that the queue group is
// disabled.
func resolveQueueGroup(customQG, parentQG string, disabled, parentDisabled bool) (string, bool) {
	if disabled {
		return "", true
	}
	if customQG != "" {
		return customQG, false
	}
	if parentDisabled {
		return "", true
	}
	if parentQG != "" {
		return parentQG, false
	}
	return DefaultQueueGroup, false
}

// AddGroup creates a sub-group whose prefix is this group's prefix joined
// with name (empty components are skipped), inheriting queue group settings
// resolved against this group.
func (g *group) AddGroup(name string, opts ...GroupOpt) Group {
	var o groupOpts
	for _, opt := range opts {
		opt(&o)
	}
	queueGroup, noQueue := resolveQueueGroup(o.queueGroup, g.queueGroup, o.qgDisabled, g.queueGroupDisabled)
	parts := make([]string, 0, 2)
	if g.prefix != "" {
		parts = append(parts, g.prefix)
	}
	if name != "" {
		parts = append(parts, name)
	}
	prefix := strings.Join(parts, ".")
	return &group{
		service:            g.service,
		prefix:             prefix,
		queueGroup:         queueGroup,
		queueGroupDisabled: noQueue,
	}
}

func (e *Endpoint) stop() error {
	// Drain the subscription. If the connection is closed, draining is not possible
	// but we should still remove the endpoint from the service.
if err := e.subscription.Drain(); err != nil && !errors.Is(err, nats.ErrConnectionClosed) { return fmt.Errorf("draining subscription for request handler: %w", err) } for i := 0; i < len(e.service.endpoints); i++ { if e.service.endpoints[i].Subject == e.Subject { if i != len(e.service.endpoints)-1 { e.service.endpoints = append(e.service.endpoints[:i], e.service.endpoints[i+1:]...) } else { e.service.endpoints = e.service.endpoints[:i] } i++ } } return nil } func (e *Endpoint) reset() { e.stats = EndpointStats{ Name: e.stats.Name, Subject: e.stats.Subject, } } // ControlSubject returns monitoring subjects used by the Service. // Providing a verb is mandatory (it should be one of Ping, Info or Stats). // Depending on whether kind and id are provided, ControlSubject will return one of the following: // - verb only: subject used to monitor all available services // - verb and kind: subject used to monitor services with the provided name // - verb, name and id: subject used to monitor an instance of a service with the provided ID func ControlSubject(verb Verb, name, id string) (string, error) { verbStr := verb.String() if verbStr == "" { return "", fmt.Errorf("%w: %q", ErrVerbNotSupported, verbStr) } if name == "" && id != "" { return "", ErrServiceNameRequired } if name == "" && id == "" { return fmt.Sprintf("%s.%s", APIPrefix, verbStr), nil } if id == "" { return fmt.Sprintf("%s.%s.%s", APIPrefix, verbStr, name), nil } return fmt.Sprintf("%s.%s.%s.%s", APIPrefix, verbStr, name, id), nil } func WithEndpointSubject(subject string) EndpointOpt { return func(e *endpointOpts) error { e.subject = subject return nil } } func WithEndpointMetadata(metadata map[string]string) EndpointOpt { return func(e *endpointOpts) error { e.metadata = metadata return nil } } func WithEndpointQueueGroup(queueGroup string) EndpointOpt { return func(e *endpointOpts) error { e.queueGroup = queueGroup return nil } } func WithEndpointQueueGroupDisabled() EndpointOpt { return func(e *endpointOpts) 
	error {
		e.qgDisabled = true
		return nil
	}
}

// WithGroupQueueGroup sets a custom queue group for the group.
func WithGroupQueueGroup(queueGroup string) GroupOpt {
	return func(g *groupOpts) {
		g.queueGroup = queueGroup
	}
}

// WithGroupQueueGroupDisabled disables the queue group for the group.
func WithGroupQueueGroupDisabled() GroupOpt {
	return func(g *groupOpts) {
		g.qgDisabled = true
	}
}
nats.go-1.41.0/micro/test/000077500000000000000000000000001477351342400152505ustar00rootroot00000000000000nats.go-1.41.0/micro/test/main_test.go000066400000000000000000000013071477351342400175630ustar00rootroot00000000000000// Copyright 2023 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package micro_test

import (
	"testing"

	"go.uber.org/goleak"
)

// TestMain fails the package's test run if any test leaks goroutines.
func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m)
}
nats.go-1.41.0/micro/test/service_test.go000066400000000000000000001441061477351342400203040ustar00rootroot00000000000000// Copyright 2022-2023 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package micro_test import ( "bytes" "context" "encoding/json" "errors" "fmt" "math/rand" "reflect" "sync" "testing" "time" "github.com/nats-io/nats-server/v2/server" "github.com/nats-io/nats.go" "github.com/nats-io/nats.go/micro" natsserver "github.com/nats-io/nats-server/v2/test" ) func TestServiceBasics(t *testing.T) { s := RunServerOnPort(-1) defer s.Shutdown() nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Expected to connect to server, got %v", err) } defer nc.Close() // Stub service. doAdd := func(req micro.Request) { if rand.Intn(10) == 0 { if err := req.Error("500", "Unexpected error!", nil); err != nil { t.Fatalf("Unexpected error when sending error response: %v", err) } return } // Happy Path. // Random delay between 5-10ms time.Sleep(5*time.Millisecond + time.Duration(rand.Intn(5))*time.Millisecond) if err := req.Respond([]byte("42")); err != nil { if err := req.Error("500", "Unexpected error!", nil); err != nil { t.Fatalf("Unexpected error when sending error response: %v", err) } return } } var svcs []micro.Service // Create 5 service responders. config := micro.Config{ Name: "CoolAddService", Version: "0.1.0", Description: "Add things together", Metadata: map[string]string{"basic": "metadata"}, Endpoint: µ.EndpointConfig{ Subject: "svc.add", Handler: micro.HandlerFunc(doAdd), }, } for i := 0; i < 5; i++ { svc, err := micro.AddService(nc, config) if err != nil { t.Fatalf("Expected to create Service, got %v", err) } defer svc.Stop() svcs = append(svcs, svc) } // Now send 50 requests. 
for i := 0; i < 50; i++ { _, err := nc.Request("svc.add", []byte(`{ "x": 22, "y": 11 }`), time.Second) if err != nil { t.Fatalf("Expected a response, got %v", err) } } for _, svc := range svcs { info := svc.Info() if info.Name != "CoolAddService" { t.Fatalf("Expected %q, got %q", "CoolAddService", info.Name) } if len(info.Description) == 0 || len(info.Version) == 0 { t.Fatalf("Expected non empty description and version") } if !reflect.DeepEqual(info.Metadata, map[string]string{"basic": "metadata"}) { t.Fatalf("invalid metadata: %v", info.Metadata) } } // Make sure we can request info, 1 response. // This could be exported as well as main ServiceImpl. subj, err := micro.ControlSubject(micro.InfoVerb, "CoolAddService", "") if err != nil { t.Fatalf("Failed to building info subject %v", err) } info, err := nc.Request(subj, nil, time.Second) if err != nil { t.Fatalf("Expected a response, got %v", err) } var inf micro.Info if err := json.Unmarshal(info.Data, &inf); err != nil { t.Fatalf("Unexpected error: %v", err) } // Ping all services. Multiple responses. 
inbox := nats.NewInbox() sub, err := nc.SubscribeSync(inbox) if err != nil { t.Fatalf("subscribe failed: %s", err) } pingSubject, err := micro.ControlSubject(micro.PingVerb, "CoolAddService", "") if err != nil { t.Fatalf("Unexpected error: %v", err) } if err := nc.PublishRequest(pingSubject, inbox, nil); err != nil { t.Fatalf("Unexpected error: %v", err) } var pingCount int for { _, err := sub.NextMsg(250 * time.Millisecond) if err != nil { break } pingCount++ } if pingCount != 5 { t.Fatalf("Expected 5 ping responses, got: %d", pingCount) } // Get stats from all services statsInbox := nats.NewInbox() sub, err = nc.SubscribeSync(statsInbox) if err != nil { t.Fatalf("subscribe failed: %s", err) } statsSubject, err := micro.ControlSubject(micro.StatsVerb, "CoolAddService", "") if err != nil { t.Fatalf("Unexpected error: %v", err) } if err := nc.PublishRequest(statsSubject, statsInbox, nil); err != nil { t.Fatalf("Unexpected error: %v", err) } stats := make([]micro.Stats, 0) var requestsNum int for { resp, err := sub.NextMsg(250 * time.Millisecond) if err != nil { break } var srvStats micro.Stats if err := json.Unmarshal(resp.Data, &srvStats); err != nil { t.Fatalf("Unexpected error: %v", err) } requestsNum += srvStats.Endpoints[0].NumRequests stats = append(stats, srvStats) } if len(stats) != 5 { t.Fatalf("Expected stats for 5 services, got: %d", len(stats)) } // Services should process 50 requests total if requestsNum != 50 { t.Fatalf("Expected a total of 50 requests processed, got: %d", requestsNum) } // Reset stats for a service svcs[0].Reset() if svcs[0].Stats().Endpoints[0].NumRequests != 0 { t.Fatalf("Expected empty stats after reset; got: %+v", svcs[0].Stats()) } } func TestAddService(t *testing.T) { testHandler := func(micro.Request) {} errNats := make(chan struct{}) errService := make(chan struct{}) closedNats := make(chan struct{}) doneService := make(chan struct{}) tests := []struct { name string givenConfig micro.Config endpoints []string natsClosedHandler 
nats.ConnHandler natsErrorHandler nats.ErrHandler asyncErrorSubject string expectedPing micro.Ping withError error }{ { name: "minimal config", givenConfig: micro.Config{ Name: "test_service", Version: "0.1.0", Metadata: map[string]string{"basic": "metadata"}, }, expectedPing: micro.Ping{ Type: micro.PingResponseType, ServiceIdentity: micro.ServiceIdentity{ Name: "test_service", Version: "0.1.0", Metadata: map[string]string{"basic": "metadata"}, }, }, }, { name: "with single base endpoint", givenConfig: micro.Config{ Name: "test_service", Version: "0.1.0", Endpoint: µ.EndpointConfig{ Subject: "test", Handler: micro.HandlerFunc(testHandler), Metadata: map[string]string{"basic": "endpoint_metadata"}, }, }, expectedPing: micro.Ping{ Type: micro.PingResponseType, ServiceIdentity: micro.ServiceIdentity{ Name: "test_service", Version: "0.1.0", Metadata: map[string]string{}, }, }, }, { name: "with base endpoint and additional endpoints", givenConfig: micro.Config{ Name: "test_service", Version: "0.1.0", Endpoint: µ.EndpointConfig{ Subject: "test", Handler: micro.HandlerFunc(testHandler), }, }, endpoints: []string{"func1", "func2", "func3"}, expectedPing: micro.Ping{ Type: micro.PingResponseType, ServiceIdentity: micro.ServiceIdentity{ Name: "test_service", Version: "0.1.0", Metadata: map[string]string{}, }, }, }, { name: "with done handler, no handlers on nats connection", givenConfig: micro.Config{ Name: "test_service", Version: "0.1.0", DoneHandler: func(micro.Service) { doneService <- struct{}{} }, }, endpoints: []string{"func"}, expectedPing: micro.Ping{ Type: micro.PingResponseType, ServiceIdentity: micro.ServiceIdentity{ Name: "test_service", Version: "0.1.0", Metadata: map[string]string{}, }, }, }, { name: "with error handler, no handlers on nats connection", givenConfig: micro.Config{ Name: "test_service", Version: "0.1.0", ErrorHandler: func(micro.Service, *micro.NATSError) { errService <- struct{}{} }, }, endpoints: []string{"func"}, expectedPing: micro.Ping{ 
Type: micro.PingResponseType, ServiceIdentity: micro.ServiceIdentity{ Name: "test_service", Version: "0.1.0", Metadata: map[string]string{}, }, }, asyncErrorSubject: "func", }, { name: "with error handler, no handlers on nats connection, error on monitoring subject", givenConfig: micro.Config{ Name: "test_service", Version: "0.1.0", ErrorHandler: func(micro.Service, *micro.NATSError) { errService <- struct{}{} }, }, endpoints: []string{"func"}, expectedPing: micro.Ping{ Type: micro.PingResponseType, ServiceIdentity: micro.ServiceIdentity{ Name: "test_service", Version: "0.1.0", Metadata: map[string]string{}, }, }, asyncErrorSubject: "$SRV.PING.test_service", }, { name: "with done handler, append to nats handlers", givenConfig: micro.Config{ Name: "test_service", Version: "0.1.0", DoneHandler: func(micro.Service) { doneService <- struct{}{} }, }, endpoints: []string{"func"}, natsClosedHandler: func(c *nats.Conn) { closedNats <- struct{}{} }, natsErrorHandler: func(*nats.Conn, *nats.Subscription, error) { errNats <- struct{}{} }, expectedPing: micro.Ping{ Type: micro.PingResponseType, ServiceIdentity: micro.ServiceIdentity{ Name: "test_service", Version: "0.1.0", Metadata: map[string]string{}, }, }, asyncErrorSubject: "test.sub", }, { name: "with error handler, append to nats handlers", givenConfig: micro.Config{ Name: "test_service", Version: "0.1.0", DoneHandler: func(micro.Service) { doneService <- struct{}{} }, }, endpoints: []string{"func"}, natsClosedHandler: func(c *nats.Conn) { closedNats <- struct{}{} }, natsErrorHandler: func(*nats.Conn, *nats.Subscription, error) { errNats <- struct{}{} }, expectedPing: micro.Ping{ Type: micro.PingResponseType, ServiceIdentity: micro.ServiceIdentity{ Name: "test_service", Version: "0.1.0", Metadata: map[string]string{}, }, }, }, { name: "with error handler, append to nats handlers, error on monitoring subject", givenConfig: micro.Config{ Name: "test_service", Version: "0.1.0", DoneHandler: func(micro.Service) { doneService 
<- struct{}{} }, }, endpoints: []string{"func"}, natsClosedHandler: func(c *nats.Conn) { closedNats <- struct{}{} }, natsErrorHandler: func(*nats.Conn, *nats.Subscription, error) { errNats <- struct{}{} }, expectedPing: micro.Ping{ Type: micro.PingResponseType, ServiceIdentity: micro.ServiceIdentity{ Name: "test_service", Version: "0.1.0", Metadata: map[string]string{}, }, }, asyncErrorSubject: "$SRV.PING.TEST_SERVICE", }, { name: "validation error, invalid service name", givenConfig: micro.Config{ Name: "test_service!", Version: "0.1.0", }, endpoints: []string{"func"}, withError: micro.ErrConfigValidation, }, { name: "validation error, invalid version", givenConfig: micro.Config{ Name: "test_service", Version: "abc", }, endpoints: []string{"func"}, withError: micro.ErrConfigValidation, }, { name: "validation error, invalid endpoint subject", givenConfig: micro.Config{ Name: "test_service", Version: "0.0.1", Endpoint: µ.EndpointConfig{ Subject: "endpoint subject", }, }, withError: micro.ErrConfigValidation, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { s := RunServerOnPort(-1) defer s.Shutdown() nc, err := nats.Connect(s.ClientURL(), nats.ErrorHandler(test.natsErrorHandler), nats.ClosedHandler(test.natsClosedHandler), ) if err != nil { t.Fatalf("Expected to connect to server, got %v", err) } defer nc.Close() // cleanup handlers since we invoke them manually defer nc.SetClosedHandler(nil) defer nc.SetErrorHandler(nil) srv, err := micro.AddService(nc, test.givenConfig) if test.withError != nil { if !errors.Is(err, test.withError) { t.Fatalf("Expected error: %v; got: %v", test.withError, err) } return } if err != nil { t.Fatalf("Unexpected error: %v", err) } for _, endpoint := range test.endpoints { if err := srv.AddEndpoint(endpoint, micro.HandlerFunc(testHandler)); err != nil { t.Fatalf("Unexpected error: %v", err) } } info := srv.Info() subjectsNum := len(test.endpoints) if test.givenConfig.Endpoint != nil { subjectsNum += 1 } if 
subjectsNum != len(info.Endpoints) { t.Fatalf("Invalid number of registered endpoints; want: %d; got: %d", subjectsNum, len(info.Endpoints)) } pingSubject, err := micro.ControlSubject(micro.PingVerb, info.Name, info.ID) if err != nil { t.Fatalf("Unexpected error: %v", err) } pingResp, err := nc.Request(pingSubject, nil, 1*time.Second) if err != nil { t.Fatalf("Unexpected error: %v", err) } var ping micro.Ping if err := json.Unmarshal(pingResp.Data, &ping); err != nil { t.Fatalf("Unexpected error: %v", err) } test.expectedPing.ID = info.ID if !reflect.DeepEqual(test.expectedPing, ping) { t.Fatalf("Invalid ping response; want: %+v; got: %+v", test.expectedPing, ping) } if test.givenConfig.DoneHandler != nil { go nc.Opts.ClosedCB(nc) select { case <-doneService: case <-time.After(1 * time.Second): t.Fatalf("Timeout on DoneHandler") } if test.natsClosedHandler != nil { select { case <-closedNats: case <-time.After(1 * time.Second): t.Fatalf("Timeout on ClosedHandler") } } } if test.givenConfig.ErrorHandler != nil { go nc.Opts.AsyncErrorCB(nc, &nats.Subscription{Subject: test.asyncErrorSubject}, errors.New("oops")) select { case <-errService: case <-time.After(1 * time.Second): t.Fatalf("Timeout on ErrorHandler") } if test.natsErrorHandler != nil { select { case <-errNats: case <-time.After(1 * time.Second): t.Fatalf("Timeout on AsyncErrHandler") } } } if err := srv.Stop(); err != nil { t.Fatalf("Unexpected error when stopping the service: %v", err) } if test.natsClosedHandler != nil { go nc.Opts.ClosedCB(nc) select { case <-doneService: t.Fatalf("Expected to restore nats closed handler") case <-time.After(50 * time.Millisecond): } select { case <-closedNats: case <-time.After(1 * time.Second): t.Fatalf("Timeout on ClosedHandler") } } if test.natsErrorHandler != nil { go nc.Opts.AsyncErrorCB(nc, &nats.Subscription{Subject: test.asyncErrorSubject}, errors.New("oops")) select { case <-errService: t.Fatalf("Expected to restore nats error handler") case <-time.After(50 * 
time.Millisecond): } select { case <-errNats: case <-time.After(1 * time.Second): t.Fatalf("Timeout on AsyncErrHandler") } } }) } } func TestErrHandlerSubjectMatch(t *testing.T) { tests := []struct { name string endpointSubject string errSubject string expectServiceErr bool }{ { name: "exact match", endpointSubject: "foo.bar.baz", errSubject: "foo.bar.baz", expectServiceErr: true, }, { name: "match with *", endpointSubject: "foo.*.baz", errSubject: "foo.bar.baz", expectServiceErr: true, }, { name: "match with >", endpointSubject: "foo.bar.>", errSubject: "foo.bar.baz.1", expectServiceErr: true, }, { name: "monitoring handler", endpointSubject: "foo.bar.>", errSubject: "$SRV.PING", expectServiceErr: true, }, { name: "endpoint longer than subject", endpointSubject: "foo.bar.baz", errSubject: "foo.bar", expectServiceErr: false, }, { name: "no match", endpointSubject: "foo.bar.baz", errSubject: "foo.baz.bar", expectServiceErr: false, }, { name: "no match with *", endpointSubject: "foo.*.baz", errSubject: "foo.bar.foo", expectServiceErr: false, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { coreNatsAsyncErrors := []nats.ErrHandler{nil, func(c *nats.Conn, s *nats.Subscription, err error) {}} for _, cb := range coreNatsAsyncErrors { errChan := make(chan struct{}) errHandler := func(s micro.Service, err *micro.NATSError) { errChan <- struct{}{} } s := RunServerOnPort(-1) defer s.Shutdown() nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Expected to connect to server, got %v", err) } defer nc.Close() nc.SetErrorHandler(cb) svc, err := micro.AddService(nc, micro.Config{ Name: "test_service", Version: "0.0.1", ErrorHandler: micro.ErrHandler(errHandler), Endpoint: µ.EndpointConfig{ Subject: test.endpointSubject, Handler: micro.HandlerFunc(func(r micro.Request) {}), }, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer svc.Stop() go nc.Opts.AsyncErrorCB(nc, &nats.Subscription{Subject: test.errSubject}, errors.New("oops")) 
if test.expectServiceErr { select { case <-errChan: case <-time.After(10 * time.Millisecond): t.Fatalf("Expected service error callback") } } else { select { case <-errChan: t.Fatalf("Expected no service error callback") case <-time.After(10 * time.Millisecond): } } } }) } } func TestGroups(t *testing.T) { tests := []struct { name string endpointName string groups []string expectedEndpoint micro.EndpointInfo }{ { name: "no groups", endpointName: "foo", expectedEndpoint: micro.EndpointInfo{ Name: "foo", Subject: "foo", QueueGroup: "q", }, }, { name: "single group", endpointName: "foo", groups: []string{"g1"}, expectedEndpoint: micro.EndpointInfo{ Name: "foo", Subject: "g1.foo", QueueGroup: "q", }, }, { name: "single empty group", endpointName: "foo", groups: []string{""}, expectedEndpoint: micro.EndpointInfo{ Name: "foo", Subject: "foo", QueueGroup: "q", }, }, { name: "empty groups", endpointName: "foo", groups: []string{"", "g1", ""}, expectedEndpoint: micro.EndpointInfo{ Name: "foo", Subject: "g1.foo", QueueGroup: "q", }, }, { name: "multiple groups", endpointName: "foo", groups: []string{"g1", "g2", "g3"}, expectedEndpoint: micro.EndpointInfo{ Name: "foo", Subject: "g1.g2.g3.foo", QueueGroup: "q", }, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { s := RunServerOnPort(-1) defer s.Shutdown() nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Expected to connect to server, got %v", err) } defer nc.Close() srv, err := micro.AddService(nc, micro.Config{ Name: "test_service", Version: "0.0.1", }) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer srv.Stop() if len(test.groups) > 0 { group := srv.AddGroup(test.groups[0]) for _, g := range test.groups[1:] { group = group.AddGroup(g) } err = group.AddEndpoint(test.endpointName, micro.HandlerFunc(func(r micro.Request) {})) if err != nil { t.Fatalf("Unexpected error: %v", err) } } else { err = srv.AddEndpoint(test.endpointName, micro.HandlerFunc(func(r micro.Request) {})) if 
err != nil { t.Fatalf("Unexpected error: %v", err) } } info := srv.Info() if len(info.Endpoints) != 1 { t.Fatalf("Expected 1 registered endpoint; got: %d", len(info.Endpoints)) } if !reflect.DeepEqual(info.Endpoints[0], test.expectedEndpoint) { t.Fatalf("Invalid endpoint; want: %s, got: %s", test.expectedEndpoint, info.Endpoints[0]) } }) } } func TestMonitoringHandlers(t *testing.T) { s := RunServerOnPort(-1) defer s.Shutdown() nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Expected to connect to server, got %v", err) } defer nc.Close() asyncErr := make(chan struct{}) errHandler := func(s micro.Service, n *micro.NATSError) { asyncErr <- struct{}{} } config := micro.Config{ Name: "test_service", Version: "0.1.0", ErrorHandler: errHandler, Endpoint: µ.EndpointConfig{ Subject: "test.func", Handler: micro.HandlerFunc(func(r micro.Request) {}), Metadata: map[string]string{"basic": "schema"}, }, } srv, err := micro.AddService(nc, config) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer func() { srv.Stop() if !srv.Stopped() { t.Fatalf("Expected service to be stopped") } }() info := srv.Info() tests := []struct { name string subject string withError bool expectedResponse any }{ { name: "PING all", subject: "$SRV.PING", expectedResponse: micro.Ping{ Type: micro.PingResponseType, ServiceIdentity: micro.ServiceIdentity{ Name: "test_service", Version: "0.1.0", ID: info.ID, Metadata: map[string]string{}, }, }, }, { name: "PING name", subject: "$SRV.PING.test_service", expectedResponse: micro.Ping{ Type: micro.PingResponseType, ServiceIdentity: micro.ServiceIdentity{ Name: "test_service", Version: "0.1.0", ID: info.ID, Metadata: map[string]string{}, }, }, }, { name: "PING ID", subject: fmt.Sprintf("$SRV.PING.test_service.%s", info.ID), expectedResponse: micro.Ping{ Type: micro.PingResponseType, ServiceIdentity: micro.ServiceIdentity{ Name: "test_service", Version: "0.1.0", ID: info.ID, Metadata: map[string]string{}, }, }, }, { name: "INFO all", 
subject: "$SRV.INFO", expectedResponse: micro.Info{ Type: micro.InfoResponseType, ServiceIdentity: micro.ServiceIdentity{ Name: "test_service", Version: "0.1.0", ID: info.ID, Metadata: map[string]string{}, }, Endpoints: []micro.EndpointInfo{ { Name: "default", Subject: "test.func", QueueGroup: "q", Metadata: map[string]string{"basic": "schema"}, }, }, }, }, { name: "INFO name", subject: "$SRV.INFO.test_service", expectedResponse: micro.Info{ Type: micro.InfoResponseType, ServiceIdentity: micro.ServiceIdentity{ Name: "test_service", Version: "0.1.0", ID: info.ID, Metadata: map[string]string{}, }, Endpoints: []micro.EndpointInfo{ { Name: "default", Subject: "test.func", QueueGroup: "q", Metadata: map[string]string{"basic": "schema"}, }, }, }, }, { name: "INFO ID", subject: fmt.Sprintf("$SRV.INFO.test_service.%s", info.ID), expectedResponse: micro.Info{ Type: micro.InfoResponseType, ServiceIdentity: micro.ServiceIdentity{ Name: "test_service", Version: "0.1.0", ID: info.ID, Metadata: map[string]string{}, }, Endpoints: []micro.EndpointInfo{ { Name: "default", Subject: "test.func", QueueGroup: "q", Metadata: map[string]string{"basic": "schema"}, }, }, }, }, { name: "PING error", subject: "$SRV.PING", withError: true, }, { name: "INFO error", subject: "$SRV.INFO", withError: true, }, { name: "STATS error", subject: "$SRV.STATS", withError: true, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { if test.withError { // use publish instead of request, so Respond will fail inside the handler if err := nc.Publish(test.subject, nil); err != nil { t.Fatalf("Unexpected error: %v", err) } select { case <-asyncErr: return case <-time.After(1 * time.Second): t.Fatalf("Timeout waiting for async error") } return } resp, err := nc.Request(test.subject, nil, 1*time.Second) if err != nil { t.Fatalf("Unexpected error: %v", err) } respMap := make(map[string]any) if err := json.Unmarshal(resp.Data, &respMap); err != nil { t.Fatalf("Unexpected error: %v", err) } 
expectedResponseJSON, err := json.Marshal(test.expectedResponse) if err != nil { t.Fatalf("Unexpected error: %v", err) } expectedRespMap := make(map[string]any) if err := json.Unmarshal(expectedResponseJSON, &expectedRespMap); err != nil { t.Fatalf("Unexpected error: %v", err) } if !reflect.DeepEqual(respMap, expectedRespMap) { t.Fatalf("Invalid response; want: %+v; got: %+v", expectedRespMap, respMap) } }) } } func TestContextHandler(t *testing.T) { s := RunServerOnPort(-1) defer s.Shutdown() nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Expected to connect to server, got %v", err) } defer nc.Close() ctx, cancel := context.WithCancel(context.Background()) defer cancel() type key string ctx = context.WithValue(ctx, key("key"), []byte("val")) handler := func(ctx context.Context, req micro.Request) { select { case <-ctx.Done(): req.Error("400", "context canceled", nil) default: v := ctx.Value(key("key")) req.Respond(v.([]byte)) } } config := micro.Config{ Name: "test_service", Version: "0.1.0", Endpoint: µ.EndpointConfig{ Subject: "test.func", Handler: micro.ContextHandler(ctx, handler), }, } srv, err := micro.AddService(nc, config) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer srv.Stop() resp, err := nc.Request("test.func", nil, 1*time.Second) if err != nil { t.Fatalf("Unexpected error: %s", err) } if string(resp.Data) != "val" { t.Fatalf("Invalid response; want: %q; got: %q", "val", string(resp.Data)) } cancel() resp, err = nc.Request("test.func", nil, 1*time.Second) if err != nil { t.Fatalf("Unexpected error: %s", err) } if resp.Header.Get(micro.ErrorCodeHeader) != "400" { t.Fatalf("Expected error response after canceling context; got: %q", string(resp.Data)) } } func TestAddEndpoint_Concurrency(t *testing.T) { s := RunServerOnPort(-1) defer s.Shutdown() nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Expected to connect to server, got %v", err) } defer nc.Close() ctx := context.Background() handler := func(ctx 
context.Context, req micro.Request) { req.RespondJSON(map[string]any{"hello": "world"}) } config := micro.Config{ Name: "test_service", Version: "0.1.0", } srv, err := micro.AddService(nc, config) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer srv.Stop() res := make(chan error, 10) wg := sync.WaitGroup{} wg.Add(10) // now add a few endpoints concurrently // and make sure they are added successfully // and there is no race for i := 0; i < 10; i++ { go func(i int) { wg.Wait() res <- srv.AddEndpoint(fmt.Sprintf("test%d", i), micro.ContextHandler(ctx, handler)) }(i) // after all goroutines are started, release the lock } wg.Add(-10) for i := 0; i < 10; i++ { select { case err := <-res: if err != nil { t.Fatalf("Unexpected error: %s", err) } case <-time.After(1 * time.Second): t.Fatalf("Timeout waiting for endpoint to be added") } } if len(srv.Info().Endpoints) != 10 { t.Fatalf("Expected 11 endpoints, got: %d", len(srv.Info().Endpoints)) } } func TestServiceStats(t *testing.T) { handler := func(r micro.Request) { r.Respond([]byte("ok")) } tests := []struct { name string config micro.Config expectedStats map[string]any }{ { name: "stats handler", config: micro.Config{ Name: "test_service", Version: "0.1.0", }, }, { name: "with stats handler", config: micro.Config{ Name: "test_service", Version: "0.1.0", StatsHandler: func(e *micro.Endpoint) any { return map[string]any{ "key": "val", } }, }, expectedStats: map[string]any{ "key": "val", }, }, { name: "with default endpoint", config: micro.Config{ Name: "test_service", Version: "0.1.0", Endpoint: µ.EndpointConfig{ Subject: "test.func", Handler: micro.HandlerFunc(handler), Metadata: map[string]string{"test": "value"}, }, }, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { s := RunServerOnPort(-1) defer s.Shutdown() nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Expected to connect to server, got %v", err) } defer nc.Close() srv, err := micro.AddService(nc, test.config) 
if err != nil { t.Fatalf("Unexpected error: %v", err) } if test.config.Endpoint == nil { opts := []micro.EndpointOpt{micro.WithEndpointSubject("test.func")} if err := srv.AddEndpoint("func", micro.HandlerFunc(handler), opts...); err != nil { t.Fatalf("Unexpected error: %v", err) } } defer srv.Stop() for i := 0; i < 10; i++ { if _, err := nc.Request("test.func", []byte("msg"), time.Second); err != nil { t.Fatalf("Unexpected error: %v", err) } } // Malformed request, missing reply subjtct // This should be reflected in errors if err := nc.Publish("test.func", []byte("err")); err != nil { t.Fatalf("Unexpected error: %v", err) } time.Sleep(10 * time.Millisecond) info := srv.Info() resp, err := nc.Request(fmt.Sprintf("$SRV.STATS.test_service.%s", info.ID), nil, 1*time.Second) if err != nil { t.Fatalf("Unexpected error: %v", err) } var stats micro.Stats if err := json.Unmarshal(resp.Data, &stats); err != nil { t.Fatalf("Unexpected error: %v", err) } if len(stats.Endpoints) != 1 { t.Fatalf("Unexpected number of endpoints: want: %d; got: %d", 1, len(stats.Endpoints)) } if stats.Name != info.Name { t.Errorf("Unexpected service name; want: %s; got: %s", info.Name, stats.Name) } if stats.ID != info.ID { t.Errorf("Unexpected service name; want: %s; got: %s", info.ID, stats.ID) } if test.config.Endpoint == nil && stats.Endpoints[0].Name != "func" { t.Errorf("Invalid endpoint name; want: %s; got: %s", "func", stats.Endpoints[0].Name) } if test.config.Endpoint != nil && stats.Endpoints[0].Name != "default" { t.Errorf("Invalid endpoint name; want: %s; got: %s", "default", stats.Endpoints[0].Name) } if stats.Endpoints[0].Subject != "test.func" { t.Errorf("Invalid endpoint subject; want: %s; got: %s", "test.func", stats.Endpoints[0].Subject) } if stats.Endpoints[0].NumRequests != 11 { t.Errorf("Unexpected num_requests; want: 11; got: %d", stats.Endpoints[0].NumRequests) } if stats.Endpoints[0].NumErrors != 1 { t.Errorf("Unexpected num_errors; want: 1; got: %d", 
stats.Endpoints[0].NumErrors) } if stats.Endpoints[0].AverageProcessingTime == 0 { t.Errorf("Expected non-empty AverageProcessingTime") } if stats.Endpoints[0].ProcessingTime == 0 { t.Errorf("Expected non-empty ProcessingTime") } if stats.Started.IsZero() { t.Errorf("Expected non-empty start time") } if stats.Type != micro.StatsResponseType { t.Errorf("Invalid response type; want: %s; got: %s", micro.StatsResponseType, stats.Type) } if test.expectedStats != nil { var data map[string]any if err := json.Unmarshal(stats.Endpoints[0].Data, &data); err != nil { t.Fatalf("Unexpected error: %v", err) } if !reflect.DeepEqual(data, test.expectedStats) { t.Fatalf("Invalid data from stats handler; want: %v; got: %v", test.expectedStats, data) } } }) } } func TestRequestRespond(t *testing.T) { type x struct { A string `json:"a"` B int `json:"b"` } tests := []struct { name string respondData any respondHeaders micro.Headers errDescription string errCode string errData []byte expectedMessage string expectedCode string expectedResponse []byte withRespondError error }{ { name: "byte response", respondData: []byte("OK"), expectedResponse: []byte("OK"), }, { name: "byte response, with headers", respondHeaders: micro.Headers{"key": []string{"value"}}, respondData: []byte("OK"), expectedResponse: []byte("OK"), }, { name: "byte response, connection closed", respondData: []byte("OK"), withRespondError: micro.ErrRespond, }, { name: "struct response", respondData: x{"abc", 5}, expectedResponse: []byte(`{"a":"abc","b":5}`), }, { name: "invalid response data", respondData: func() {}, withRespondError: micro.ErrMarshalResponse, }, { name: "generic error", errDescription: "oops", errCode: "500", errData: []byte("error!"), expectedMessage: "oops", expectedCode: "500", }, { name: "generic error, with headers", respondHeaders: micro.Headers{"key": []string{"value"}}, errDescription: "oops", errCode: "500", errData: []byte("error!"), expectedMessage: "oops", expectedCode: "500", }, { name: "error 
without response payload", errDescription: "oops", errCode: "500", expectedMessage: "oops", expectedCode: "500", }, { name: "missing error code", errDescription: "oops", withRespondError: micro.ErrArgRequired, }, { name: "missing error description", errCode: "500", withRespondError: micro.ErrArgRequired, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { s := RunServerOnPort(-1) defer s.Shutdown() nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Expected to connect to server, got %v", err) } defer nc.Close() respData := test.respondData respError := test.withRespondError errCode := test.errCode errDesc := test.errDescription errData := test.errData handler := func(req micro.Request) { if errors.Is(test.withRespondError, micro.ErrRespond) { nc.Close() return } if val := req.Headers().Get("key"); val != "value" { t.Fatalf("Expected headers in the request") } if !bytes.Equal(req.Data(), []byte("req")) { t.Fatalf("Invalid request data; want: %q; got: %q", "req", req.Data()) } if errCode == "" && errDesc == "" { if resp, ok := respData.([]byte); ok { err := req.Respond(resp, micro.WithHeaders(test.respondHeaders)) if respError != nil { if !errors.Is(err, respError) { t.Fatalf("Expected error: %v; got: %v", respError, err) } return } if err != nil { t.Fatalf("Unexpected error when sending response: %v", err) } } else { err := req.RespondJSON(respData, micro.WithHeaders(test.respondHeaders)) if respError != nil { if !errors.Is(err, respError) { t.Fatalf("Expected error: %v; got: %v", respError, err) } return } if err != nil { t.Fatalf("Unexpected error when sending response: %v", err) } } return } err := req.Error(errCode, errDesc, errData, micro.WithHeaders(test.respondHeaders)) if respError != nil { if !errors.Is(err, respError) { t.Fatalf("Expected error: %v; got: %v", respError, err) } return } if err != nil { t.Fatalf("Unexpected error when sending response: %v", err) } } svc, err := micro.AddService(nc, micro.Config{ Name: 
"CoolService", Version: "0.1.0", Description: "test service", Endpoint: µ.EndpointConfig{ Subject: "test.func", Handler: micro.HandlerFunc(handler), }, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer svc.Stop() nfo := svc.Info() if nfo.Metadata == nil { t.Fatalf("Produced nil metadata") } resp, err := nc.RequestMsg(&nats.Msg{ Subject: "test.func", Data: []byte("req"), Header: nats.Header{"key": []string{"value"}}, }, 50*time.Millisecond) if test.withRespondError != nil { if err == nil { t.Fatalf("Expected error when receiving response") } return } if err != nil { t.Fatalf("request error: %v", err) } if test.errCode != "" { description := resp.Header.Get("Nats-Service-Error") if description != test.expectedMessage { t.Fatalf("Invalid response message; want: %q; got: %q", test.expectedMessage, description) } expectedHeaders := micro.Headers{ "Nats-Service-Error-Code": []string{resp.Header.Get("Nats-Service-Error-Code")}, "Nats-Service-Error": []string{resp.Header.Get("Nats-Service-Error")}, } for k, v := range test.respondHeaders { expectedHeaders[k] = v } if !reflect.DeepEqual(expectedHeaders, micro.Headers(resp.Header)) { t.Fatalf("Invalid response headers; want: %v; got: %v", test.respondHeaders, resp.Header) } return } if err != nil { t.Fatalf("Unexpected error: %v", err) } if !bytes.Equal(bytes.TrimSpace(resp.Data), bytes.TrimSpace(test.expectedResponse)) { t.Fatalf("Invalid response; want: %s; got: %s", string(test.expectedResponse), string(resp.Data)) } if !reflect.DeepEqual(test.respondHeaders, micro.Headers(resp.Header)) { t.Fatalf("Invalid response headers; want: %v; got: %v", test.respondHeaders, resp.Header) } }) } } func RunServerOnPort(port int) *server.Server { opts := natsserver.DefaultTestOptions opts.Port = port return RunServerWithOptions(&opts) } func RunServerWithOptions(opts *server.Options) *server.Server { return natsserver.RunServer(opts) } func TestControlSubject(t *testing.T) { tests := []struct { name string verb 
micro.Verb srvName string id string expectedSubject string withError error }{ { name: "PING ALL", verb: micro.PingVerb, expectedSubject: "$SRV.PING", }, { name: "PING name", verb: micro.PingVerb, srvName: "test", expectedSubject: "$SRV.PING.test", }, { name: "PING id", verb: micro.PingVerb, srvName: "test", id: "123", expectedSubject: "$SRV.PING.test.123", }, { name: "invalid verb", verb: micro.Verb(100), withError: micro.ErrVerbNotSupported, }, { name: "name not provided", verb: micro.PingVerb, srvName: "", id: "123", withError: micro.ErrServiceNameRequired, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { res, err := micro.ControlSubject(test.verb, test.srvName, test.id) if test.withError != nil { if !errors.Is(err, test.withError) { t.Fatalf("Expected error: %v; got: %v", test.withError, err) } return } if err != nil { t.Errorf("Unexpected error: %v", err) } if res != test.expectedSubject { t.Errorf("Invalid subject; want: %q; got: %q", test.expectedSubject, res) } }) } } func TestCustomQueueGroup(t *testing.T) { tests := []struct { name string endpointInit func(*testing.T, *nats.Conn) micro.Service expectedQueueGroups map[string]string }{ { name: "default queue group", endpointInit: func(t *testing.T, nc *nats.Conn) micro.Service { srv, err := micro.AddService(nc, micro.Config{ Name: "test_service", Version: "0.0.1", Endpoint: µ.EndpointConfig{ Subject: "foo", Handler: micro.HandlerFunc(func(r micro.Request) {}), }, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } err = srv.AddEndpoint("bar", micro.HandlerFunc(func(r micro.Request) {})) if err != nil { t.Fatalf("Unexpected error: %v", err) } return srv }, expectedQueueGroups: map[string]string{ "default": "q", "bar": "q", }, }, { name: "custom queue group on service config", endpointInit: func(t *testing.T, nc *nats.Conn) micro.Service { srv, err := micro.AddService(nc, micro.Config{ Name: "test_service", Version: "0.0.1", QueueGroup: "custom", Endpoint: µ.EndpointConfig{ 
Subject: "foo", Handler: micro.HandlerFunc(func(r micro.Request) {}), }, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } // add endpoint on service directly, should have the same queue group err = srv.AddEndpoint("bar", micro.HandlerFunc(func(r micro.Request) {})) if err != nil { t.Fatalf("Unexpected error: %v", err) } // add group with queue group from service config g1 := srv.AddGroup("g1") // add endpoint on group, should have queue group from service config err = g1.AddEndpoint("baz", micro.HandlerFunc(func(r micro.Request) {})) if err != nil { t.Fatalf("Unexpected error: %v", err) } return srv }, expectedQueueGroups: map[string]string{ "default": "custom", "bar": "custom", "baz": "custom", }, }, { name: "disable queue group on service config", endpointInit: func(t *testing.T, nc *nats.Conn) micro.Service { srv, err := micro.AddService(nc, micro.Config{ Name: "test_service", Version: "0.0.1", QueueGroupDisabled: true, Endpoint: µ.EndpointConfig{ Subject: "foo", Handler: micro.HandlerFunc(func(r micro.Request) {}), }, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } // add endpoint on service directly, should have inherited disabled queue group err = srv.AddEndpoint("bar", micro.HandlerFunc(func(r micro.Request) {})) if err != nil { t.Fatalf("Unexpected error: %v", err) } // add group with queue group from service config g1 := srv.AddGroup("g1") // add endpoint on group, should have queue group disabled err = g1.AddEndpoint("baz", micro.HandlerFunc(func(r micro.Request) {})) if err != nil { t.Fatalf("Unexpected error: %v", err) } // add endpoint on a service with queue group enabled err = srv.AddEndpoint("qux", micro.HandlerFunc(func(r micro.Request) {}), micro.WithEndpointQueueGroup("q-qux")) if err != nil { t.Fatalf("Unexpected error: %v", err) } // add endpoint on group and set custom queue group err = g1.AddEndpoint("quux", micro.HandlerFunc(func(r micro.Request) {}), micro.WithEndpointQueueGroup("q-quux")) if err != nil { 
t.Fatalf("Unexpected error: %v", err) } return srv }, expectedQueueGroups: map[string]string{ "default": "", "bar": "", "baz": "", "qux": "q-qux", "quux": "q-quux", }, }, { name: "overwriting queue groups", endpointInit: func(t *testing.T, nc *nats.Conn) micro.Service { srv, err := micro.AddService(nc, micro.Config{ Name: "test_service", Version: "0.0.1", QueueGroup: "q-config", Endpoint: µ.EndpointConfig{ Subject: "foo", QueueGroup: "q-default", Handler: micro.HandlerFunc(func(r micro.Request) {}), }, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } g1 := srv.AddGroup("g1", micro.WithGroupQueueGroup("q-g1")) // should have the same queue group as the parent group g2 := g1.AddGroup("g2") // overwrite parent group queue group g3 := g2.AddGroup("g3", micro.WithGroupQueueGroup("q-g3")) // disable queue group on group g4 := g2.AddGroup("g4", micro.WithGroupQueueGroupDisabled()) // add endpoint on service directly, overwriting the queue group err = srv.AddEndpoint("bar", micro.HandlerFunc(func(r micro.Request) {}), micro.WithEndpointQueueGroup("q-bar")) if err != nil { t.Fatalf("Unexpected error: %v", err) } // add endpoint on group, should have queue group from g1 err = g2.AddEndpoint("baz", micro.HandlerFunc(func(r micro.Request) {})) if err != nil { t.Fatalf("Unexpected error: %v", err) } // add endpoint on group, overwriting the queue group err = g2.AddEndpoint("qux", micro.HandlerFunc(func(r micro.Request) {}), micro.WithEndpointQueueGroup("q-qux")) if err != nil { t.Fatalf("Unexpected error: %v", err) } // add endpoint on group, should have queue group from g3 err = g3.AddEndpoint("quux", micro.HandlerFunc(func(r micro.Request) {})) if err != nil { t.Fatalf("Unexpected error: %v", err) } err = g4.AddEndpoint("foo-disabled", micro.HandlerFunc(func(r micro.Request) {})) if err != nil { t.Fatalf("Unexpected error: %v", err) } return srv }, expectedQueueGroups: map[string]string{ "default": "q-default", "bar": "q-bar", "baz": "q-g1", "qux": "q-qux", "quux": 
"q-g3", "foo-disabled": "", }, }, { name: "empty queue group in option, inherit from parent", endpointInit: func(t *testing.T, nc *nats.Conn) micro.Service { srv, err := micro.AddService(nc, micro.Config{ Name: "test_service", Version: "0.0.1", QueueGroup: "q-config", }) if err != nil { t.Fatalf("Unexpected error: %v", err) } // add endpoint on service directly, overwriting the queue group err = srv.AddEndpoint("bar", micro.HandlerFunc(func(r micro.Request) {}), micro.WithEndpointQueueGroup("")) if err != nil { t.Fatalf("Unexpected error: %v", err) } return srv }, expectedQueueGroups: map[string]string{ "bar": "q-config", }, }, { name: "invalid queue group on service config", endpointInit: func(t *testing.T, nc *nats.Conn) micro.Service { _, err := micro.AddService(nc, micro.Config{ Name: "test_service", Version: "0.0.1", QueueGroup: ">.abc", Endpoint: µ.EndpointConfig{ Subject: "foo", Handler: micro.HandlerFunc(func(r micro.Request) {}), }, }) if !errors.Is(err, micro.ErrConfigValidation) { t.Fatalf("Expected error: %v; got: %v", micro.ErrConfigValidation, err) } return nil }, }, { name: "invalid queue group on endpoint", endpointInit: func(t *testing.T, nc *nats.Conn) micro.Service { _, err := micro.AddService(nc, micro.Config{ Name: "test_service", Version: "0.0.1", Endpoint: µ.EndpointConfig{ Subject: "foo", QueueGroup: ">.abc", Handler: micro.HandlerFunc(func(r micro.Request) {}), }, }) if !errors.Is(err, micro.ErrConfigValidation) { t.Fatalf("Expected error: %v; got: %v", micro.ErrConfigValidation, err) } return nil }, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { s := RunServerOnPort(-1) defer s.Shutdown() nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Expected to connect to server, got %v", err) } defer nc.Close() srv := test.endpointInit(t, nc) if srv == nil { return } defer srv.Stop() info := srv.Info() endpoints := make(map[string]micro.EndpointInfo) for _, e := range info.Endpoints { endpoints[e.Name] = e } if 
len(endpoints) != len(test.expectedQueueGroups) { t.Fatalf("Expected %d endpoints; got: %d", len(test.expectedQueueGroups), len(endpoints)) } for name, expectedGroup := range test.expectedQueueGroups { if endpoints[name].QueueGroup != expectedGroup { t.Fatalf("Invalid queue group for endpoint %q; want: %q; got: %q", name, expectedGroup, endpoints[name].QueueGroup) } } stats := srv.Stats() // make sure the same queue groups are on stats endpointStats := make(map[string]*micro.EndpointStats) for _, e := range stats.Endpoints { endpointStats[e.Name] = e } if len(endpointStats) != len(test.expectedQueueGroups) { t.Fatalf("Expected %d endpoints; got: %d", len(test.expectedQueueGroups), len(endpointStats)) } for name, expectedGroup := range test.expectedQueueGroups { if endpointStats[name].QueueGroup != expectedGroup { t.Fatalf("Invalid queue group for endpoint %q; want: %q; got: %q", name, expectedGroup, endpointStats[name].QueueGroup) } } }) } } func TestCustomQueueGroupMultipleResponses(t *testing.T) { s := RunServerOnPort(-1) defer s.Shutdown() nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Expected to connect to server, got %v", err) } defer nc.Close() for i := 0; i < 5; i++ { f := func(i int) func(r micro.Request) { return func(r micro.Request) { time.Sleep(10 * time.Millisecond) r.Respond([]byte(fmt.Sprintf("%d", i))) } } service, err := micro.AddService(nc, micro.Config{ Name: "test_service", Version: "0.0.1", QueueGroup: fmt.Sprintf("q-%d", i), Endpoint: µ.EndpointConfig{ Subject: "foo", Handler: micro.HandlerFunc(f(i)), }, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer service.Stop() } err = nc.PublishRequest("foo", "rply", []byte("req")) if err != nil { t.Fatalf("Unexpected error: %v", err) } sub, err := nc.SubscribeSync("rply") if err != nil { t.Fatalf("Unexpected error: %v", err) } expectedResponses := map[string]bool{ "0": false, "1": false, "2": false, "3": false, "4": false, } defer sub.Unsubscribe() for i := 0; i < 5; 
i++ { msg, err := sub.NextMsg(1 * time.Second) if err != nil { t.Fatalf("Unexpected error: %v", err) } expectedResponses[string(msg.Data)] = true } msg, err := sub.NextMsg(100 * time.Millisecond) if err == nil { t.Fatalf("Unexpected message: %v", string(msg.Data)) } for k, v := range expectedResponses { if !v { t.Fatalf("Did not receive response from service %s", k) } } } func TestDisableQueueGroup(t *testing.T) { s := RunServerOnPort(-1) defer s.Shutdown() nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Expected to connect to server, got %v", err) } defer nc.Close() wg := sync.WaitGroup{} // Create 5 service responders. config := micro.Config{ Name: "CoolAddService", Version: "0.1.0", Description: "Add things together", Metadata: map[string]string{"basic": "metadata"}, Endpoint: µ.EndpointConfig{ Subject: "svc.add", Handler: micro.HandlerFunc(func(r micro.Request) { r.Respond(nil) wg.Done() }), }, QueueGroupDisabled: true, } for range 10 { srv, err := micro.AddService(nc, config) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer srv.Stop() } wg.Add(10) // Send a request to the service. if err = nc.PublishRequest("svc.add", "rply", []byte("req")); err != nil { t.Fatalf("Unexpected error: %v", err) } wg.Wait() } nats.go-1.41.0/nats.go000066400000000000000000005010771477351342400144660ustar00rootroot00000000000000// Copyright 2012-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
// A Go client for the NATS messaging system (https://nats.io). package nats import ( "bufio" "bytes" "crypto/tls" "crypto/x509" "encoding/base64" "encoding/json" "errors" "fmt" "io" "math/rand" "net" "net/http" "net/textproto" "net/url" "os" "path/filepath" "regexp" "runtime" "strconv" "strings" "sync" "sync/atomic" "time" "github.com/nats-io/nkeys" "github.com/nats-io/nuid" "github.com/nats-io/nats.go/util" ) // Default Constants const ( Version = "1.41.0" DefaultURL = "nats://127.0.0.1:4222" DefaultPort = 4222 DefaultMaxReconnect = 60 DefaultReconnectWait = 2 * time.Second DefaultReconnectJitter = 100 * time.Millisecond DefaultReconnectJitterTLS = time.Second DefaultTimeout = 2 * time.Second DefaultPingInterval = 2 * time.Minute DefaultMaxPingOut = 2 DefaultMaxChanLen = 64 * 1024 // 64k DefaultReconnectBufSize = 8 * 1024 * 1024 // 8MB RequestChanLen = 8 DefaultDrainTimeout = 30 * time.Second DefaultFlusherTimeout = time.Minute LangString = "go" ) const ( // STALE_CONNECTION is for detection and proper handling of stale connections. STALE_CONNECTION = "stale connection" // PERMISSIONS_ERR is for when nats server subject authorization has failed. PERMISSIONS_ERR = "permissions violation" // AUTHORIZATION_ERR is for when nats server user authorization has failed. AUTHORIZATION_ERR = "authorization violation" // AUTHENTICATION_EXPIRED_ERR is for when nats server user authorization has expired. AUTHENTICATION_EXPIRED_ERR = "user authentication expired" // AUTHENTICATION_REVOKED_ERR is for when user authorization has been revoked. AUTHENTICATION_REVOKED_ERR = "user authentication revoked" // ACCOUNT_AUTHENTICATION_EXPIRED_ERR is for when nats server account authorization has expired. 
ACCOUNT_AUTHENTICATION_EXPIRED_ERR = "account authentication expired" // MAX_CONNECTIONS_ERR is for when nats server denies the connection due to server max_connections limit MAX_CONNECTIONS_ERR = "maximum connections exceeded" // MAX_SUBSCRIPTIONS_ERR is for when nats server denies the connection due to server subscriptions limit MAX_SUBSCRIPTIONS_ERR = "maximum subscriptions exceeded" ) // Errors var ( ErrConnectionClosed = errors.New("nats: connection closed") ErrConnectionDraining = errors.New("nats: connection draining") ErrDrainTimeout = errors.New("nats: draining connection timed out") ErrConnectionReconnecting = errors.New("nats: connection reconnecting") ErrSecureConnRequired = errors.New("nats: secure connection required") ErrSecureConnWanted = errors.New("nats: secure connection not available") ErrBadSubscription = errors.New("nats: invalid subscription") ErrTypeSubscription = errors.New("nats: invalid subscription type") ErrBadSubject = errors.New("nats: invalid subject") ErrBadQueueName = errors.New("nats: invalid queue name") ErrSlowConsumer = errors.New("nats: slow consumer, messages dropped") ErrTimeout = errors.New("nats: timeout") ErrBadTimeout = errors.New("nats: timeout invalid") ErrAuthorization = errors.New("nats: authorization violation") ErrAuthExpired = errors.New("nats: authentication expired") ErrAuthRevoked = errors.New("nats: authentication revoked") ErrPermissionViolation = errors.New("nats: permissions violation") ErrAccountAuthExpired = errors.New("nats: account authentication expired") ErrNoServers = errors.New("nats: no servers available for connection") ErrJsonParse = errors.New("nats: connect message, json parse error") ErrChanArg = errors.New("nats: argument needs to be a channel type") ErrMaxPayload = errors.New("nats: maximum payload exceeded") ErrMaxMessages = errors.New("nats: maximum messages delivered") ErrSyncSubRequired = errors.New("nats: illegal call on an async subscription") ErrMultipleTLSConfigs = errors.New("nats: 
multiple tls.Configs not allowed") ErrClientCertOrRootCAsRequired = errors.New("nats: at least one of certCB or rootCAsCB must be set") ErrNoInfoReceived = errors.New("nats: protocol exception, INFO not received") ErrReconnectBufExceeded = errors.New("nats: outbound buffer limit exceeded") ErrInvalidConnection = errors.New("nats: invalid connection") ErrInvalidMsg = errors.New("nats: invalid message or message nil") ErrInvalidArg = errors.New("nats: invalid argument") ErrInvalidContext = errors.New("nats: invalid context") ErrNoDeadlineContext = errors.New("nats: context requires a deadline") ErrNoEchoNotSupported = errors.New("nats: no echo option not supported by this server") ErrClientIDNotSupported = errors.New("nats: client ID not supported by this server") ErrUserButNoSigCB = errors.New("nats: user callback defined without a signature handler") ErrNkeyButNoSigCB = errors.New("nats: nkey defined without a signature handler") ErrNoUserCB = errors.New("nats: user callback not defined") ErrNkeyAndUser = errors.New("nats: user callback and nkey defined") ErrNkeysNotSupported = errors.New("nats: nkeys not supported by the server") ErrStaleConnection = errors.New("nats: " + STALE_CONNECTION) ErrTokenAlreadySet = errors.New("nats: token and token handler both set") ErrUserInfoAlreadySet = errors.New("nats: cannot set user info callback and user/pass") ErrMsgNotBound = errors.New("nats: message is not bound to subscription/connection") ErrMsgNoReply = errors.New("nats: message does not have a reply") ErrClientIPNotSupported = errors.New("nats: client IP not supported by this server") ErrDisconnected = errors.New("nats: server is disconnected") ErrHeadersNotSupported = errors.New("nats: headers not supported by this server") ErrBadHeaderMsg = errors.New("nats: message could not decode headers") ErrNoResponders = errors.New("nats: no responders available for request") ErrMaxConnectionsExceeded = errors.New("nats: server maximum connections exceeded") ErrConnectionNotTLS 
= errors.New("nats: connection is not tls") ErrMaxSubscriptionsExceeded = errors.New("nats: server maximum subscriptions exceeded") ) // GetDefaultOptions returns default configuration options for the client. func GetDefaultOptions() Options { return Options{ AllowReconnect: true, MaxReconnect: DefaultMaxReconnect, ReconnectWait: DefaultReconnectWait, ReconnectJitter: DefaultReconnectJitter, ReconnectJitterTLS: DefaultReconnectJitterTLS, Timeout: DefaultTimeout, PingInterval: DefaultPingInterval, MaxPingsOut: DefaultMaxPingOut, SubChanLen: DefaultMaxChanLen, ReconnectBufSize: DefaultReconnectBufSize, DrainTimeout: DefaultDrainTimeout, FlusherTimeout: DefaultFlusherTimeout, } } // Deprecated: Use GetDefaultOptions() instead. // DefaultOptions is not safe for use by multiple clients. // For details see #308. var DefaultOptions = GetDefaultOptions() // Status represents the state of the connection. type Status int const ( DISCONNECTED = Status(iota) CONNECTED CLOSED RECONNECTING CONNECTING DRAINING_SUBS DRAINING_PUBS ) func (s Status) String() string { switch s { case DISCONNECTED: return "DISCONNECTED" case CONNECTED: return "CONNECTED" case CLOSED: return "CLOSED" case RECONNECTING: return "RECONNECTING" case CONNECTING: return "CONNECTING" case DRAINING_SUBS: return "DRAINING_SUBS" case DRAINING_PUBS: return "DRAINING_PUBS" } return "unknown status" } // ConnHandler is used for asynchronous events such as // disconnected and closed connections. type ConnHandler func(*Conn) // ConnErrHandler is used to process asynchronous events like // disconnected connection with the error (if any). type ConnErrHandler func(*Conn, error) // ErrHandler is used to process asynchronous errors encountered // while processing inbound messages. type ErrHandler func(*Conn, *Subscription, error) // UserJWTHandler is used to fetch and return the account signed // JWT for this user. type UserJWTHandler func() (string, error) // TLSCertHandler is used to fetch and return tls certificate. 
type TLSCertHandler func() (tls.Certificate, error) // RootCAsHandler is used to fetch and return a set of root certificate // authorities that clients use when verifying server certificates. type RootCAsHandler func() (*x509.CertPool, error) // SignatureHandler is used to sign a nonce from the server while // authenticating with nkeys. The user should sign the nonce and // return the raw signature. The client will base64 encode this to // send to the server. type SignatureHandler func([]byte) ([]byte, error) // AuthTokenHandler is used to generate a new token. type AuthTokenHandler func() string // UserInfoCB is used to pass the username and password when establishing connection. type UserInfoCB func() (string, string) // ReconnectDelayHandler is used to get from the user the desired // delay the library should pause before attempting to reconnect // again. Note that this is invoked after the library tried the // whole list of URLs and failed to reconnect. type ReconnectDelayHandler func(attempts int) time.Duration // asyncCB is used to preserve order for async callbacks. type asyncCB struct { f func() next *asyncCB } type asyncCallbacksHandler struct { mu sync.Mutex cond *sync.Cond head *asyncCB tail *asyncCB } // Option is a function on the options for a connection. type Option func(*Options) error // CustomDialer can be used to specify any dialer, not necessarily a // *net.Dialer. A CustomDialer may also implement `SkipTLSHandshake() bool` // in order to skip the TLS handshake in case not required. type CustomDialer interface { Dial(network, address string) (net.Conn, error) } type InProcessConnProvider interface { InProcessConn() (net.Conn, error) } // Options can be used to create a customized connection. type Options struct { // Url represents a single NATS server url to which the client // will be connecting. If the Servers option is also set, it // then becomes the first server in the Servers array. 
Url string // InProcessServer represents a NATS server running within the // same process. If this is set then we will attempt to connect // to the server directly rather than using external TCP conns. InProcessServer InProcessConnProvider // Servers is a configured set of servers which this client // will use when attempting to connect. Servers []string // NoRandomize configures whether we will randomize the // server pool. NoRandomize bool // NoEcho configures whether the server will echo back messages // that are sent on this connection if we also have matching subscriptions. // Note this is supported on servers >= version 1.2. Proto 1 or greater. NoEcho bool // Name is an optional name label which will be sent to the server // on CONNECT to identify the client. Name string // Verbose signals the server to send an OK ack for commands // successfully processed by the server. Verbose bool // Pedantic signals the server whether it should be doing further // validation of subjects. Pedantic bool // Secure enables TLS secure connections that skip server // verification by default. NOT RECOMMENDED. Secure bool // TLSConfig is a custom TLS configuration to use for secure // transports. TLSConfig *tls.Config // TLSCertCB is used to fetch and return custom tls certificate. TLSCertCB TLSCertHandler // TLSHandshakeFirst is used to instruct the library perform // the TLS handshake right after the connect and before receiving // the INFO protocol from the server. If this option is enabled // but the server is not configured to perform the TLS handshake // first, the connection will fail. TLSHandshakeFirst bool // RootCAsCB is used to fetch and return a set of root certificate // authorities that clients use when verifying server certificates. RootCAsCB RootCAsHandler // AllowReconnect enables reconnection logic to be used when we // encounter a disconnect from the current server. 
AllowReconnect bool // MaxReconnect sets the number of reconnect attempts that will be // tried before giving up. If negative, then it will never give up // trying to reconnect. // Defaults to 60. MaxReconnect int // ReconnectWait sets the time to backoff after attempting a reconnect // to a server that we were already connected to previously. // Defaults to 2s. ReconnectWait time.Duration // CustomReconnectDelayCB is invoked after the library tried every // URL in the server list and failed to reconnect. It passes to the // user the current number of attempts. This function returns the // amount of time the library will sleep before attempting to reconnect // again. It is strongly recommended that this value contains some // jitter to prevent all connections to attempt reconnecting at the same time. CustomReconnectDelayCB ReconnectDelayHandler // ReconnectJitter sets the upper bound for a random delay added to // ReconnectWait during a reconnect when no TLS is used. // Defaults to 100ms. ReconnectJitter time.Duration // ReconnectJitterTLS sets the upper bound for a random delay added to // ReconnectWait during a reconnect when TLS is used. // Defaults to 1s. ReconnectJitterTLS time.Duration // Timeout sets the timeout for a Dial operation on a connection. // Defaults to 2s. Timeout time.Duration // DrainTimeout sets the timeout for a Drain Operation to complete. // Defaults to 30s. DrainTimeout time.Duration // FlusherTimeout is the maximum time to wait for write operations // to the underlying connection to complete (including the flusher loop). // Defaults to 1m. FlusherTimeout time.Duration // PingInterval is the period at which the client will be sending ping // commands to the server, disabled if 0 or negative. // Defaults to 2m. PingInterval time.Duration // MaxPingsOut is the maximum number of pending ping commands that can // be awaiting a response before raising an ErrStaleConnection error. // Defaults to 2. 
MaxPingsOut int // ClosedCB sets the closed handler that is called when a client will // no longer be connected. ClosedCB ConnHandler // DisconnectedCB sets the disconnected handler that is called // whenever the connection is disconnected. // Will not be called if DisconnectedErrCB is set // Deprecated. Use DisconnectedErrCB which passes error that caused // the disconnect event. DisconnectedCB ConnHandler // DisconnectedErrCB sets the disconnected error handler that is called // whenever the connection is disconnected. // Disconnected error could be nil, for instance when user explicitly closes the connection. // DisconnectedCB will not be called if DisconnectedErrCB is set DisconnectedErrCB ConnErrHandler // ConnectedCB sets the connected handler called when the initial connection // is established. It is not invoked on successful reconnects - for reconnections, // use ReconnectedCB. ConnectedCB can be used in conjunction with RetryOnFailedConnect // to detect whether the initial connect was successful. ConnectedCB ConnHandler // ReconnectedCB sets the reconnected handler called whenever // the connection is successfully reconnected. ReconnectedCB ConnHandler // DiscoveredServersCB sets the callback that is invoked whenever a new // server has joined the cluster. DiscoveredServersCB ConnHandler // AsyncErrorCB sets the async error handler (e.g. slow consumer errors) AsyncErrorCB ErrHandler // ReconnectErrCB sets the callback that is invoked whenever a // reconnect attempt failed ReconnectErrCB ConnErrHandler // ReconnectBufSize is the size of the backing bufio during reconnect. // Once this has been exhausted publish operations will return an error. // Defaults to 8388608 bytes (8MB). ReconnectBufSize int // SubChanLen is the size of the buffered channel used between the socket // Go routine and the message delivery for SyncSubscriptions. // NOTE: This does not affect AsyncSubscriptions which are // dictated by PendingLimits() // Defaults to 65536. 
SubChanLen int // UserJWT sets the callback handler that will fetch a user's JWT. UserJWT UserJWTHandler // Nkey sets the public nkey that will be used to authenticate // when connecting to the server. UserJWT and Nkey are mutually exclusive // and if defined, UserJWT will take precedence. Nkey string // SignatureCB designates the function used to sign the nonce // presented from the server. SignatureCB SignatureHandler // User sets the username to be used when connecting to the server. User string // Password sets the password to be used when connecting to a server. Password string // UserInfo sets the callback handler that will fetch the username and password. UserInfo UserInfoCB // Token sets the token to be used when connecting to a server. Token string // TokenHandler designates the function used to generate the token to be used when connecting to a server. TokenHandler AuthTokenHandler // Dialer allows a custom net.Dialer when forming connections. // Deprecated: should use CustomDialer instead. Dialer *net.Dialer // CustomDialer allows to specify a custom dialer (not necessarily // a *net.Dialer). CustomDialer CustomDialer // UseOldRequestStyle forces the old method of Requests that utilize // a new Inbox and a new Subscription for each request. UseOldRequestStyle bool // NoCallbacksAfterClientClose allows preventing the invocation of // callbacks after Close() is called. Client won't receive notifications // when Close is invoked by user code. Default is to invoke the callbacks. NoCallbacksAfterClientClose bool // LameDuckModeHandler sets the callback to invoke when the server notifies // the connection that it entered lame duck mode, that is, going to // gradually disconnect all its connections before shutting down. This is // often used in deployments when upgrading NATS Servers. LameDuckModeHandler ConnHandler // RetryOnFailedConnect sets the connection in reconnecting state right // away if it can't connect to a server in the initial set. 
The // MaxReconnect and ReconnectWait options are used for this process, // similarly to when an established connection is disconnected. // If a ReconnectHandler is set, it will be invoked on the first // successful reconnect attempt (if the initial connect fails), // and if a ClosedHandler is set, it will be invoked if // it fails to connect (after exhausting the MaxReconnect attempts). RetryOnFailedConnect bool // For websocket connections, indicates to the server that the connection // supports compression. If the server does too, then data will be compressed. Compression bool // For websocket connections, adds a path to connections url. // This is useful when connecting to NATS behind a proxy. ProxyPath string // InboxPrefix allows the default _INBOX prefix to be customized InboxPrefix string // IgnoreAuthErrorAbort - if set to true, client opts out of the default connect behavior of aborting // subsequent reconnect attempts if server returns the same auth error twice (regardless of reconnect policy). IgnoreAuthErrorAbort bool // SkipHostLookup skips the DNS lookup for the server hostname. SkipHostLookup bool // PermissionErrOnSubscribe - if set to true, the client will return ErrPermissionViolation // from SubscribeSync if the server returns a permissions error for a subscription. // Defaults to false. PermissionErrOnSubscribe bool } const ( // Scratch storage for assembling protocol headers scratchSize = 512 // The size of the bufio reader/writer on top of the socket. defaultBufSize = 32768 // The buffered size of the flush "kick" channel flushChanSize = 1 // Default server pool size srvPoolSize = 4 // NUID size nuidSize = 22 // Default ports used if none is specified in given URL(s) defaultWSPortString = "80" defaultWSSPortString = "443" defaultPortString = "4222" ) // A Conn represents a bare connection to a nats-server. // It can send and receive []byte payloads. // The connection is safe to use in multiple Go routines concurrently. 
type Conn struct { // Keep all members for which we use atomic at the beginning of the // struct and make sure they are all 64bits (or use padding if necessary). // atomic.* functions crash on 32bit machines if operand is not aligned // at 64bit. See https://github.com/golang/go/issues/599 Statistics mu sync.RWMutex // Opts holds the configuration of the Conn. // Modifying the configuration of a running Conn is a race. Opts Options wg sync.WaitGroup srvPool []*srv current *srv urls map[string]struct{} // Keep track of all known URLs (used by processInfo) conn net.Conn bw *natsWriter br *natsReader fch chan struct{} info serverInfo ssid int64 subsMu sync.RWMutex subs map[int64]*Subscription ach *asyncCallbacksHandler pongs []chan struct{} scratch [scratchSize]byte status Status statListeners map[Status][]chan Status initc bool // true if the connection is performing the initial connect err error ps *parseState ptmr *time.Timer pout int ar bool // abort reconnect rqch chan struct{} ws bool // true if a websocket connection // New style response handler respSub string // The wildcard subject respSubPrefix string // the wildcard prefix including trailing . respSubLen int // the length of the wildcard prefix excluding trailing . respMux *Subscription // A single response subscription respMap map[string]chan *Msg // Request map for the response msg channels respRand *rand.Rand // Used for generating suffix // Msg filters for testing. // Protected by subsMu filters map[string]msgFilter } type natsReader struct { r io.Reader buf []byte off int n int } type natsWriter struct { w io.Writer bufs []byte limit int pending *bytes.Buffer plimit int } // Subscription represents interest in a given subject. type Subscription struct { mu sync.Mutex sid int64 // Subject that represents this subscription. This can be different // than the received subject inside a Msg if this is a wildcard. Subject string // Optional queue group name. 
If present, all subscriptions with the // same name will form a distributed queue, and each message will // only be processed by one member of the group. Queue string // For holding information about a JetStream consumer. jsi *jsSub delivered uint64 max uint64 conn *Conn mcb MsgHandler mch chan *Msg errCh chan (error) closed bool sc bool connClosed bool draining bool status SubStatus statListeners map[chan SubStatus][]SubStatus permissionsErr error // Type of Subscription typ SubscriptionType // Async linked list pHead *Msg pTail *Msg pCond *sync.Cond pDone func(subject string) // Pending stats, async subscriptions, high-speed etc. pMsgs int pBytes int pMsgsMax int pBytesMax int pMsgsLimit int pBytesLimit int dropped int } // Status represents the state of the connection. type SubStatus int const ( SubscriptionActive = SubStatus(iota) SubscriptionDraining SubscriptionClosed SubscriptionSlowConsumer ) func (s SubStatus) String() string { switch s { case SubscriptionActive: return "Active" case SubscriptionDraining: return "Draining" case SubscriptionClosed: return "Closed" case SubscriptionSlowConsumer: return "SlowConsumer" } return "unknown status" } // Msg represents a message delivered by NATS. This structure is used // by Subscribers and PublishMsg(). // // # Types of Acknowledgements // // In case using JetStream, there are multiple ways to ack a Msg: // // // Acknowledgement that a message has been processed. // msg.Ack() // // // Negatively acknowledges a message. // msg.Nak() // // // Terminate a message so that it is not redelivered further. // msg.Term() // // // Signal the server that the message is being worked on and reset redelivery timer. // msg.InProgress() type Msg struct { Subject string Reply string Header Header Data []byte Sub *Subscription // Internal next *Msg wsz int barrier *barrierInfo ackd uint32 } // Compares two msgs, ignores sub but checks all other public fields. 
func (m *Msg) Equal(msg *Msg) bool { if m == msg { return true } if m == nil || msg == nil { return false } if m.Subject != msg.Subject || m.Reply != msg.Reply { return false } if !bytes.Equal(m.Data, msg.Data) { return false } if len(m.Header) != len(msg.Header) { return false } for k, v := range m.Header { val, ok := msg.Header[k] if !ok || len(v) != len(val) { return false } for i, hdr := range v { if hdr != val[i] { return false } } } return true } // Size returns a message size in bytes. func (m *Msg) Size() int { if m.wsz != 0 { return m.wsz } hdr, _ := m.headerBytes() return len(m.Subject) + len(m.Reply) + len(hdr) + len(m.Data) } func (m *Msg) headerBytes() ([]byte, error) { var hdr []byte if len(m.Header) == 0 { return hdr, nil } var b bytes.Buffer _, err := b.WriteString(hdrLine) if err != nil { return nil, ErrBadHeaderMsg } err = http.Header(m.Header).Write(&b) if err != nil { return nil, ErrBadHeaderMsg } _, err = b.WriteString(crlf) if err != nil { return nil, ErrBadHeaderMsg } return b.Bytes(), nil } type barrierInfo struct { refs int64 f func() } // Tracks various stats received and sent on this connection, // including counts for messages and bytes. type Statistics struct { InMsgs uint64 OutMsgs uint64 InBytes uint64 OutBytes uint64 Reconnects uint64 } // Tracks individual backend servers. type srv struct { url *url.URL didConnect bool reconnects int lastErr error isImplicit bool tlsName string } // The INFO block received from the server. 
type serverInfo struct {
	ID           string   `json:"server_id"`
	Name         string   `json:"server_name"`
	Proto        int      `json:"proto"`
	Version      string   `json:"version"`
	Host         string   `json:"host"`
	Port         int      `json:"port"`
	Headers      bool     `json:"headers"`
	AuthRequired bool     `json:"auth_required,omitempty"`
	TLSRequired  bool     `json:"tls_required,omitempty"`
	TLSAvailable bool     `json:"tls_available,omitempty"`
	MaxPayload   int64    `json:"max_payload"`
	CID          uint64   `json:"client_id,omitempty"`
	ClientIP     string   `json:"client_ip,omitempty"`
	Nonce        string   `json:"nonce,omitempty"`
	Cluster      string   `json:"cluster,omitempty"`
	ConnectURLs  []string `json:"connect_urls,omitempty"`
	LameDuckMode bool     `json:"ldm,omitempty"`
}

const (
	// clientProtoZero is the original client protocol from 2009.
	// http://nats.io/documentation/internals/nats-protocol/
	/* clientProtoZero */ _ = iota

	// clientProtoInfo signals a client can receive more then the original INFO block.
	// This can be used to update clients on other cluster members, etc.
	clientProtoInfo
)

// connectInfo carries the fields the client sends during the connection
// handshake; the json tags match the wire protocol field names.
// NOTE(review): presumably this is the JSON payload of the CONNECT
// protocol line — confirm against the connect logic elsewhere in the file.
type connectInfo struct {
	Verbose      bool   `json:"verbose"`
	Pedantic     bool   `json:"pedantic"`
	UserJWT      string `json:"jwt,omitempty"`
	Nkey         string `json:"nkey,omitempty"`
	Signature    string `json:"sig,omitempty"`
	User         string `json:"user,omitempty"`
	Pass         string `json:"pass,omitempty"`
	Token        string `json:"auth_token,omitempty"`
	TLS          bool   `json:"tls_required"`
	Name         string `json:"name"`
	Lang         string `json:"lang"`
	Version      string `json:"version"`
	Protocol     int    `json:"protocol"`
	Echo         bool   `json:"echo"`
	Headers      bool   `json:"headers"`
	NoResponders bool   `json:"no_responders"`
}

// MsgHandler is a callback function that processes messages delivered to
// asynchronous subscribers.
type MsgHandler func(msg *Msg)

// Connect will attempt to connect to the NATS system.
// The url can contain username/password semantics. e.g. nats://derek:pass@localhost:4222
// Comma separated arrays are also supported, e.g. urlA, urlB.
// Options start with the defaults but can be overridden.
// To connect to a NATS Server's websocket port, use the `ws` or `wss` scheme, such as // `ws://localhost:8080`. Note that websocket schemes cannot be mixed with others (nats/tls). func Connect(url string, options ...Option) (*Conn, error) { opts := GetDefaultOptions() opts.Servers = processUrlString(url) for _, opt := range options { if opt != nil { if err := opt(&opts); err != nil { return nil, err } } } return opts.Connect() } // Options that can be passed to Connect. // Name is an Option to set the client name. func Name(name string) Option { return func(o *Options) error { o.Name = name return nil } } // InProcessServer is an Option that will try to establish a direction to a NATS server // running within the process instead of dialing via TCP. func InProcessServer(server InProcessConnProvider) Option { return func(o *Options) error { o.InProcessServer = server return nil } } // Secure is an Option to enable TLS secure connections that skip server verification by default. // Pass a TLS Configuration for proper TLS. // A TLS Configuration using InsecureSkipVerify should NOT be used in a production setting. func Secure(tls ...*tls.Config) Option { return func(o *Options) error { o.Secure = true // Use of variadic just simplifies testing scenarios. We only take the first one. if len(tls) > 1 { return ErrMultipleTLSConfigs } if len(tls) == 1 { o.TLSConfig = tls[0] } return nil } } // ClientTLSConfig is an Option to set the TLS configuration for secure // connections. It can be used to e.g. set TLS config with cert and root CAs // from memory. For simple use case of loading cert and CAs from file, // ClientCert and RootCAs options are more convenient. // If Secure is not already set this will set it as well. 
func ClientTLSConfig(certCB TLSCertHandler, rootCAsCB RootCAsHandler) Option { return func(o *Options) error { o.Secure = true if certCB == nil && rootCAsCB == nil { return ErrClientCertOrRootCAsRequired } // Smoke test the callbacks to fail early // if they are not valid. if certCB != nil { if _, err := certCB(); err != nil { return err } } if rootCAsCB != nil { if _, err := rootCAsCB(); err != nil { return err } } if o.TLSConfig == nil { o.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} } o.TLSCertCB = certCB o.RootCAsCB = rootCAsCB return nil } } // RootCAs is a helper option to provide the RootCAs pool from a list of filenames. // If Secure is not already set this will set it as well. func RootCAs(file ...string) Option { return func(o *Options) error { rootCAsCB := func() (*x509.CertPool, error) { pool := x509.NewCertPool() for _, f := range file { rootPEM, err := os.ReadFile(f) if err != nil || rootPEM == nil { return nil, fmt.Errorf("nats: error loading or parsing rootCA file: %w", err) } ok := pool.AppendCertsFromPEM(rootPEM) if !ok { return nil, fmt.Errorf("nats: failed to parse root certificate from %q", f) } } return pool, nil } if o.TLSConfig == nil { o.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} } if _, err := rootCAsCB(); err != nil { return err } o.RootCAsCB = rootCAsCB o.Secure = true return nil } } // ClientCert is a helper option to provide the client certificate from a file. // If Secure is not already set this will set it as well. 
func ClientCert(certFile, keyFile string) Option { return func(o *Options) error { tlsCertCB := func() (tls.Certificate, error) { cert, err := tls.LoadX509KeyPair(certFile, keyFile) if err != nil { return tls.Certificate{}, fmt.Errorf("nats: error loading client certificate: %w", err) } cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0]) if err != nil { return tls.Certificate{}, fmt.Errorf("nats: error parsing client certificate: %w", err) } return cert, nil } if o.TLSConfig == nil { o.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} } if _, err := tlsCertCB(); err != nil { return err } o.TLSCertCB = tlsCertCB o.Secure = true return nil } } // NoReconnect is an Option to turn off reconnect behavior. func NoReconnect() Option { return func(o *Options) error { o.AllowReconnect = false return nil } } // DontRandomize is an Option to turn off randomizing the server pool. func DontRandomize() Option { return func(o *Options) error { o.NoRandomize = true return nil } } // NoEcho is an Option to turn off messages echoing back from a server. // Note this is supported on servers >= version 1.2. Proto 1 or greater. func NoEcho() Option { return func(o *Options) error { o.NoEcho = true return nil } } // ReconnectWait is an Option to set the wait time between reconnect attempts. // Defaults to 2s. func ReconnectWait(t time.Duration) Option { return func(o *Options) error { o.ReconnectWait = t return nil } } // MaxReconnects is an Option to set the maximum number of reconnect attempts. // If negative, it will never stop trying to reconnect. // Defaults to 60. func MaxReconnects(max int) Option { return func(o *Options) error { o.MaxReconnect = max return nil } } // ReconnectJitter is an Option to set the upper bound of a random delay added ReconnectWait. // Defaults to 100ms and 1s, respectively. 
func ReconnectJitter(jitter, jitterForTLS time.Duration) Option { return func(o *Options) error { o.ReconnectJitter = jitter o.ReconnectJitterTLS = jitterForTLS return nil } } // CustomReconnectDelay is an Option to set the CustomReconnectDelayCB option. // See CustomReconnectDelayCB Option for more details. func CustomReconnectDelay(cb ReconnectDelayHandler) Option { return func(o *Options) error { o.CustomReconnectDelayCB = cb return nil } } // PingInterval is an Option to set the period for client ping commands. // Defaults to 2m. func PingInterval(t time.Duration) Option { return func(o *Options) error { o.PingInterval = t return nil } } // MaxPingsOutstanding is an Option to set the maximum number of ping requests // that can go unanswered by the server before closing the connection. // Defaults to 2. func MaxPingsOutstanding(max int) Option { return func(o *Options) error { o.MaxPingsOut = max return nil } } // ReconnectBufSize sets the buffer size of messages kept while busy reconnecting. // Defaults to 8388608 bytes (8MB). It can be disabled by setting it to -1. func ReconnectBufSize(size int) Option { return func(o *Options) error { o.ReconnectBufSize = size return nil } } // Timeout is an Option to set the timeout for Dial on a connection. // Defaults to 2s. func Timeout(t time.Duration) Option { return func(o *Options) error { o.Timeout = t return nil } } // FlusherTimeout is an Option to set the write (and flush) timeout on a connection. func FlusherTimeout(t time.Duration) Option { return func(o *Options) error { o.FlusherTimeout = t return nil } } // DrainTimeout is an Option to set the timeout for draining a connection. // Defaults to 30s. func DrainTimeout(t time.Duration) Option { return func(o *Options) error { o.DrainTimeout = t return nil } } // DisconnectErrHandler is an Option to set the disconnected error handler. 
func DisconnectErrHandler(cb ConnErrHandler) Option { return func(o *Options) error { o.DisconnectedErrCB = cb return nil } } // DisconnectHandler is an Option to set the disconnected handler. // Deprecated: Use DisconnectErrHandler. func DisconnectHandler(cb ConnHandler) Option { return func(o *Options) error { o.DisconnectedCB = cb return nil } } // ConnectHandler is an Option to set the connected handler. func ConnectHandler(cb ConnHandler) Option { return func(o *Options) error { o.ConnectedCB = cb return nil } } // ReconnectHandler is an Option to set the reconnected handler. func ReconnectHandler(cb ConnHandler) Option { return func(o *Options) error { o.ReconnectedCB = cb return nil } } // ReconnectErrHandler is an Option to set the reconnect error handler. func ReconnectErrHandler(cb ConnErrHandler) Option { return func(o *Options) error { o.ReconnectErrCB = cb return nil } } // ClosedHandler is an Option to set the closed handler. func ClosedHandler(cb ConnHandler) Option { return func(o *Options) error { o.ClosedCB = cb return nil } } // DiscoveredServersHandler is an Option to set the new servers handler. func DiscoveredServersHandler(cb ConnHandler) Option { return func(o *Options) error { o.DiscoveredServersCB = cb return nil } } // ErrorHandler is an Option to set the async error handler. func ErrorHandler(cb ErrHandler) Option { return func(o *Options) error { o.AsyncErrorCB = cb return nil } } // UserInfo is an Option to set the username and password to // use when not included directly in the URLs. func UserInfo(user, password string) Option { return func(o *Options) error { o.User = user o.Password = password return nil } } func UserInfoHandler(cb UserInfoCB) Option { return func(o *Options) error { o.UserInfo = cb return nil } } // Token is an Option to set the token to use // when a token is not included directly in the URLs // and when a token handler is not provided. 
func Token(token string) Option { return func(o *Options) error { if o.TokenHandler != nil { return ErrTokenAlreadySet } o.Token = token return nil } } // TokenHandler is an Option to set the token handler to use // when a token is not included directly in the URLs // and when a token is not set. func TokenHandler(cb AuthTokenHandler) Option { return func(o *Options) error { if o.Token != "" { return ErrTokenAlreadySet } o.TokenHandler = cb return nil } } // UserCredentials is a convenience function that takes a filename // for a user's JWT and a filename for the user's private Nkey seed. func UserCredentials(userOrChainedFile string, seedFiles ...string) Option { userCB := func() (string, error) { return userFromFile(userOrChainedFile) } var keyFile string if len(seedFiles) > 0 { keyFile = seedFiles[0] } else { keyFile = userOrChainedFile } sigCB := func(nonce []byte) ([]byte, error) { return sigHandler(nonce, keyFile) } return UserJWT(userCB, sigCB) } // UserJWTAndSeed is a convenience function that takes the JWT and seed // values as strings. func UserJWTAndSeed(jwt string, seed string) Option { userCB := func() (string, error) { return jwt, nil } sigCB := func(nonce []byte) ([]byte, error) { kp, err := nkeys.FromSeed([]byte(seed)) if err != nil { return nil, fmt.Errorf("unable to extract key pair from seed: %w", err) } // Wipe our key on exit. defer kp.Wipe() sig, _ := kp.Sign(nonce) return sig, nil } return UserJWT(userCB, sigCB) } // UserJWT will set the callbacks to retrieve the user's JWT and // the signature callback to sign the server nonce. This an the Nkey // option are mutually exclusive. func UserJWT(userCB UserJWTHandler, sigCB SignatureHandler) Option { return func(o *Options) error { if userCB == nil { return ErrNoUserCB } if sigCB == nil { return ErrUserButNoSigCB } // Smoke test the user callback to ensure it is setup properly // when processing options. 
if _, err := userCB(); err != nil { return err } o.UserJWT = userCB o.SignatureCB = sigCB return nil } } // Nkey will set the public Nkey and the signature callback to // sign the server nonce. func Nkey(pubKey string, sigCB SignatureHandler) Option { return func(o *Options) error { o.Nkey = pubKey o.SignatureCB = sigCB if pubKey != "" && sigCB == nil { return ErrNkeyButNoSigCB } return nil } } // SyncQueueLen will set the maximum queue len for the internal // channel used for SubscribeSync(). // Defaults to 65536. func SyncQueueLen(max int) Option { return func(o *Options) error { o.SubChanLen = max return nil } } // Dialer is an Option to set the dialer which will be used when // attempting to establish a connection. // Deprecated: Should use CustomDialer instead. func Dialer(dialer *net.Dialer) Option { return func(o *Options) error { o.Dialer = dialer return nil } } // SetCustomDialer is an Option to set a custom dialer which will be // used when attempting to establish a connection. If both Dialer // and CustomDialer are specified, CustomDialer takes precedence. func SetCustomDialer(dialer CustomDialer) Option { return func(o *Options) error { o.CustomDialer = dialer return nil } } // UseOldRequestStyle is an Option to force usage of the old Request style. func UseOldRequestStyle() Option { return func(o *Options) error { o.UseOldRequestStyle = true return nil } } // NoCallbacksAfterClientClose is an Option to disable callbacks when user code // calls Close(). If close is initiated by any other condition, callbacks // if any will be invoked. func NoCallbacksAfterClientClose() Option { return func(o *Options) error { o.NoCallbacksAfterClientClose = true return nil } } // LameDuckModeHandler sets the callback to invoke when the server notifies // the connection that it entered lame duck mode, that is, going to // gradually disconnect all its connections before shutting down. This is // often used in deployments when upgrading NATS Servers. 
func LameDuckModeHandler(cb ConnHandler) Option {
	return func(o *Options) error {
		o.LameDuckModeHandler = cb
		return nil
	}
}

// RetryOnFailedConnect sets the connection in reconnecting state right away
// if it can't connect to a server in the initial set.
// See RetryOnFailedConnect option for more details.
func RetryOnFailedConnect(retry bool) Option {
	return func(o *Options) error {
		o.RetryOnFailedConnect = retry
		return nil
	}
}

// Compression is an Option to indicate if this connection supports
// compression. Currently only supported for Websocket connections.
func Compression(enabled bool) Option {
	return func(o *Options) error {
		o.Compression = enabled
		return nil
	}
}

// ProxyPath is an option for websocket connections that adds a path to connections url.
// This is useful when connecting to NATS behind a proxy.
func ProxyPath(path string) Option {
	return func(o *Options) error {
		o.ProxyPath = path
		return nil
	}
}

// CustomInboxPrefix configures the request + reply inbox prefix
func CustomInboxPrefix(p string) Option {
	return func(o *Options) error {
		// Reject empty prefixes, wildcard tokens (">", "*"), and trailing
		// dots, all of which would produce invalid inbox subjects.
		if p == "" || strings.Contains(p, ">") || strings.Contains(p, "*") || strings.HasSuffix(p, ".") {
			return errors.New("nats: invalid custom prefix")
		}
		o.InboxPrefix = p
		return nil
	}
}

// IgnoreAuthErrorAbort opts out of the default connect behavior of aborting
// subsequent reconnect attempts if server returns the same auth error twice.
func IgnoreAuthErrorAbort() Option {
	return func(o *Options) error {
		o.IgnoreAuthErrorAbort = true
		return nil
	}
}

// SkipHostLookup is an Option to skip the host lookup when connecting to a server.
func SkipHostLookup() Option {
	return func(o *Options) error {
		o.SkipHostLookup = true
		return nil
	}
}

// PermissionErrOnSubscribe is an Option controlling whether subscribe
// permission violations are surfaced to the caller
// (sets Options.PermissionErrOnSubscribe).
func PermissionErrOnSubscribe(enabled bool) Option {
	return func(o *Options) error {
		o.PermissionErrOnSubscribe = enabled
		return nil
	}
}

// TLSHandshakeFirst is an Option to perform the TLS handshake first, that is
// before receiving the INFO protocol. This requires the server to also be
// configured with such option, otherwise the connection will fail.
func TLSHandshakeFirst() Option {
	return func(o *Options) error {
		o.TLSHandshakeFirst = true
		// TLS-first implies a secure connection.
		o.Secure = true
		return nil
	}
}

// Handler processing

// SetDisconnectHandler will set the disconnect event handler.
// Deprecated: Use SetDisconnectErrHandler
func (nc *Conn) SetDisconnectHandler(dcb ConnHandler) {
	if nc == nil {
		return
	}
	nc.mu.Lock()
	defer nc.mu.Unlock()
	nc.Opts.DisconnectedCB = dcb
}

// SetDisconnectErrHandler will set the disconnect event handler.
func (nc *Conn) SetDisconnectErrHandler(dcb ConnErrHandler) {
	if nc == nil {
		return
	}
	nc.mu.Lock()
	defer nc.mu.Unlock()
	nc.Opts.DisconnectedErrCB = dcb
}

// DisconnectErrHandler will return the disconnect event handler.
func (nc *Conn) DisconnectErrHandler() ConnErrHandler {
	if nc == nil {
		return nil
	}
	nc.mu.Lock()
	defer nc.mu.Unlock()
	return nc.Opts.DisconnectedErrCB
}

// SetReconnectHandler will set the reconnect event handler.
func (nc *Conn) SetReconnectHandler(rcb ConnHandler) {
	if nc == nil {
		return
	}
	nc.mu.Lock()
	defer nc.mu.Unlock()
	nc.Opts.ReconnectedCB = rcb
}

// ReconnectHandler will return the reconnect event handler.
func (nc *Conn) ReconnectHandler() ConnHandler {
	if nc == nil {
		return nil
	}
	nc.mu.Lock()
	defer nc.mu.Unlock()
	return nc.Opts.ReconnectedCB
}

// SetDiscoveredServersHandler will set the discovered servers handler.
func (nc *Conn) SetDiscoveredServersHandler(dscb ConnHandler) {
	if nc == nil {
		return
	}
	nc.mu.Lock()
	defer nc.mu.Unlock()
	nc.Opts.DiscoveredServersCB = dscb
}

// DiscoveredServersHandler will return the discovered servers handler.
func (nc *Conn) DiscoveredServersHandler() ConnHandler {
	if nc == nil {
		return nil
	}
	nc.mu.Lock()
	defer nc.mu.Unlock()
	return nc.Opts.DiscoveredServersCB
}

// SetClosedHandler will set the closed event handler.
func (nc *Conn) SetClosedHandler(cb ConnHandler) {
	if nc == nil {
		return
	}
	nc.mu.Lock()
	defer nc.mu.Unlock()
	nc.Opts.ClosedCB = cb
}

// ClosedHandler will return the closed event handler.
func (nc *Conn) ClosedHandler() ConnHandler {
	if nc == nil {
		return nil
	}
	nc.mu.Lock()
	defer nc.mu.Unlock()
	return nc.Opts.ClosedCB
}

// SetErrorHandler will set the async error handler.
func (nc *Conn) SetErrorHandler(cb ErrHandler) {
	if nc == nil {
		return
	}
	nc.mu.Lock()
	defer nc.mu.Unlock()
	nc.Opts.AsyncErrorCB = cb
}

// ErrorHandler will return the async error handler.
func (nc *Conn) ErrorHandler() ErrHandler {
	if nc == nil {
		return nil
	}
	nc.mu.Lock()
	defer nc.mu.Unlock()
	return nc.Opts.AsyncErrorCB
}

// Process the url string argument to Connect.
// Return an array of urls, even if only one.
func processUrlString(url string) []string {
	urls := strings.Split(url, ",")
	// Compact in place: trim whitespace and a trailing "/", drop empties.
	var j int
	for _, s := range urls {
		u := strings.TrimSuffix(strings.TrimSpace(s), "/")
		if len(u) > 0 {
			urls[j] = u
			j++
		}
	}
	return urls[:j]
}

// Connect will attempt to connect to a NATS server with multiple options.
func (o Options) Connect() (*Conn, error) {
	nc := &Conn{Opts: o}

	// Some default options processing.
	if nc.Opts.MaxPingsOut == 0 {
		nc.Opts.MaxPingsOut = DefaultMaxPingOut
	}
	// Allow old default for channel length to work correctly.
	if nc.Opts.SubChanLen == 0 {
		nc.Opts.SubChanLen = DefaultMaxChanLen
	}
	// Default ReconnectBufSize
	if nc.Opts.ReconnectBufSize == 0 {
		nc.Opts.ReconnectBufSize = DefaultReconnectBufSize
	}
	// Ensure that Timeout is not 0
	if nc.Opts.Timeout == 0 {
		nc.Opts.Timeout = DefaultTimeout
	}

	// Check first for user jwt callback being defined and nkey.
	if nc.Opts.UserJWT != nil && nc.Opts.Nkey != "" {
		return nil, ErrNkeyAndUser
	}

	// Check if we have an nkey but no signature callback defined.
	if nc.Opts.Nkey != "" && nc.Opts.SignatureCB == nil {
		return nil, ErrNkeyButNoSigCB
	}

	// Allow custom Dialer for connecting using a timeout by default
	if nc.Opts.Dialer == nil {
		nc.Opts.Dialer = &net.Dialer{
			Timeout: nc.Opts.Timeout,
		}
	}

	// If the TLSHandshakeFirst option is specified, make sure that
	// the Secure boolean is true.
	if nc.Opts.TLSHandshakeFirst {
		nc.Opts.Secure = true
	}

	if err := nc.setupServerPool(); err != nil {
		return nil, err
	}

	// Create the async callback handler.
	nc.ach = &asyncCallbacksHandler{}
	nc.ach.cond = sync.NewCond(&nc.ach.mu)

	// Set a default error handler that will print to stderr.
	if nc.Opts.AsyncErrorCB == nil {
		nc.Opts.AsyncErrorCB = defaultErrHandler
	}

	// Create reader/writer
	nc.newReaderWriter()

	connectionEstablished, err := nc.connect()
	if err != nil {
		return nil, err
	}

	// Spin up the async cb dispatcher on success
	go nc.ach.asyncCBDispatcher()

	if connectionEstablished && nc.Opts.ConnectedCB != nil {
		nc.ach.push(func() { nc.Opts.ConnectedCB(nc) })
	}

	return nc, nil
}

// defaultErrHandler is the fallback async error handler: it formats the
// error with connection id (and subscription subject, if any) to stderr.
func defaultErrHandler(nc *Conn, sub *Subscription, err error) {
	var cid uint64
	if nc != nil {
		nc.mu.RLock()
		cid = nc.info.CID
		nc.mu.RUnlock()
	}
	var errStr string
	if sub != nil {
		var subject string
		sub.mu.Lock()
		// Prefer the JetStream-configured subject when present.
		if sub.jsi != nil {
			subject = sub.jsi.psubj
		} else {
			subject = sub.Subject
		}
		sub.mu.Unlock()
		errStr = fmt.Sprintf("%s on connection [%d] for subscription on %q\n", err.Error(), cid, subject)
	} else {
		errStr = fmt.Sprintf("%s on connection [%d]\n", err.Error(), cid)
	}
	os.Stderr.WriteString(errStr)
}

const (
	_CRLF_   = "\r\n"
	_EMPTY_  = ""
	_SPC_    = " "
	_PUB_P_  = "PUB "
	_HPUB_P_ = "HPUB "
)

var _CRLF_BYTES_ = []byte(_CRLF_)

const (
	_OK_OP_   = "+OK"
	_ERR_OP_  = "-ERR"
	_PONG_OP_ = "PONG"
	_INFO_OP_ = "INFO"
)

const (
	connectProto = "CONNECT %s" + _CRLF_
	pingProto    = "PING" + _CRLF_
	pongProto    = "PONG" + _CRLF_
	subProto     = "SUB %s %s %d" + _CRLF_
	unsubProto   = "UNSUB %d %s" + _CRLF_
	okProto      = _OK_OP_ + _CRLF_
)

// Return the currently selected server
func (nc *Conn)
currentServer() (int, *srv) {
	for i, s := range nc.srvPool {
		if s == nil {
			continue
		}
		if s == nc.current {
			return i, s
		}
	}
	// Not found in the pool.
	return -1, nil
}

// Pop the current server and put onto the end of the list. Select head of list as long
// as number of reconnect attempts under MaxReconnect.
func (nc *Conn) selectNextServer() (*srv, error) {
	i, s := nc.currentServer()
	if i < 0 {
		return nil, ErrNoServers
	}
	sp := nc.srvPool
	num := len(sp)
	// Shift the remaining servers down over the current one.
	copy(sp[i:num-1], sp[i+1:num])
	maxReconnect := nc.Opts.MaxReconnect
	if maxReconnect < 0 || s.reconnects < maxReconnect {
		// Still eligible: move current server to the end of the list.
		nc.srvPool[num-1] = s
	} else {
		// Exhausted its reconnect attempts: drop it from the pool.
		nc.srvPool = sp[0 : num-1]
	}
	if len(nc.srvPool) <= 0 {
		nc.current = nil
		return nil, ErrNoServers
	}
	nc.current = nc.srvPool[0]
	return nc.srvPool[0], nil
}

// Will assign the correct server to nc.current
func (nc *Conn) pickServer() error {
	nc.current = nil
	if len(nc.srvPool) <= 0 {
		return ErrNoServers
	}
	for _, s := range nc.srvPool {
		if s != nil {
			nc.current = s
			return nil
		}
	}
	return ErrNoServers
}

const tlsScheme = "tls"

// Create the server pool using the options given.
// We will place a Url option first, followed by any
// Server Options. We will randomize the server pool unless
// the NoRandomize flag is set.
func (nc *Conn) setupServerPool() error {
	nc.srvPool = make([]*srv, 0, srvPoolSize)
	nc.urls = make(map[string]struct{}, srvPoolSize)

	// Create srv objects from each url string in nc.Opts.Servers
	// and add them to the pool.
	for _, urlString := range nc.Opts.Servers {
		if err := nc.addURLToPool(urlString, false, false); err != nil {
			return err
		}
	}

	// Randomize if allowed to
	if !nc.Opts.NoRandomize {
		nc.shufflePool(0)
	}

	// Normally, if this one is set, Options.Servers should not be,
	// but we always allowed that, so continue to do so.
	if nc.Opts.Url != _EMPTY_ {
		// Add to the end of the array
		if err := nc.addURLToPool(nc.Opts.Url, false, false); err != nil {
			return err
		}
		// Then swap it with first to guarantee that Options.Url is tried first.
		last := len(nc.srvPool) - 1
		if last > 0 {
			nc.srvPool[0], nc.srvPool[last] = nc.srvPool[last], nc.srvPool[0]
		}
	} else if len(nc.srvPool) <= 0 {
		// Place default URL if pool is empty.
		if err := nc.addURLToPool(DefaultURL, false, false); err != nil {
			return err
		}
	}

	// Check for Scheme hint to move to TLS mode.
	for _, srv := range nc.srvPool {
		if srv.url.Scheme == tlsScheme || srv.url.Scheme == wsSchemeTLS {
			// FIXME(dlc), this is for all in the pool, should be case by case.
			nc.Opts.Secure = true
			if nc.Opts.TLSConfig == nil {
				nc.Opts.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12}
			}
		}
	}

	return nc.pickServer()
}

// Helper function to return scheme
func (nc *Conn) connScheme() string {
	if nc.ws {
		if nc.Opts.Secure {
			return wsSchemeTLS
		}
		return wsScheme
	}
	if nc.Opts.Secure {
		return tlsScheme
	}
	return "nats"
}

// Return true iff u.Hostname() is an IP address.
func hostIsIP(u *url.URL) bool {
	return net.ParseIP(u.Hostname()) != nil
}

// addURLToPool adds an entry to the server pool
func (nc *Conn) addURLToPool(sURL string, implicit, saveTLSName bool) error {
	// Prepend a scheme if the URL does not carry one.
	if !strings.Contains(sURL, "://") {
		sURL = fmt.Sprintf("%s://%s", nc.connScheme(), sURL)
	}
	var (
		u   *url.URL
		err error
	)
	// Two passes at most: parse, and if no port was present, append the
	// scheme-appropriate default port and parse again.
	for i := 0; i < 2; i++ {
		u, err = url.Parse(sURL)
		if err != nil {
			return err
		}
		if u.Port() != "" {
			break
		}
		// In case given URL is of the form "localhost:", just add
		// the port number at the end, otherwise, add ":4222".
		if sURL[len(sURL)-1] != ':' {
			sURL += ":"
		}
		switch u.Scheme {
		case wsScheme:
			sURL += defaultWSPortString
		case wsSchemeTLS:
			sURL += defaultWSSPortString
		default:
			sURL += defaultPortString
		}
	}

	isWS := isWebsocketScheme(u)
	// We don't support mix and match of websocket and non websocket URLs.
	// If this is the first URL, then we accept and switch the global state
	// to websocket. After that, we will know how to reject mixed URLs.
if len(nc.srvPool) == 0 {
		nc.ws = isWS
	} else if isWS && !nc.ws || !isWS && nc.ws {
		return errors.New("mixing of websocket and non websocket URLs is not allowed")
	}

	var tlsName string
	if implicit {
		curl := nc.current.url
		// Check to see if we do not have a url.User but current connected
		// url does. If so copy over.
		if u.User == nil && curl.User != nil {
			u.User = curl.User
		}
		// We are checking to see if we have a secure connection and are
		// adding an implicit server that just has an IP. If so we will remember
		// the current hostname we are connected to.
		if saveTLSName && hostIsIP(u) {
			tlsName = curl.Hostname()
		}
	}

	s := &srv{url: u, isImplicit: implicit, tlsName: tlsName}
	nc.srvPool = append(nc.srvPool, s)
	nc.urls[u.Host] = struct{}{}
	return nil
}

// shufflePool swaps randomly elements in the server pool
// The `offset` value indicates that the shuffling should start at
// this offset and leave the elements from [0..offset) intact.
func (nc *Conn) shufflePool(offset int) {
	if len(nc.srvPool) <= offset+1 {
		return
	}
	source := rand.NewSource(time.Now().UnixNano())
	r := rand.New(source)
	// Fisher-Yates over the [offset, len) suffix of the pool.
	for i := offset; i < len(nc.srvPool); i++ {
		j := offset + r.Intn(i+1-offset)
		nc.srvPool[i], nc.srvPool[j] = nc.srvPool[j], nc.srvPool[i]
	}
}

// newReaderWriter allocates the buffered reader/writer used for the
// protocol exchange; they are bound to a socket in bindToNewConn().
func (nc *Conn) newReaderWriter() {
	nc.br = &natsReader{
		buf: make([]byte, defaultBufSize),
		off: -1,
	}
	nc.bw = &natsWriter{
		limit:  defaultBufSize,
		plimit: nc.Opts.ReconnectBufSize,
	}
}

// bindToNewConn points the reader/writer at the current nc.conn and
// resets their buffered state.
func (nc *Conn) bindToNewConn() {
	bw := nc.bw
	bw.w, bw.bufs = nc.newWriter(), nil
	br := nc.br
	br.r, br.n, br.off = nc.conn, 0, -1
}

// newWriter returns the connection's writer, wrapped with a write
// deadline when FlusherTimeout is set.
func (nc *Conn) newWriter() io.Writer {
	var w io.Writer = nc.conn
	if nc.Opts.FlusherTimeout > 0 {
		w = &timeoutWriter{conn: nc.conn, timeout: nc.Opts.FlusherTimeout}
	}
	return w
}

func (w *natsWriter) appendString(str string) error {
	return w.appendBufs([]byte(str))
}

// appendBufs queues the given buffers, going to the pending buffer when in
// (re)connect mode, and flushes once the buffered amount reaches the limit.
func (w *natsWriter) appendBufs(bufs ...[]byte) error {
	for _, buf := range bufs {
		if len(buf) == 0 {
			continue
		}
		if w.pending != nil {
			w.pending.Write(buf)
		} else {
			w.bufs = append(w.bufs, buf...)
		}
	}
	if w.pending == nil && len(w.bufs) >= w.limit {
		return w.flush()
	}
	return nil
}

// writeDirect writes the given strings straight to the underlying writer,
// bypassing the buffers (used during connect/reconnect).
func (w *natsWriter) writeDirect(strs ...string) error {
	for _, str := range strs {
		if _, err := w.w.Write([]byte(str)); err != nil {
			return err
		}
	}
	return nil
}

func (w *natsWriter) flush() error {
	// If a pending buffer is set, we don't flush. Code that needs to
	// write directly to the socket, by-passing buffers during (re)connect,
	// will use the writeDirect() API.
	if w.pending != nil {
		return nil
	}
	// Do not skip calling w.w.Write() here if len(w.bufs) is 0 because
	// the actual writer (if websocket for instance) may have things
	// to do such as sending control frames, etc..
	_, err := w.w.Write(w.bufs)
	w.bufs = w.bufs[:0]
	return err
}

// buffered reports how many bytes are queued but not yet written.
func (w *natsWriter) buffered() int {
	if w.pending != nil {
		return w.pending.Len()
	}
	return len(w.bufs)
}

// switchToPending redirects subsequent appends to an in-memory pending
// buffer (used while disconnected/reconnecting).
func (w *natsWriter) switchToPending() {
	w.pending = new(bytes.Buffer)
}

// flushPendingBuffer writes the pending buffer to the socket and resets it.
func (w *natsWriter) flushPendingBuffer() error {
	if w.pending == nil || w.pending.Len() == 0 {
		return nil
	}
	_, err := w.w.Write(w.pending.Bytes())
	// Reset the pending buffer at this point because we don't want
	// to take the risk of sending duplicates or partials.
	w.pending.Reset()
	return err
}

// atLimitIfUsingPending reports whether the pending buffer has reached the
// configured reconnect buffer size limit.
func (w *natsWriter) atLimitIfUsingPending() bool {
	if w.pending == nil {
		return false
	}
	return w.pending.Len() >= w.plimit
}

// doneWithPending drops the pending buffer, returning to direct buffering.
func (w *natsWriter) doneWithPending() {
	w.pending = nil
}

// Notify the reader that we are done with the connect, where "read" operations
// happen synchronously and under the connection lock. After this point, "read"
// will be happening from the read loop, without the connection lock.
//
// Note: this runs under the connection lock.
func (r *natsReader) doneWithConnect() { if wsr, ok := r.r.(*websocketReader); ok { wsr.doneWithConnect() } } func (r *natsReader) Read() ([]byte, error) { if r.off >= 0 { off := r.off r.off = -1 return r.buf[off:r.n], nil } var err error r.n, err = r.r.Read(r.buf) return r.buf[:r.n], err } func (r *natsReader) ReadString(delim byte) (string, error) { var s string build_string: // First look if we have something in the buffer if r.off >= 0 { i := bytes.IndexByte(r.buf[r.off:r.n], delim) if i >= 0 { end := r.off + i + 1 s += string(r.buf[r.off:end]) r.off = end if r.off >= r.n { r.off = -1 } return s, nil } // We did not find the delim, so will have to read more. s += string(r.buf[r.off:r.n]) r.off = -1 } if _, err := r.Read(); err != nil { return s, err } r.off = 0 goto build_string } // createConn will connect to the server and wrap the appropriate // bufio structures. It will do the right thing when an existing // connection is in place. func (nc *Conn) createConn() (err error) { if nc.Opts.Timeout < 0 { return ErrBadTimeout } if _, cur := nc.currentServer(); cur == nil { return ErrNoServers } // If we have a reference to an in-process server then establish a // connection using that. if nc.Opts.InProcessServer != nil { conn, err := nc.Opts.InProcessServer.InProcessConn() if err != nil { return fmt.Errorf("failed to get in-process connection: %w", err) } nc.conn = conn nc.bindToNewConn() return nil } // We will auto-expand host names if they resolve to multiple IPs hosts := []string{} u := nc.current.url if !nc.Opts.SkipHostLookup && net.ParseIP(u.Hostname()) == nil { addrs, _ := net.LookupHost(u.Hostname()) for _, addr := range addrs { hosts = append(hosts, net.JoinHostPort(addr, u.Port())) } } // Fall back to what we were given. if len(hosts) == 0 { hosts = append(hosts, u.Host) } // CustomDialer takes precedence. If not set, use Opts.Dialer which // is set to a default *net.Dialer (in Connect()) if not explicitly // set by the user. 
dialer := nc.Opts.CustomDialer if dialer == nil { // We will copy and shorten the timeout if we have multiple hosts to try. copyDialer := *nc.Opts.Dialer copyDialer.Timeout = copyDialer.Timeout / time.Duration(len(hosts)) dialer = ©Dialer } if len(hosts) > 1 && !nc.Opts.NoRandomize { rand.Shuffle(len(hosts), func(i, j int) { hosts[i], hosts[j] = hosts[j], hosts[i] }) } for _, host := range hosts { nc.conn, err = dialer.Dial("tcp", host) if err == nil { break } } if err != nil { return err } // If scheme starts with "ws" then branch out to websocket code. if isWebsocketScheme(u) { return nc.wsInitHandshake(u) } // Reset reader/writer to this new TCP connection nc.bindToNewConn() return nil } type skipTLSDialer interface { SkipTLSHandshake() bool } // makeTLSConn will wrap an existing Conn using TLS func (nc *Conn) makeTLSConn() error { if nc.Opts.CustomDialer != nil { // we do nothing when asked to skip the TLS wrapper sd, ok := nc.Opts.CustomDialer.(skipTLSDialer) if ok && sd.SkipTLSHandshake() { return nil } } // Allow the user to configure their own tls.Config structure. 
tlsCopy := &tls.Config{} if nc.Opts.TLSConfig != nil { tlsCopy = util.CloneTLSConfig(nc.Opts.TLSConfig) } if nc.Opts.TLSCertCB != nil { cert, err := nc.Opts.TLSCertCB() if err != nil { return err } tlsCopy.Certificates = []tls.Certificate{cert} } if nc.Opts.RootCAsCB != nil { rootCAs, err := nc.Opts.RootCAsCB() if err != nil { return err } tlsCopy.RootCAs = rootCAs } // If its blank we will override it with the current host if tlsCopy.ServerName == _EMPTY_ { if nc.current.tlsName != _EMPTY_ { tlsCopy.ServerName = nc.current.tlsName } else { h, _, _ := net.SplitHostPort(nc.current.url.Host) tlsCopy.ServerName = h } } nc.conn = tls.Client(nc.conn, tlsCopy) conn := nc.conn.(*tls.Conn) if err := conn.Handshake(); err != nil { return err } nc.bindToNewConn() return nil } // TLSConnectionState retrieves the state of the TLS connection to the server func (nc *Conn) TLSConnectionState() (tls.ConnectionState, error) { if !nc.isConnected() { return tls.ConnectionState{}, ErrDisconnected } nc.mu.RLock() conn := nc.conn nc.mu.RUnlock() tc, ok := conn.(*tls.Conn) if !ok { return tls.ConnectionState{}, ErrConnectionNotTLS } return tc.ConnectionState(), nil } // waitForExits will wait for all socket watcher Go routines to // be shutdown before proceeding. func (nc *Conn) waitForExits() { // Kick old flusher forcefully. select { case nc.fch <- struct{}{}: default: } // Wait for any previous go routines. nc.wg.Wait() } // ForceReconnect forces a reconnect attempt to the server. // This is a non-blocking call and will start the reconnect // process without waiting for it to complete. // // If the connection is already in the process of reconnecting, // this call will force an immediate reconnect attempt (bypassing // the current reconnect delay). 
func (nc *Conn) ForceReconnect() error {
	nc.mu.Lock()
	defer nc.mu.Unlock()
	if nc.isClosed() {
		return ErrConnectionClosed
	}
	if nc.isReconnecting() {
		// if we're already reconnecting, force a reconnect attempt
		// even if we're in the middle of a backoff
		if nc.rqch != nil {
			close(nc.rqch)
			nc.rqch = nil
		}
		return nil
	}

	// Clear any queued pongs
	nc.clearPendingFlushCalls()

	// Clear any queued and blocking requests.
	nc.clearPendingRequestCalls()

	// Stop ping timer if set.
	nc.stopPingTimer()

	// Go ahead and make sure we have flushed the outbound
	nc.bw.flush()
	nc.conn.Close()

	nc.changeConnStatus(RECONNECTING)
	go nc.doReconnect(nil, true)
	return nil
}

// ConnectedUrl reports the connected server's URL
func (nc *Conn) ConnectedUrl() string {
	if nc == nil {
		return _EMPTY_
	}

	nc.mu.RLock()
	defer nc.mu.RUnlock()

	if nc.status != CONNECTED {
		return _EMPTY_
	}
	return nc.current.url.String()
}

// ConnectedUrlRedacted reports the connected server's URL with passwords redacted
func (nc *Conn) ConnectedUrlRedacted() string {
	if nc == nil {
		return _EMPTY_
	}

	nc.mu.RLock()
	defer nc.mu.RUnlock()

	if nc.status != CONNECTED {
		return _EMPTY_
	}
	return nc.current.url.Redacted()
}

// ConnectedAddr returns the connected server's IP
func (nc *Conn) ConnectedAddr() string {
	if nc == nil {
		return _EMPTY_
	}

	nc.mu.RLock()
	defer nc.mu.RUnlock()

	if nc.status != CONNECTED {
		return _EMPTY_
	}
	return nc.conn.RemoteAddr().String()
}

// ConnectedServerId reports the connected server's Id
func (nc *Conn) ConnectedServerId() string {
	if nc == nil {
		return _EMPTY_
	}

	nc.mu.RLock()
	defer nc.mu.RUnlock()

	if nc.status != CONNECTED {
		return _EMPTY_
	}
	return nc.info.ID
}

// ConnectedServerName reports the connected server's name
func (nc *Conn) ConnectedServerName() string {
	if nc == nil {
		return _EMPTY_
	}

	nc.mu.RLock()
	defer nc.mu.RUnlock()

	if nc.status != CONNECTED {
		return _EMPTY_
	}
	return nc.info.Name
}

// semVerRe matches an optional leading "v" followed by up to three numeric
// components; minor and patch are optional in the pattern.
var semVerRe = regexp.MustCompile(`\Av?([0-9]+)\.?([0-9]+)?\.?([0-9]+)?`)

// versionComponents parses a semver-like string into its numeric parts.
// Missing minor/patch components yield an Atoi error on the empty string.
func versionComponents(version string) (major, minor, patch int, err error) {
	m := semVerRe.FindStringSubmatch(version)
	if m == nil {
		return 0, 0, 0, errors.New("invalid semver")
	}
	major, err = strconv.Atoi(m[1])
	if err != nil {
		return -1, -1, -1, err
	}
	minor, err = strconv.Atoi(m[2])
	if err != nil {
		return -1, -1, -1, err
	}
	patch, err = strconv.Atoi(m[3])
	if err != nil {
		return -1, -1, -1, err
	}
	return major, minor, patch, err
}

// Check for minimum server requirement.
func (nc *Conn) serverMinVersion(major, minor, patch int) bool {
	smajor, sminor, spatch, _ := versionComponents(nc.ConnectedServerVersion())
	if smajor < major || (smajor == major && sminor < minor) || (smajor == major && sminor == minor && spatch < patch) {
		return false
	}
	return true
}

// ConnectedServerVersion reports the connected server's version as a string
func (nc *Conn) ConnectedServerVersion() string {
	if nc == nil {
		return _EMPTY_
	}

	nc.mu.RLock()
	defer nc.mu.RUnlock()

	if nc.status != CONNECTED {
		return _EMPTY_
	}
	return nc.info.Version
}

// ConnectedClusterName reports the connected server's cluster name if any
func (nc *Conn) ConnectedClusterName() string {
	if nc == nil {
		return _EMPTY_
	}

	nc.mu.RLock()
	defer nc.mu.RUnlock()

	if nc.status != CONNECTED {
		return _EMPTY_
	}
	return nc.info.Cluster
}

// Low level setup for structs, etc
func (nc *Conn) setup() {
	nc.subs = make(map[int64]*Subscription)
	nc.pongs = make([]chan struct{}, 0, 8)

	nc.fch = make(chan struct{}, flushChanSize)
	nc.rqch = make(chan struct{})

	// Setup scratch outbound buffer for PUB/HPUB
	pub := nc.scratch[:len(_HPUB_P_)]
	copy(pub, _HPUB_P_)
}

// Process a connected connection and initialize properly.
func (nc *Conn) processConnectInit() error {
	// Set our deadline for the whole connect process
	nc.conn.SetDeadline(time.Now().Add(nc.Opts.Timeout))
	defer nc.conn.SetDeadline(time.Time{})

	// Set our status to connecting.
	nc.changeConnStatus(CONNECTING)

	// If we need to have a TLS connection and want the TLS handshake to occur
	// first, do it now.
if nc.Opts.Secure && nc.Opts.TLSHandshakeFirst {
		if err := nc.makeTLSConn(); err != nil {
			return err
		}
	}

	// Process the INFO protocol received from the server
	err := nc.processExpectedInfo()
	if err != nil {
		return err
	}

	// Send the CONNECT protocol along with the initial PING protocol.
	// Wait for the PONG response (or any error that we get from the server).
	err = nc.sendConnect()
	if err != nil {
		return err
	}

	// Reset the number of PING sent out
	nc.pout = 0

	// Start or reset Timer
	if nc.Opts.PingInterval > 0 {
		if nc.ptmr == nil {
			nc.ptmr = time.AfterFunc(nc.Opts.PingInterval, nc.processPingTimer)
		} else {
			nc.ptmr.Reset(nc.Opts.PingInterval)
		}
	}

	// Start the readLoop and flusher go routines, we will wait on both on a reconnect event.
	nc.wg.Add(2)
	go nc.readLoop()
	go nc.flusher()

	// Notify the reader that we are done with the connect handshake, where
	// reads were done synchronously and under the connection lock.
	nc.br.doneWithConnect()

	return nil
}

// Main connect function. Will connect to the nats-server.
func (nc *Conn) connect() (bool, error) {
	var err error
	var connectionEstablished bool

	// Create actual socket connection
	// For first connect we walk all servers in the pool and try
	// to connect immediately.
	nc.mu.Lock()
	defer nc.mu.Unlock()
	nc.initc = true
	// The pool may change inside the loop iteration due to INFO protocol.
	for i := 0; i < len(nc.srvPool); i++ {
		nc.current = nc.srvPool[i]

		if err = nc.createConn(); err == nil {
			// This was moved out of processConnectInit() because
			// that function is now invoked from doReconnect() too.
			nc.setup()

			err = nc.processConnectInit()

			if err == nil {
				nc.current.didConnect = true
				nc.current.reconnects = 0
				nc.current.lastErr = nil
				break
			} else {
				nc.mu.Unlock()
				nc.close(DISCONNECTED, false, err)
				nc.mu.Lock()
				// Do not reset nc.current here since it would prevent
				// RetryOnFailedConnect to work should this be the last server
				// to try before starting doReconnect().
			}
		} else {
			// Cancel out default connection refused, will trigger the
			// No servers error conditional
			if strings.Contains(err.Error(), "connection refused") {
				err = nil
			}
		}
	}

	if err == nil && nc.status != CONNECTED {
		err = ErrNoServers
	}

	if err == nil {
		connectionEstablished = true
		nc.initc = false
	} else if nc.Opts.RetryOnFailedConnect {
		// Could not connect to any server, but retry was requested:
		// switch to pending buffering and kick off the reconnect loop.
		nc.setup()
		nc.changeConnStatus(RECONNECTING)
		nc.bw.switchToPending()
		go nc.doReconnect(ErrNoServers, false)
		err = nil
	} else {
		nc.current = nil
	}

	return connectionEstablished, err
}

// This will check to see if the connection should be
// secure. This can be dictated from either end and should
// only be called after the INIT protocol has been received.
func (nc *Conn) checkForSecure() error {
	// Check to see if we need to engage TLS
	o := nc.Opts

	// Check for mismatch in setups
	if o.Secure && !nc.info.TLSRequired && !nc.info.TLSAvailable {
		return ErrSecureConnWanted
	} else if nc.info.TLSRequired && !o.Secure {
		// Switch to Secure since server needs TLS.
		o.Secure = true
	}

	if o.Secure {
		// If TLS handshake first is true, we have already done
		// the handshake, so we are done here.
		if o.TLSHandshakeFirst {
			return nil
		}
		// Need to rewrap with bufio
		if err := nc.makeTLSConn(); err != nil {
			return err
		}
	}
	return nil
}

// processExpectedInfo will look for the expected first INFO message
// sent when a connection is established. The lock should be held entering.
func (nc *Conn) processExpectedInfo() error {
	c := &control{}

	// Read the protocol
	err := nc.readOp(c)
	if err != nil {
		return err
	}

	// The nats protocol should send INFO first always.
	if c.op != _INFO_OP_ {
		return ErrNoInfoReceived
	}

	// Parse the protocol
	if err := nc.processInfo(c.args); err != nil {
		return err
	}

	// Nkey auth requires a server-provided nonce to sign.
	if nc.Opts.Nkey != "" && nc.info.Nonce == "" {
		return ErrNkeysNotSupported
	}

	// For websocket connections, we already switched to TLS if need be,
	// so we are done here.
if nc.ws {
		return nil
	}

	return nc.checkForSecure()
}

// Sends a protocol control message by queuing into the bufio writer
// and kicking the flush Go routine. These writes are protected.
func (nc *Conn) sendProto(proto string) {
	nc.mu.Lock()
	nc.bw.appendString(proto)
	nc.kickFlusher()
	nc.mu.Unlock()
}

// Generate a connect protocol message, issuing user/password if
// applicable. The lock is assumed to be held upon entering.
func (nc *Conn) connectProto() (string, error) {
	o := nc.Opts
	var nkey, sig, user, pass, token, ujwt string
	u := nc.current.url.User
	if u != nil {
		// if no password, assume username is authToken
		if _, ok := u.Password(); !ok {
			token = u.Username()
		} else {
			user = u.Username()
			pass, _ = u.Password()
		}
	} else {
		// Take from options (possibly all empty strings)
		user = o.User
		pass = o.Password
		token = o.Token
		nkey = o.Nkey
		if nc.Opts.UserInfo != nil {
			// The UserInfo callback and static user/pass are exclusive.
			if user != _EMPTY_ || pass != _EMPTY_ {
				return _EMPTY_, ErrUserInfoAlreadySet
			}
			user, pass = nc.Opts.UserInfo()
		}
	}

	// Look for user jwt.
	if o.UserJWT != nil {
		if jwt, err := o.UserJWT(); err != nil {
			return _EMPTY_, err
		} else {
			ujwt = jwt
		}
		if nkey != _EMPTY_ {
			return _EMPTY_, ErrNkeyAndUser
		}
	}

	if ujwt != _EMPTY_ || nkey != _EMPTY_ {
		if o.SignatureCB == nil {
			if ujwt == _EMPTY_ {
				return _EMPTY_, ErrNkeyButNoSigCB
			}
			return _EMPTY_, ErrUserButNoSigCB
		}
		// Sign the server-provided nonce and encode it for the wire.
		sigraw, err := o.SignatureCB([]byte(nc.info.Nonce))
		if err != nil {
			return _EMPTY_, fmt.Errorf("error signing nonce: %w", err)
		}
		sig = base64.RawURLEncoding.EncodeToString(sigraw)
	}

	if nc.Opts.TokenHandler != nil {
		if token != _EMPTY_ {
			return _EMPTY_, ErrTokenAlreadySet
		}
		token = nc.Opts.TokenHandler()
	}

	// If our server does not support headers then we can't do them or no responders.
	hdrs := nc.info.Headers
	cinfo := connectInfo{
		o.Verbose, o.Pedantic, ujwt, nkey, sig, user, pass, token,
		o.Secure, o.Name, LangString, Version, clientProtoInfo, !o.NoEcho, hdrs, hdrs,
	}

	b, err := json.Marshal(cinfo)
	if err != nil {
		return _EMPTY_, ErrJsonParse
	}

	// Check if NoEcho is set and we have a server that supports it.
	if o.NoEcho && nc.info.Proto < 1 {
		return _EMPTY_, ErrNoEchoNotSupported
	}

	return fmt.Sprintf(connectProto, b), nil
}

// normalizeErr removes the prefix -ERR, trim spaces and remove the quotes.
func normalizeErr(line string) string {
	s := strings.TrimSpace(strings.TrimPrefix(line, _ERR_OP_))
	s = strings.TrimLeft(strings.TrimRight(s, "'"), "'")
	return s
}

// natsProtoErr represents an -ERR protocol message sent by the server.
type natsProtoErr struct {
	description string
}

func (nerr *natsProtoErr) Error() string {
	return fmt.Sprintf("nats: %s", nerr.description)
}

func (nerr *natsProtoErr) Is(err error) bool {
	return strings.ToLower(nerr.Error()) == err.Error()
}

// Send a connect protocol message to the server, issue user/password if
// applicable. Will wait for a flush to return from the server for error
// processing.
func (nc *Conn) sendConnect() error {
	// Construct the CONNECT protocol string
	cProto, err := nc.connectProto()
	if err != nil {
		if !nc.initc && nc.Opts.AsyncErrorCB != nil {
			nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) })
		}
		return err
	}

	// Write the protocol and PING directly to the underlying writer.
	if err := nc.bw.writeDirect(cProto, pingProto); err != nil {
		return err
	}

	// We don't want to read more than we need here, otherwise
	// we would need to transfer the excess read data to the readLoop.
	// Since in normal situations we just are looking for a PONG\r\n,
	// reading byte-by-byte here is ok.
	proto, err := nc.readProto()
	if err != nil {
		if !nc.initc && nc.Opts.AsyncErrorCB != nil {
			nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) })
		}
		return err
	}

	// If opts.Verbose is set, handle +OK
	if nc.Opts.Verbose && proto == okProto {
		// Read the rest now...
		proto, err = nc.readProto()
		if err != nil {
			if !nc.initc && nc.Opts.AsyncErrorCB != nil {
				nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) })
			}
			return err
		}
	}

	// We expect a PONG
	if proto != pongProto {
		// But it could be something else, like -ERR

		// Since we no longer use ReadLine(), trim the trailing "\r\n"
		proto = strings.TrimRight(proto, "\r\n")

		// If it's a server error...
		if strings.HasPrefix(proto, _ERR_OP_) {
			// Remove -ERR, trim spaces and quotes, and convert to lower case.
			proto = normalizeErr(proto)

			// Check if this is an auth error
			if authErr := checkAuthError(strings.ToLower(proto)); authErr != nil {
				// This will schedule an async error if we are in reconnect,
				// and keep track of the auth error for the current server.
				// If we have got the same error twice, this sets nc.ar to true to
				// indicate that the reconnect should be aborted (will be checked
				// in doReconnect()).
				nc.processAuthError(authErr)
			}
			return &natsProtoErr{proto}
		}

		// Notify that we got an unexpected protocol.
		return fmt.Errorf("nats: expected '%s', got '%s'", _PONG_OP_, proto)
	}

	// This is where we are truly connected.
	nc.changeConnStatus(CONNECTED)

	return nil
}

// reads a protocol line.
func (nc *Conn) readProto() (string, error) {
	return nc.br.ReadString('\n')
}

// A control protocol line.
type control struct {
	op, args string
}

// Read a control line and process the intended op.
func (nc *Conn) readOp(c *control) error {
	line, err := nc.readProto()
	if err != nil {
		return err
	}
	parseControl(line, c)
	return nil
}

// Parse a control line from the server.
// parseControl splits a raw protocol line into its operation and optional
// arguments, trimming surrounding whitespace from both.
func parseControl(line string, c *control) {
	toks := strings.SplitN(line, _SPC_, 2)
	if len(toks) == 1 {
		c.op = strings.TrimSpace(toks[0])
		c.args = _EMPTY_
	} else if len(toks) == 2 {
		c.op, c.args = strings.TrimSpace(toks[0]), strings.TrimSpace(toks[1])
	} else {
		c.op = _EMPTY_
	}
}

// flushReconnectPendingItems will push the pending items that were
// gathered while we were in a RECONNECTING state to the socket.
func (nc *Conn) flushReconnectPendingItems() error {
	return nc.bw.flushPendingBuffer()
}

// Stops the ping timer if set.
// Connection lock is held on entry.
func (nc *Conn) stopPingTimer() {
	if nc.ptmr != nil {
		nc.ptmr.Stop()
	}
}

// Try to reconnect using the option parameters.
// This function assumes we are allowed to reconnect.
func (nc *Conn) doReconnect(err error, forceReconnect bool) {
	// We want to make sure we have the other watchers shutdown properly
	// here before we proceed past this point.
	nc.waitForExits()

	// FIXME(dlc) - We have an issue here if we have
	// outstanding flush points (pongs) and they were not
	// sent out, but are still in the pipe.

	// Hold the lock manually and release where needed below,
	// can't do defer here.
	nc.mu.Lock()

	// Clear any errors.
	nc.err = nil
	// Perform appropriate callback if needed for a disconnect.
	// DisconnectedErrCB has priority over deprecated DisconnectedCB
	if !nc.initc {
		if nc.Opts.DisconnectedErrCB != nil {
			nc.ach.push(func() { nc.Opts.DisconnectedErrCB(nc, err) })
		} else if nc.Opts.DisconnectedCB != nil {
			nc.ach.push(func() { nc.Opts.DisconnectedCB(nc) })
		}
	}

	// This is used to wait on go routines exit if we start them in the loop
	// but an error occurs after that.
	waitForGoRoutines := false
	var rt *time.Timer
	// Channel used to kick routine out of sleep when conn is closed.
	rqch := nc.rqch
	// if rqch is nil, we need to set it up to signal
	// the reconnect loop to reconnect immediately
	// this means that `ForceReconnect` was called
	// before entering doReconnect
	if rqch == nil {
		rqch = make(chan struct{})
		close(rqch)
	}

	// Counter that is increased when the whole list of servers has been tried.
	var wlf int

	var jitter time.Duration
	var rw time.Duration
	// If a custom reconnect delay handler is set, this takes precedence.
	crd := nc.Opts.CustomReconnectDelayCB
	if crd == nil {
		rw = nc.Opts.ReconnectWait
		// TODO: since we sleep only after the whole list has been tried, we can't
		// rely on individual *srv to know if it is a TLS or non-TLS url.
		// We have to pick which type of jitter to use, for now, we use these hints:
		jitter = nc.Opts.ReconnectJitter
		if nc.Opts.Secure || nc.Opts.TLSConfig != nil {
			jitter = nc.Opts.ReconnectJitterTLS
		}
	}

	for i := 0; len(nc.srvPool) > 0; {
		cur, err := nc.selectNextServer()
		if err != nil {
			nc.err = err
			break
		}

		// Sleep only once the entire pool has been tried in this round
		// (i+1 >= pool size), never on a forced reconnect.
		doSleep := i+1 >= len(nc.srvPool) && !forceReconnect
		forceReconnect = false
		nc.mu.Unlock()

		if !doSleep {
			i++
			// Release the lock to give a chance to a concurrent nc.Close() to break the loop.
			runtime.Gosched()
		} else {
			i = 0
			var st time.Duration
			if crd != nil {
				wlf++
				st = crd(wlf)
			} else {
				st = rw
				if jitter > 0 {
					st += time.Duration(rand.Int63n(int64(jitter)))
				}
			}
			// Reuse a single timer across iterations instead of
			// allocating one per sleep.
			if rt == nil {
				rt = time.NewTimer(st)
			} else {
				rt.Reset(st)
			}
			select {
			case <-rqch:
				rt.Stop()

				// we need to reset the rqch channel to avoid
				// closing a closed channel in the next iteration
				nc.mu.Lock()
				nc.rqch = make(chan struct{})
				nc.mu.Unlock()
			case <-rt.C:
			}
		}
		// If the readLoop, etc.. go routines were started, wait for them to complete.
		if waitForGoRoutines {
			nc.waitForExits()
			waitForGoRoutines = false
		}
		nc.mu.Lock()

		// Check if we have been closed first.
		if nc.isClosed() {
			break
		}

		// Mark that we tried a reconnect
		cur.reconnects++

		// Try to create a new connection
		err = nc.createConn()

		// Not yet connected, retry...
		// Continue to hold the lock
		if err != nil {
			// Perform appropriate callback for a failed connection attempt.
			if nc.Opts.ReconnectErrCB != nil {
				nc.ach.push(func() { nc.Opts.ReconnectErrCB(nc, err) })
			}
			nc.err = nil
			continue
		}

		// We are reconnected
		nc.Reconnects++

		// Process connect logic
		if nc.err = nc.processConnectInit(); nc.err != nil {
			// Check if we should abort reconnect. If so, break out
			// of the loop and connection will be closed.
			if nc.ar {
				break
			}
			nc.changeConnStatus(RECONNECTING)
			continue
		}

		// Clear possible lastErr under the connection lock after
		// a successful processConnectInit().
		nc.current.lastErr = nil

		// Clear out server stats for the server we connected to..
		cur.didConnect = true
		cur.reconnects = 0

		// Send existing subscription state
		nc.resendSubscriptions()

		// Now send off and clear pending buffer
		nc.err = nc.flushReconnectPendingItems()
		if nc.err != nil {
			nc.changeConnStatus(RECONNECTING)
			// Stop the ping timer (if set)
			nc.stopPingTimer()
			// Since processConnectInit() returned without error, the
			// go routines were started, so wait for them to return
			// on the next iteration (after releasing the lock).
			waitForGoRoutines = true
			continue
		}

		// Done with the pending buffer
		nc.bw.doneWithPending()

		// Queue up the correct callback. If we are in initial connect state
		// (using retry on failed connect), we will call the ConnectedCB,
		// otherwise the ReconnectedCB.
		if nc.Opts.ReconnectedCB != nil && !nc.initc {
			nc.ach.push(func() { nc.Opts.ReconnectedCB(nc) })
		} else if nc.Opts.ConnectedCB != nil && nc.initc {
			nc.ach.push(func() { nc.Opts.ConnectedCB(nc) })
		}

		// If we are here with a retry on failed connect, indicate that the
		// initial connect is now complete.
		nc.initc = false

		// Release lock here, we will return below.
		nc.mu.Unlock()

		// Make sure to flush everything
		nc.Flush()

		return
	}

	// Call into close.. We have no servers left..
	if nc.err == nil {
		nc.err = ErrNoServers
	}
	nc.mu.Unlock()
	nc.close(CLOSED, true, nil)
}

// processOpErr handles errors from reading or parsing the protocol.
// The lock should not be held entering this function.
// Returns true when the caller should close the connection.
func (nc *Conn) processOpErr(err error) bool {
	nc.mu.Lock()
	defer nc.mu.Unlock()
	if nc.isConnecting() || nc.isClosed() || nc.isReconnecting() {
		return false
	}

	if nc.Opts.AllowReconnect && nc.status == CONNECTED {
		// Set our new status
		nc.changeConnStatus(RECONNECTING)
		// Stop ping timer if set
		nc.stopPingTimer()
		if nc.conn != nil {
			nc.conn.Close()
			nc.conn = nil
		}

		// Create pending buffer before reconnecting.
		nc.bw.switchToPending()

		// Clear any queued pongs, e.g. pending flush calls.
		nc.clearPendingFlushCalls()

		go nc.doReconnect(err, false)
		return false
	}

	nc.changeConnStatus(DISCONNECTED)
	nc.err = err
	return true
}

// dispatch is responsible for calling any async callbacks
func (ac *asyncCallbacksHandler) asyncCBDispatcher() {
	for {
		ac.mu.Lock()

		// Protect for spurious wakeups. We should get out of the
		// wait only if there is an element to pop from the list.
		for ac.head == nil {
			ac.cond.Wait()
		}
		cur := ac.head
		ac.head = cur.next
		if cur == ac.tail {
			ac.tail = nil
		}

		ac.mu.Unlock()

		// This signals that the dispatcher has been closed and all
		// previous callbacks have been dispatched.
		if cur.f == nil {
			return
		}
		// Invoke callback outside of handler's lock
		cur.f()
	}
}

// Add the given function to the tail of the list and
// signals the dispatcher.
func (ac *asyncCallbacksHandler) push(f func()) {
	ac.pushOrClose(f, false)
}

// Signals that we are closing...
func (ac *asyncCallbacksHandler) close() {
	ac.pushOrClose(nil, true)
}

// Add the given function to the tail of the list and
// signals the dispatcher. A nil f together with close=true is the
// sentinel that tells the dispatcher to exit.
func (ac *asyncCallbacksHandler) pushOrClose(f func(), close bool) {
	ac.mu.Lock()
	defer ac.mu.Unlock()

	// Make sure that library is not calling push with nil function,
	// since this is used to notify the dispatcher that it should stop.
	if !close && f == nil {
		panic("pushing a nil callback")
	}

	cb := &asyncCB{f: f}
	if ac.tail != nil {
		ac.tail.next = cb
	} else {
		ac.head = cb
	}
	ac.tail = cb

	if close {
		ac.cond.Broadcast()
	} else {
		ac.cond.Signal()
	}
}

// readLoop() will sit on the socket reading and processing the
// protocol from the server. It will dispatch appropriately based
// on the op type.
func (nc *Conn) readLoop() {
	// Release the wait group on exit
	defer nc.wg.Done()

	// Create a parseState if needed.
	nc.mu.Lock()
	if nc.ps == nil {
		nc.ps = &parseState{}
	}
	conn := nc.conn
	br := nc.br
	nc.mu.Unlock()

	if conn == nil {
		return
	}

	for {
		buf, err := br.Read()
		if err == nil {
			// With websocket, it is possible that there is no error but
			// also no buffer returned (either WS control message or read of a
			// partial compressed message). We could call parse(buf) which
			// would ignore an empty buffer, but simply go back to top of the loop.
			if len(buf) == 0 {
				continue
			}
			err = nc.parse(buf)
		}
		if err != nil {
			if shouldClose := nc.processOpErr(err); shouldClose {
				nc.close(CLOSED, true, nil)
			}
			break
		}
	}
	// Clear the parseState here..
	nc.mu.Lock()
	nc.ps = nil
	nc.mu.Unlock()
}

// waitForMsgs waits on the conditional shared with readLoop and processMsg.
// It is used to deliver messages to asynchronous subscribers.
func (nc *Conn) waitForMsgs(s *Subscription) {
	var closed bool
	var delivered, max uint64

	// Used to account for adjustments to sub.pBytes when we wrap back around.
	msgLen := -1

	for {
		s.mu.Lock()
		// Do accounting for last msg delivered here so we only lock once
		// and drain state trips after callback has returned.
		if msgLen >= 0 {
			s.pMsgs--
			s.pBytes -= msgLen
			msgLen = -1
		}

		if s.pHead == nil && !s.closed {
			s.pCond.Wait()
		}
		// Pop the msg off the list
		m := s.pHead
		if m != nil {
			s.pHead = m.next
			if s.pHead == nil {
				s.pTail = nil
			}
			// A barrier entry is not delivered to the callback: drop its
			// ref count and run its function once the count reaches zero.
			if m.barrier != nil {
				s.mu.Unlock()
				if atomic.AddInt64(&m.barrier.refs, -1) == 0 {
					m.barrier.f()
				}
				continue
			}
			msgLen = len(m.Data)
		}
		mcb := s.mcb
		max = s.max
		closed = s.closed
		var fcReply string
		if !s.closed {
			s.delivered++
			delivered = s.delivered
			if s.jsi != nil {
				fcReply = s.checkForFlowControlResponse()
			}
		}
		s.mu.Unlock()

		// Respond to flow control if applicable
		if fcReply != _EMPTY_ {
			nc.Publish(fcReply, nil)
		}

		if closed {
			break
		}

		// Deliver the message.
		if m != nil && (max == 0 || delivered <= max) {
			mcb(m)
		}
		// If we have hit the max for delivered msgs, remove sub.
		if max > 0 && delivered >= max {
			nc.mu.Lock()
			nc.removeSub(s)
			nc.mu.Unlock()
			break
		}
	}
	// Check for barrier messages
	s.mu.Lock()
	for m := s.pHead; m != nil; m = s.pHead {
		if m.barrier != nil {
			s.mu.Unlock()
			if atomic.AddInt64(&m.barrier.refs, -1) == 0 {
				m.barrier.f()
			}
			s.mu.Lock()
		}
		s.pHead = m.next
	}
	// Now check for pDone
	done := s.pDone
	s.mu.Unlock()
	if done != nil {
		done(s.Subject)
	}
}

// Used for debugging and simulating loss for certain tests.
// Return what is to be used. If we return nil the message will be dropped.
type msgFilter func(m *Msg) *Msg

// processMsg is called by parse and will place the msg on the
// appropriate channel/pending queue for processing. If the channel is full,
// or the pending queue is over the pending limits, the connection is
// considered a slow consumer.
func (nc *Conn) processMsg(data []byte) {
	// Stats
	atomic.AddUint64(&nc.InMsgs, 1)
	atomic.AddUint64(&nc.InBytes, uint64(len(data)))

	// Don't lock the connection to avoid server cutting us off if the
	// flusher is holding the connection lock, trying to send to the server
	// that is itself trying to send data to us.
	nc.subsMu.RLock()
	sub := nc.subs[nc.ps.ma.sid]
	var mf msgFilter
	if nc.filters != nil {
		mf = nc.filters[string(nc.ps.ma.subject)]
	}
	nc.subsMu.RUnlock()

	if sub == nil {
		return
	}

	// Copy them into string
	subj := string(nc.ps.ma.subject)
	reply := string(nc.ps.ma.reply)

	// Doing message create outside of the sub's lock to reduce contention.
	// It's possible that we end-up not using the message, but that's ok.

	// FIXME(dlc): Need to copy, should/can do COW?
	msgPayload := data
	if !nc.ps.msgCopied {
		msgPayload = make([]byte, len(data))
		copy(msgPayload, data)
	}

	// Check if we have headers encoded here.
	var h Header
	var err error
	var ctrlMsg bool
	var ctrlType int
	var fcReply string

	if nc.ps.ma.hdr > 0 {
		hbuf := msgPayload[:nc.ps.ma.hdr]
		msgPayload = msgPayload[nc.ps.ma.hdr:]
		h, err = DecodeHeadersMsg(hbuf)
		if err != nil {
			// We will pass the message through but send async error.
			nc.mu.Lock()
			nc.err = ErrBadHeaderMsg
			if nc.Opts.AsyncErrorCB != nil {
				nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, sub, ErrBadHeaderMsg) })
			}
			nc.mu.Unlock()
		}
	}

	// FIXME(dlc): Should we recycle these containers?
	m := &Msg{
		Subject: subj,
		Reply:   reply,
		Header:  h,
		Data:    msgPayload,
		Sub:     sub,
		wsz:     len(data) + len(subj) + len(reply),
	}

	// Check for message filters.
	if mf != nil {
		if m = mf(m); m == nil {
			// Drop message.
			return
		}
	}

	sub.mu.Lock()

	// Check if closed.
	if sub.closed {
		sub.mu.Unlock()
		return
	}

	// Skip flow control messages in case of using a JetStream context.
	jsi := sub.jsi
	if jsi != nil {
		// There has to be a header for it to be a control message.
		if h != nil {
			ctrlMsg, ctrlType = isJSControlMessage(m)
			if ctrlMsg && ctrlType == jsCtrlHB {
				// Check if the heartbeat has a "Consumer Stalled" header, if
				// so, the value is the FC reply to send a nil message to.
				// We will send it at the end of this function.
				fcReply = m.Header.Get(consumerStalledHdr)
			}
		}
		// Check for ordered consumer here. If checkOrderedMsgs returns true that means it detected a gap.
		if !ctrlMsg && jsi.ordered && sub.checkOrderedMsgs(m) {
			sub.mu.Unlock()
			return
		}
	}

	// Skip processing if this is a control message and
	// if not a pull consumer heartbeat. For pull consumers,
	// heartbeats have to be handled on per request basis.
	if !ctrlMsg || (jsi != nil && jsi.pull) {
		var chanSubCheckFC bool
		// Subscription internal stats (applicable only for non ChanSubscription's)
		if sub.typ != ChanSubscription {
			sub.pMsgs++
			if sub.pMsgs > sub.pMsgsMax {
				sub.pMsgsMax = sub.pMsgs
			}
			sub.pBytes += len(m.Data)
			if sub.pBytes > sub.pBytesMax {
				sub.pBytesMax = sub.pBytes
			}

			// Check for a Slow Consumer
			if (sub.pMsgsLimit > 0 && sub.pMsgs > sub.pMsgsLimit) ||
				(sub.pBytesLimit > 0 && sub.pBytes > sub.pBytesLimit) {
				goto slowConsumer
			}
		} else if jsi != nil {
			chanSubCheckFC = true
		}

		// We have two modes of delivery. One is the channel, used by channel
		// subscribers and syncSubscribers, the other is a linked list for async.
		if sub.mch != nil {
			select {
			case sub.mch <- m:
			default:
				goto slowConsumer
			}
		} else {
			// Push onto the async pList
			if sub.pHead == nil {
				sub.pHead = m
				sub.pTail = m
				if sub.pCond != nil {
					sub.pCond.Signal()
				}
			} else {
				sub.pTail.next = m
				sub.pTail = m
			}
		}
		if jsi != nil {
			// Store the ACK metadata from the message to
			// compare later on with the received heartbeat.
			sub.trackSequences(m.Reply)
			if chanSubCheckFC {
				// For ChanSubscription, since we can't call this when a message
				// is "delivered" (since user is pull from their own channel),
				// we have a go routine that does this check, however, we do it
				// also here to make it much more responsive. The go routine is
				// really to avoid stalling when there is no new messages coming.
				fcReply = sub.checkForFlowControlResponse()
			}
		}
	} else if ctrlType == jsCtrlFC && m.Reply != _EMPTY_ {
		// This is a flow control message.
		// We will schedule the send of the FC reply once we have delivered the
		// DATA message that was received before this flow control message, which
		// has sequence `jsi.fciseq`. However, it is possible that this message
		// has already been delivered, in that case, we need to send the FC reply now.
		if sub.getJSDelivered() >= jsi.fciseq {
			fcReply = m.Reply
		} else {
			// Schedule a reply after the previous message is delivered.
			sub.scheduleFlowControlResponse(m.Reply)
		}
	}

	// Clear any SlowConsumer status.
	if sub.sc {
		sub.changeSubStatus(SubscriptionActive)
	}
	sub.sc = false
	sub.mu.Unlock()

	if fcReply != _EMPTY_ {
		nc.Publish(fcReply, nil)
	}

	// Handle control heartbeat messages.
	if ctrlMsg && ctrlType == jsCtrlHB && m.Reply == _EMPTY_ {
		nc.checkForSequenceMismatch(m, sub, jsi)
	}

	return

slowConsumer:
	sub.dropped++
	sc := !sub.sc
	sub.sc = true
	// Undo stats from above
	if sub.typ != ChanSubscription {
		sub.pMsgs--
		sub.pBytes -= len(m.Data)
	}
	if sc {
		sub.changeSubStatus(SubscriptionSlowConsumer)
		sub.mu.Unlock()
		// Now we need connection's lock and we may end-up in the situation
		// that we were trying to avoid, except that in this case, the client
		// is already experiencing client-side slow consumer situation.
		nc.mu.Lock()
		nc.err = ErrSlowConsumer
		if nc.Opts.AsyncErrorCB != nil {
			nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, sub, ErrSlowConsumer) })
		}
		nc.mu.Unlock()
	} else {
		sub.mu.Unlock()
	}
}

var (
	permissionsRe      = regexp.MustCompile(`Subscription to "(\S+)"`)
	permissionsQueueRe = regexp.MustCompile(`using queue "(\S+)"`)
)

// processTransientError is called when the server signals a non terminal error
// which does not close the connection or trigger a reconnect.
// This will trigger the async error callback if set.
// These errors include the following:
// - permissions violation on publish or subscribe
// - maximum subscriptions exceeded
func (nc *Conn) processTransientError(err error) {
	nc.mu.Lock()
	nc.err = err
	if errors.Is(err, ErrPermissionViolation) {
		// Extract subject (and optional queue) from the server error text
		// and mark the matching subscriptions with the permissions error.
		matches := permissionsRe.FindStringSubmatch(err.Error())
		if len(matches) >= 2 {
			queueMatches := permissionsQueueRe.FindStringSubmatch(err.Error())
			var q string
			if len(queueMatches) >= 2 {
				q = queueMatches[1]
			}
			subject := matches[1]
			for _, sub := range nc.subs {
				if sub.Subject == subject && sub.Queue == q && sub.permissionsErr == nil {
					sub.mu.Lock()
					if sub.errCh != nil {
						sub.errCh <- err
					}
					sub.permissionsErr = err
					sub.mu.Unlock()
				}
			}
		}
	}
	if nc.Opts.AsyncErrorCB != nil {
		nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) })
	}
	nc.mu.Unlock()
}

// processAuthError generally processing for auth errors. We want to do retries
// unless we get the same error again. This allows us for instance to swap credentials
// and have the app reconnect, but if nothing is changing we should bail.
// This function will return true if the connection should be closed, false otherwise.
// Connection lock is held on entry
func (nc *Conn) processAuthError(err error) bool {
	nc.err = err
	if !nc.initc && nc.Opts.AsyncErrorCB != nil {
		nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) })
	}
	// We should give up if we tried twice on this server and got the
	// same error. This behavior can be modified using IgnoreAuthErrorAbort.
	if nc.current.lastErr == err && !nc.Opts.IgnoreAuthErrorAbort {
		nc.ar = true
	} else {
		nc.current.lastErr = err
	}
	return nc.ar
}

// flusher is a separate Go routine that will process flush requests for the write
// bufio. This allows coalescing of writes to the underlying socket.
func (nc *Conn) flusher() {
	// Release the wait group
	defer nc.wg.Done()

	// snapshot the bw and conn since they can change from underneath of us.
	nc.mu.Lock()
	bw := nc.bw
	conn := nc.conn
	fch := nc.fch
	nc.mu.Unlock()

	if conn == nil || bw == nil {
		return
	}

	for {
		// Block until kicked (or exit when the flush channel is closed).
		if _, ok := <-fch; !ok {
			return
		}
		nc.mu.Lock()

		// Check to see if we should bail out.
		if !nc.isConnected() || nc.isConnecting() || conn != nc.conn {
			nc.mu.Unlock()
			return
		}
		if bw.buffered() > 0 {
			if err := bw.flush(); err != nil {
				if nc.err == nil {
					nc.err = err
				}
				if nc.Opts.AsyncErrorCB != nil {
					nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) })
				}
			}
		}
		nc.mu.Unlock()
	}
}

// processPing will send an immediate pong protocol response to the
// server. The server uses this mechanism to detect dead clients.
func (nc *Conn) processPing() {
	nc.sendProto(pongProto)
}

// processPong is used to process responses to the client's ping
// messages. We use pings for the flush mechanism as well.
func (nc *Conn) processPong() {
	var ch chan struct{}

	nc.mu.Lock()
	// Pop the oldest pong channel (FIFO) and reset the outstanding
	// ping counter.
	if len(nc.pongs) > 0 {
		ch = nc.pongs[0]
		nc.pongs = append(nc.pongs[:0], nc.pongs[1:]...)
	}
	nc.pout = 0
	nc.mu.Unlock()
	if ch != nil {
		ch <- struct{}{}
	}
}

// processOK is a placeholder for processing OK messages.
func (nc *Conn) processOK() {
	// do nothing
}

// processInfo is used to parse the info messages sent
// from the server.
// This function may update the server pool.
func (nc *Conn) processInfo(info string) error {
	if info == _EMPTY_ {
		return nil
	}
	var ncInfo serverInfo
	if err := json.Unmarshal([]byte(info), &ncInfo); err != nil {
		return err
	}

	// Copy content into connection's info structure.
	nc.info = ncInfo
	// The array could be empty/not present on initial connect,
	// if advertise is disabled on that server, or servers that
	// did not include themselves in the async INFO protocol.
	// If empty, do not remove the implicit servers from the pool.
	if len(nc.info.ConnectURLs) == 0 {
		if !nc.initc && ncInfo.LameDuckMode && nc.Opts.LameDuckModeHandler != nil {
			nc.ach.push(func() { nc.Opts.LameDuckModeHandler(nc) })
		}
		return nil
	}
	// Note about pool randomization: when the pool was first created,
	// it was randomized (if allowed). We keep the order the same (removing
	// implicit servers that are no longer sent to us). New URLs are sent
	// to us in no specific order so don't need extra randomization.
	hasNew := false
	// This is what we got from the server we are connected to.
	urls := nc.info.ConnectURLs
	// Transform that to a map for easy lookups
	tmp := make(map[string]struct{}, len(urls))
	for _, curl := range urls {
		tmp[curl] = struct{}{}
	}
	// Walk the pool and removed the implicit servers that are no longer in the
	// given array/map
	sp := nc.srvPool
	for i := 0; i < len(sp); i++ {
		srv := sp[i]
		curl := srv.url.Host
		// Check if this URL is in the INFO protocol
		_, inInfo := tmp[curl]
		// Remove from the temp map so that at the end we are left with only
		// new (or restarted) servers that need to be added to the pool.
		delete(tmp, curl)
		// Keep servers that were set through Options, but also the one that
		// we are currently connected to (even if it is a discovered server).
		if !srv.isImplicit || srv.url == nc.current.url {
			continue
		}
		if !inInfo {
			// Remove from server pool. Keep current order.
			copy(sp[i:], sp[i+1:])
			nc.srvPool = sp[:len(sp)-1]
			sp = nc.srvPool
			i--
		}
	}
	// Figure out if we should save off the current non-IP hostname if we encounter a bare IP.
	saveTLS := nc.current != nil && !hostIsIP(nc.current.url)

	// If there are any left in the tmp map, these are new (or restarted) servers
	// and need to be added to the pool.
	for curl := range tmp {
		// Before adding, check if this is a new (as in never seen) URL.
		// This is used to figure out if we invoke the DiscoveredServersCB
		if _, present := nc.urls[curl]; !present {
			hasNew = true
		}
		nc.addURLToPool(fmt.Sprintf("%s://%s", nc.connScheme(), curl), true, saveTLS)
	}
	if hasNew {
		// Randomize the pool if allowed but leave the first URL in place.
		if !nc.Opts.NoRandomize {
			nc.shufflePool(1)
		}
		if !nc.initc && nc.Opts.DiscoveredServersCB != nil {
			nc.ach.push(func() { nc.Opts.DiscoveredServersCB(nc) })
		}
	}
	if !nc.initc && ncInfo.LameDuckMode && nc.Opts.LameDuckModeHandler != nil {
		nc.ach.push(func() { nc.Opts.LameDuckModeHandler(nc) })
	}
	return nil
}

// processAsyncInfo does the same than processInfo, but is called
// from the parser. Calls processInfo under connection's lock
// protection.
func (nc *Conn) processAsyncInfo(info []byte) {
	nc.mu.Lock()
	// Ignore errors, we will simply not update the server pool...
	nc.processInfo(string(info))
	nc.mu.Unlock()
}

// LastError reports the last error encountered via the connection.
// It can be used reliably within ClosedCB in order to find out reason
// why connection was closed for example.
func (nc *Conn) LastError() error {
	if nc == nil {
		return ErrInvalidConnection
	}
	nc.mu.RLock()
	err := nc.err
	nc.mu.RUnlock()
	return err
}

// Check if the given error string is an auth error, and if so returns
// the corresponding ErrXXX error, nil otherwise
func checkAuthError(e string) error {
	if strings.HasPrefix(e, AUTHORIZATION_ERR) {
		return ErrAuthorization
	}
	if strings.HasPrefix(e, AUTHENTICATION_EXPIRED_ERR) {
		return ErrAuthExpired
	}
	if strings.HasPrefix(e, AUTHENTICATION_REVOKED_ERR) {
		return ErrAuthRevoked
	}
	if strings.HasPrefix(e, ACCOUNT_AUTHENTICATION_EXPIRED_ERR) {
		return ErrAccountAuthExpired
	}
	return nil
}

// processErr processes any error messages from the server and
// sets the connection's LastError.
func (nc *Conn) processErr(ie string) {
	// Trim, remove quotes
	ne := normalizeErr(ie)
	// convert to lower case.
	e := strings.ToLower(ne)

	var close bool

	// FIXME(dlc) - process Slow Consumer signals special.
	if e == STALE_CONNECTION {
		close = nc.processOpErr(ErrStaleConnection)
	} else if e == MAX_CONNECTIONS_ERR {
		close = nc.processOpErr(ErrMaxConnectionsExceeded)
	} else if strings.HasPrefix(e, PERMISSIONS_ERR) {
		nc.processTransientError(fmt.Errorf("%w: %s", ErrPermissionViolation, ne))
	} else if strings.HasPrefix(e, MAX_SUBSCRIPTIONS_ERR) {
		nc.processTransientError(ErrMaxSubscriptionsExceeded)
	} else if authErr := checkAuthError(e); authErr != nil {
		nc.mu.Lock()
		close = nc.processAuthError(authErr)
		nc.mu.Unlock()
	} else {
		close = true
		nc.mu.Lock()
		nc.err = errors.New("nats: " + ne)
		nc.mu.Unlock()
	}
	if close {
		nc.close(CLOSED, true, nil)
	}
}

// kickFlusher will send a bool on a channel to kick the
// flush Go routine to flush data to the server.
func (nc *Conn) kickFlusher() {
	if nc.bw != nil {
		select {
		case nc.fch <- struct{}{}:
		default:
		}
	}
}

// Publish publishes the data argument to the given subject. The data
// argument is left untouched and needs to be correctly interpreted on
// the receiver.
func (nc *Conn) Publish(subj string, data []byte) error {
	return nc.publish(subj, _EMPTY_, nil, data)
}

// Header represents the optional Header for a NATS message,
// based on the implementation of http.Header.
type Header map[string][]string

// Add adds the key, value pair to the header. It is case-sensitive
// and appends to any existing values associated with key.
func (h Header) Add(key, value string) {
	h[key] = append(h[key], value)
}

// Set sets the header entries associated with key to the single
// element value. It is case-sensitive and replaces any existing
// values associated with key.
func (h Header) Set(key, value string) {
	h[key] = []string{value}
}

// Get gets the first value associated with the given key.
// It is case-sensitive.
func (h Header) Get(key string) string { if h == nil { return _EMPTY_ } if v := h[key]; v != nil { return v[0] } return _EMPTY_ } // Values returns all values associated with the given key. // It is case-sensitive. func (h Header) Values(key string) []string { return h[key] } // Del deletes the values associated with a key. // It is case-sensitive. func (h Header) Del(key string) { delete(h, key) } // NewMsg creates a message for publishing that will use headers. func NewMsg(subject string) *Msg { return &Msg{ Subject: subject, Header: make(Header), } } const ( hdrLine = "NATS/1.0\r\n" crlf = "\r\n" hdrPreEnd = len(hdrLine) - len(crlf) statusHdr = "Status" descrHdr = "Description" lastConsumerSeqHdr = "Nats-Last-Consumer" lastStreamSeqHdr = "Nats-Last-Stream" consumerStalledHdr = "Nats-Consumer-Stalled" noResponders = "503" noMessagesSts = "404" reqTimeoutSts = "408" jetStream409Sts = "409" controlMsg = "100" statusLen = 3 // e.g. 20x, 40x, 50x ) // DecodeHeadersMsg will decode and headers. func DecodeHeadersMsg(data []byte) (Header, error) { br := bufio.NewReaderSize(bytes.NewReader(data), 128) tp := textproto.NewReader(br) l, err := tp.ReadLine() if err != nil || len(l) < hdrPreEnd || l[:hdrPreEnd] != hdrLine[:hdrPreEnd] { return nil, ErrBadHeaderMsg } mh, err := readMIMEHeader(tp) if err != nil { return nil, err } // Check if we have an inlined status. if len(l) > hdrPreEnd { var description string status := strings.TrimSpace(l[hdrPreEnd:]) if len(status) != statusLen { description = strings.TrimSpace(status[statusLen:]) status = status[:statusLen] } mh.Add(statusHdr, status) if len(description) > 0 { mh.Add(descrHdr, description) } } return Header(mh), nil } // readMIMEHeader returns a MIMEHeader that preserves the // original case of the MIME header, based on the implementation // of textproto.ReadMIMEHeader. 
//
// https://golang.org/pkg/net/textproto/#Reader.ReadMIMEHeader
func readMIMEHeader(tp *textproto.Reader) (textproto.MIMEHeader, error) {
	m := make(textproto.MIMEHeader)
	for {
		kv, err := tp.ReadLine()
		// An empty line terminates the header block; return any read error.
		if len(kv) == 0 {
			return m, err
		}

		// Process key fetching original case.
		i := strings.IndexByte(kv, ':')
		if i < 0 {
			return nil, ErrBadHeaderMsg
		}
		key := kv[:i]
		if key == "" {
			// Skip empty keys.
			continue
		}
		i++
		// Skip leading spaces/tabs before the value.
		for i < len(kv) && (kv[i] == ' ' || kv[i] == '\t') {
			i++
		}
		m[key] = append(m[key], kv[i:])
		if err != nil {
			return m, err
		}
	}
}

// PublishMsg publishes the Msg structure, which includes the
// Subject, an optional Reply and an optional Data field.
func (nc *Conn) PublishMsg(m *Msg) error {
	if m == nil {
		return ErrInvalidMsg
	}
	hdr, err := m.headerBytes()
	if err != nil {
		return err
	}
	return nc.publish(m.Subject, m.Reply, hdr, m.Data)
}

// PublishRequest will perform a Publish() expecting a response on the
// reply subject. Use Request() for automatically waiting for a response
// inline.
func (nc *Conn) PublishRequest(subj, reply string, data []byte) error {
	return nc.publish(subj, reply, nil, data)
}

// Used for handrolled Itoa
const digits = "0123456789"

// publish is the internal function to publish messages to a nats-server.
// Sends a protocol data message by queuing into the bufio writer
// and kicking the flush go routine. These writes should be protected.
func (nc *Conn) publish(subj, reply string, hdr, data []byte) error {
	if nc == nil {
		return ErrInvalidConnection
	}
	if subj == "" {
		return ErrBadSubject
	}
	nc.mu.Lock()

	// Check if headers attempted to be sent to server that does not support them.
	if len(hdr) > 0 && !nc.info.Headers {
		nc.mu.Unlock()
		return ErrHeadersNotSupported
	}

	if nc.isClosed() {
		nc.mu.Unlock()
		return ErrConnectionClosed
	}

	if nc.isDrainingPubs() {
		nc.mu.Unlock()
		return ErrConnectionDraining
	}

	// Proactively reject payloads over the threshold set by server.
	msgSize := int64(len(data) + len(hdr))
	// Skip this check if we are not yet connected (RetryOnFailedConnect)
	if !nc.initc && msgSize > nc.info.MaxPayload {
		nc.mu.Unlock()
		return ErrMaxPayload
	}

	// Check if we are reconnecting, and if so check if
	// we have exceeded our reconnect outbound buffer limits.
	if nc.bw.atLimitIfUsingPending() {
		nc.mu.Unlock()
		return ErrReconnectBufExceeded
	}

	// NOTE(review): nc.scratch presumably starts with the "HPUB " verb;
	// slicing from index 1 drops the leading 'H' to get "PUB " when no
	// headers are sent — confirm against the scratch initialization.
	var mh []byte
	if hdr != nil {
		mh = nc.scratch[:len(_HPUB_P_)]
	} else {
		mh = nc.scratch[1:len(_HPUB_P_)]
	}
	mh = append(mh, subj...)
	mh = append(mh, ' ')
	if reply != "" {
		mh = append(mh, reply...)
		mh = append(mh, ' ')
	}

	// We could be smarter here, but simple loop is ok,
	// just avoid strconv in fast path.
	// FIXME(dlc) - Find a better way here.
	// msgh = strconv.AppendInt(msgh, int64(len(data)), 10)
	// go 1.14 some values strconv faster, may be able to switch over.

	// Render decimal digits right-to-left into the fixed buffer b.
	var b [12]byte
	i := len(b)
	if hdr != nil {
		if len(hdr) > 0 {
			for l := len(hdr); l > 0; l /= 10 {
				i--
				b[i] = digits[l%10]
			}
		} else {
			i--
			b[i] = digits[0]
		}
		mh = append(mh, b[i:]...)
		mh = append(mh, ' ')
		// reset for below.
		i = len(b)
	}

	if msgSize > 0 {
		for l := msgSize; l > 0; l /= 10 {
			i--
			b[i] = digits[l%10]
		}
	} else {
		i--
		b[i] = digits[0]
	}

	mh = append(mh, b[i:]...)
	mh = append(mh, _CRLF_...)

	if err := nc.bw.appendBufs(mh, hdr, data, _CRLF_BYTES_); err != nil {
		nc.mu.Unlock()
		return err
	}

	nc.OutMsgs++
	nc.OutBytes += uint64(len(data) + len(hdr))

	if len(nc.fch) == 0 {
		nc.kickFlusher()
	}
	nc.mu.Unlock()
	return nil
}

// respHandler is the global response handler. It will look up
// the appropriate channel based on the last token and place
// the message on the channel if possible.
func (nc *Conn) respHandler(m *Msg) {
	nc.mu.Lock()

	// Just return if closed.
	if nc.isClosed() {
		nc.mu.Unlock()
		return
	}

	var mch chan *Msg

	// Grab mch
	rt := nc.respToken(m.Subject)
	if rt != _EMPTY_ {
		mch = nc.respMap[rt]
		// Delete the key regardless, one response only.
		delete(nc.respMap, rt)
	} else if len(nc.respMap) == 1 {
		// If the server has rewritten the subject, the response token (rt)
		// will not match (could be the case with JetStream). If that is the
		// case and there is a single entry, use that.
		for k, v := range nc.respMap {
			mch = v
			delete(nc.respMap, k)
			break
		}
	}
	nc.mu.Unlock()

	// Don't block, let Request timeout instead, mch is
	// buffered and we should delete the key before a
	// second response is processed.
	select {
	case mch <- m:
	default:
		return
	}
}

// Helper to setup and send new request style requests. Return the chan to receive the response.
func (nc *Conn) createNewRequestAndSend(subj string, hdr, data []byte) (chan *Msg, string, error) {
	nc.mu.Lock()
	// Create new literal Inbox and map to a chan msg.
	mch := make(chan *Msg, RequestChanLen)
	respInbox := nc.newRespInbox()
	token := respInbox[nc.respSubLen:]

	nc.respMap[token] = mch
	if nc.respMux == nil {
		// Create the response subscription we will use for all new style responses.
		// This will be on an _INBOX with an additional terminal token. The subscription
		// will be on a wildcard.
		s, err := nc.subscribeLocked(nc.respSub, _EMPTY_, nc.respHandler, nil, nil, false, nil)
		if err != nil {
			nc.mu.Unlock()
			return nil, token, err
		}
		nc.respMux = s
	}
	nc.mu.Unlock()

	if err := nc.publish(subj, respInbox, hdr, data); err != nil {
		return nil, token, err
	}

	return mch, token, nil
}

// RequestMsg will send a request payload including optional headers and deliver
// the response message, or an error, including a timeout if no message was received properly.
func (nc *Conn) RequestMsg(msg *Msg, timeout time.Duration) (*Msg, error) {
	if msg == nil {
		return nil, ErrInvalidMsg
	}
	hdr, err := msg.headerBytes()
	if err != nil {
		return nil, err
	}

	return nc.request(msg.Subject, hdr, msg.Data, timeout)
}

// Request will send a request payload and deliver the response message,
// or an error, including a timeout if no message was received properly.
func (nc *Conn) Request(subj string, data []byte, timeout time.Duration) (*Msg, error) { return nc.request(subj, nil, data, timeout) } func (nc *Conn) useOldRequestStyle() bool { nc.mu.RLock() r := nc.Opts.UseOldRequestStyle nc.mu.RUnlock() return r } func (nc *Conn) request(subj string, hdr, data []byte, timeout time.Duration) (*Msg, error) { if nc == nil { return nil, ErrInvalidConnection } var m *Msg var err error if nc.useOldRequestStyle() { m, err = nc.oldRequest(subj, hdr, data, timeout) } else { m, err = nc.newRequest(subj, hdr, data, timeout) } // Check for no responder status. if err == nil && len(m.Data) == 0 && m.Header.Get(statusHdr) == noResponders { m, err = nil, ErrNoResponders } return m, err } func (nc *Conn) newRequest(subj string, hdr, data []byte, timeout time.Duration) (*Msg, error) { mch, token, err := nc.createNewRequestAndSend(subj, hdr, data) if err != nil { return nil, err } t := globalTimerPool.Get(timeout) defer globalTimerPool.Put(t) var ok bool var msg *Msg select { case msg, ok = <-mch: if !ok { return nil, ErrConnectionClosed } case <-t.C: nc.mu.Lock() delete(nc.respMap, token) nc.mu.Unlock() return nil, ErrTimeout } return msg, nil } // oldRequest will create an Inbox and perform a Request() call // with the Inbox reply and return the first reply received. // This is optimized for the case of multiple responses. func (nc *Conn) oldRequest(subj string, hdr, data []byte, timeout time.Duration) (*Msg, error) { inbox := nc.NewInbox() ch := make(chan *Msg, RequestChanLen) s, err := nc.subscribe(inbox, _EMPTY_, nil, ch, nil, true, nil) if err != nil { return nil, err } s.AutoUnsubscribe(1) defer s.Unsubscribe() err = nc.publish(subj, inbox, hdr, data) if err != nil { return nil, err } return s.NextMsg(timeout) } // InboxPrefix is the prefix for all inbox subjects. const ( InboxPrefix = "_INBOX." 
inboxPrefixLen = len(InboxPrefix) replySuffixLen = 8 // Gives us 62^8 rdigits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" base = 62 ) // NewInbox will return an inbox string which can be used for directed replies from // subscribers. These are guaranteed to be unique, but can be shared and subscribed // to by others. func NewInbox() string { var b [inboxPrefixLen + nuidSize]byte pres := b[:inboxPrefixLen] copy(pres, InboxPrefix) ns := b[inboxPrefixLen:] copy(ns, nuid.Next()) return string(b[:]) } // Create a new inbox that is prefix aware. func (nc *Conn) NewInbox() string { if nc.Opts.InboxPrefix == _EMPTY_ { return NewInbox() } var sb strings.Builder sb.WriteString(nc.Opts.InboxPrefix) sb.WriteByte('.') sb.WriteString(nuid.Next()) return sb.String() } // Function to init new response structures. func (nc *Conn) initNewResp() { nc.respSubPrefix = fmt.Sprintf("%s.", nc.NewInbox()) nc.respSubLen = len(nc.respSubPrefix) nc.respSub = fmt.Sprintf("%s*", nc.respSubPrefix) nc.respMap = make(map[string]chan *Msg) nc.respRand = rand.New(rand.NewSource(time.Now().UnixNano())) } // newRespInbox creates a new literal response subject // that will trigger the mux subscription handler. // Lock should be held. func (nc *Conn) newRespInbox() string { if nc.respMap == nil { nc.initNewResp() } var sb strings.Builder sb.WriteString(nc.respSubPrefix) rn := nc.respRand.Int63() for i := 0; i < replySuffixLen; i++ { sb.WriteByte(rdigits[rn%base]) rn /= base } return sb.String() } // NewRespInbox is the new format used for _INBOX. func (nc *Conn) NewRespInbox() string { nc.mu.Lock() s := nc.newRespInbox() nc.mu.Unlock() return s } // respToken will return the last token of a literal response inbox // which we use for the message channel lookup. This needs to verify the subject // prefix matches to protect itself against the server changing the subject. // Lock should be held. 
func (nc *Conn) respToken(respInbox string) string { if token, found := strings.CutPrefix(respInbox, nc.respSubPrefix); found { return token } return "" } // Subscribe will express interest in the given subject. The subject // can have wildcards. // There are two type of wildcards: * for partial, and > for full. // A subscription on subject time.*.east would receive messages sent to time.us.east and time.eu.east. // A subscription on subject time.us.> would receive messages sent to // time.us.east and time.us.east.atlanta, while time.us.* would only match time.us.east // since it can't match more than one token. // Messages will be delivered to the associated MsgHandler. func (nc *Conn) Subscribe(subj string, cb MsgHandler) (*Subscription, error) { return nc.subscribe(subj, _EMPTY_, cb, nil, nil, false, nil) } // ChanSubscribe will express interest in the given subject and place // all messages received on the channel. // You should not close the channel until sub.Unsubscribe() has been called. func (nc *Conn) ChanSubscribe(subj string, ch chan *Msg) (*Subscription, error) { return nc.subscribe(subj, _EMPTY_, nil, ch, nil, false, nil) } // ChanQueueSubscribe will express interest in the given subject. // All subscribers with the same queue name will form the queue group // and only one member of the group will be selected to receive any given message, // which will be placed on the channel. // You should not close the channel until sub.Unsubscribe() has been called. // Note: This is the same than QueueSubscribeSyncWithChan. func (nc *Conn) ChanQueueSubscribe(subj, group string, ch chan *Msg) (*Subscription, error) { return nc.subscribe(subj, group, nil, ch, nil, false, nil) } // SubscribeSync will express interest on the given subject. Messages will // be received synchronously using Subscription.NextMsg(). 
func (nc *Conn) SubscribeSync(subj string) (*Subscription, error) { if nc == nil { return nil, ErrInvalidConnection } mch := make(chan *Msg, nc.Opts.SubChanLen) var errCh chan error if nc.Opts.PermissionErrOnSubscribe { errCh = make(chan error, 100) } return nc.subscribe(subj, _EMPTY_, nil, mch, errCh, true, nil) } // QueueSubscribe creates an asynchronous queue subscriber on the given subject. // All subscribers with the same queue name will form the queue group and // only one member of the group will be selected to receive any given // message asynchronously. func (nc *Conn) QueueSubscribe(subj, queue string, cb MsgHandler) (*Subscription, error) { return nc.subscribe(subj, queue, cb, nil, nil, false, nil) } // QueueSubscribeSync creates a synchronous queue subscriber on the given // subject. All subscribers with the same queue name will form the queue // group and only one member of the group will be selected to receive any // given message synchronously using Subscription.NextMsg(). func (nc *Conn) QueueSubscribeSync(subj, queue string) (*Subscription, error) { mch := make(chan *Msg, nc.Opts.SubChanLen) var errCh chan error if nc.Opts.PermissionErrOnSubscribe { errCh = make(chan error, 100) } return nc.subscribe(subj, queue, nil, mch, errCh, true, nil) } // QueueSubscribeSyncWithChan will express interest in the given subject. // All subscribers with the same queue name will form the queue group // and only one member of the group will be selected to receive any given message, // which will be placed on the channel. // You should not close the channel until sub.Unsubscribe() has been called. // Note: This is the same than ChanQueueSubscribe. func (nc *Conn) QueueSubscribeSyncWithChan(subj, queue string, ch chan *Msg) (*Subscription, error) { return nc.subscribe(subj, queue, nil, ch, nil, false, nil) } // badSubject will do quick test on whether a subject is acceptable. // Spaces are not allowed and all tokens should be > 0 in len. 
func badSubject(subj string) bool { if strings.ContainsAny(subj, " \t\r\n") { return true } tokens := strings.Split(subj, ".") for _, t := range tokens { if len(t) == 0 { return true } } return false } // badQueue will check a queue name for whitespace. func badQueue(qname string) bool { return strings.ContainsAny(qname, " \t\r\n") } // subscribe is the internal subscribe function that indicates interest in a subject. func (nc *Conn) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg, errCh chan (error), isSync bool, js *jsSub) (*Subscription, error) { if nc == nil { return nil, ErrInvalidConnection } nc.mu.Lock() defer nc.mu.Unlock() return nc.subscribeLocked(subj, queue, cb, ch, errCh, isSync, js) } func (nc *Conn) subscribeLocked(subj, queue string, cb MsgHandler, ch chan *Msg, errCh chan (error), isSync bool, js *jsSub) (*Subscription, error) { if nc == nil { return nil, ErrInvalidConnection } if badSubject(subj) { return nil, ErrBadSubject } if queue != _EMPTY_ && badQueue(queue) { return nil, ErrBadQueueName } // Check for some error conditions. if nc.isClosed() { return nil, ErrConnectionClosed } if nc.isDraining() { return nil, ErrConnectionDraining } if cb == nil && ch == nil { return nil, ErrBadSubscription } sub := &Subscription{ Subject: subj, Queue: queue, mcb: cb, conn: nc, jsi: js, } // Set pending limits. if ch != nil { sub.pMsgsLimit = cap(ch) } else { sub.pMsgsLimit = DefaultSubPendingMsgsLimit } sub.pBytesLimit = DefaultSubPendingBytesLimit // If we have an async callback, start up a sub specific // Go routine to deliver the messages. var sr bool if cb != nil { sub.typ = AsyncSubscription sub.pCond = sync.NewCond(&sub.mu) sr = true } else if !isSync { sub.typ = ChanSubscription sub.mch = ch } else { // Sync Subscription sub.typ = SyncSubscription sub.mch = ch sub.errCh = errCh } nc.subsMu.Lock() nc.ssid++ sub.sid = nc.ssid nc.subs[sub.sid] = sub nc.subsMu.Unlock() // Let's start the go routine now that it is fully setup and registered. 
if sr { go nc.waitForMsgs(sub) } // We will send these for all subs when we reconnect // so that we can suppress here if reconnecting. if !nc.isReconnecting() { nc.bw.appendString(fmt.Sprintf(subProto, subj, queue, sub.sid)) nc.kickFlusher() } sub.changeSubStatus(SubscriptionActive) return sub, nil } // NumSubscriptions returns active number of subscriptions. func (nc *Conn) NumSubscriptions() int { nc.mu.RLock() defer nc.mu.RUnlock() return len(nc.subs) } // Lock for nc should be held here upon entry func (nc *Conn) removeSub(s *Subscription) { nc.subsMu.Lock() delete(nc.subs, s.sid) nc.subsMu.Unlock() s.mu.Lock() defer s.mu.Unlock() // Release callers on NextMsg for SyncSubscription only if s.mch != nil && s.typ == SyncSubscription { close(s.mch) } s.mch = nil // If JS subscription then stop HB timer. if jsi := s.jsi; jsi != nil { if jsi.hbc != nil { jsi.hbc.Stop() jsi.hbc = nil } if jsi.csfct != nil { jsi.csfct.Stop() jsi.csfct = nil } } if s.typ != AsyncSubscription { done := s.pDone if done != nil { done(s.Subject) } } // Mark as invalid s.closed = true s.changeSubStatus(SubscriptionClosed) if s.pCond != nil { s.pCond.Broadcast() } } // SubscriptionType is the type of the Subscription. type SubscriptionType int // The different types of subscription types. const ( AsyncSubscription = SubscriptionType(iota) SyncSubscription ChanSubscription NilSubscription PullSubscription ) // Type returns the type of Subscription. func (s *Subscription) Type() SubscriptionType { if s == nil { return NilSubscription } s.mu.Lock() defer s.mu.Unlock() // Pull subscriptions are really a SyncSubscription and we want this // type to be set internally for all delivered messages management, etc.. // So check when to return PullSubscription to the user. if s.jsi != nil && s.jsi.pull { return PullSubscription } return s.typ } // IsValid returns a boolean indicating whether the subscription // is still active. This will return false if the subscription has // already been closed. 
func (s *Subscription) IsValid() bool { if s == nil { return false } s.mu.Lock() defer s.mu.Unlock() return s.conn != nil && !s.closed } // Drain will remove interest but continue callbacks until all messages // have been processed. // // For a JetStream subscription, if the library has created the JetStream // consumer, the library will send a DeleteConsumer request to the server // when the Drain operation completes. If a failure occurs when deleting // the JetStream consumer, an error will be reported to the asynchronous // error callback. // If you do not wish the JetStream consumer to be automatically deleted, // ensure that the consumer is not created by the library, which means // create the consumer with AddConsumer and bind to this consumer. func (s *Subscription) Drain() error { if s == nil { return ErrBadSubscription } s.mu.Lock() conn := s.conn s.mu.Unlock() if conn == nil { return ErrBadSubscription } return conn.unsubscribe(s, 0, true) } // IsDraining returns a boolean indicating whether the subscription // is being drained. // This will return false if the subscription has already been closed. func (s *Subscription) IsDraining() bool { if s == nil { return false } s.mu.Lock() defer s.mu.Unlock() return s.draining } // StatusChanged returns a channel on which given list of subscription status // changes will be sent. If no status is provided, all status changes will be sent. // Available statuses are SubscriptionActive, SubscriptionDraining, SubscriptionClosed, // and SubscriptionSlowConsumer. // The returned channel will be closed when the subscription is closed. 
func (s *Subscription) StatusChanged(statuses ...SubStatus) <-chan SubStatus { if len(statuses) == 0 { statuses = []SubStatus{SubscriptionActive, SubscriptionDraining, SubscriptionClosed, SubscriptionSlowConsumer} } ch := make(chan SubStatus, 10) s.mu.Lock() defer s.mu.Unlock() for _, status := range statuses { s.registerStatusChangeListener(status, ch) // initial status if status == s.status { ch <- status } } return ch } // registerStatusChangeListener registers a channel waiting for a specific status change event. // Status change events are non-blocking - if no receiver is waiting for the status change, // it will not be sent on the channel. Closed channels are ignored. // Lock should be held entering. func (s *Subscription) registerStatusChangeListener(status SubStatus, ch chan SubStatus) { if s.statListeners == nil { s.statListeners = make(map[chan SubStatus][]SubStatus) } if _, ok := s.statListeners[ch]; !ok { s.statListeners[ch] = make([]SubStatus, 0) } s.statListeners[ch] = append(s.statListeners[ch], status) } // sendStatusEvent sends subscription status event to all channels. // If there is no listener, sendStatusEvent // will not block. Lock should be held entering. func (s *Subscription) sendStatusEvent(status SubStatus) { for ch, statuses := range s.statListeners { if !containsStatus(statuses, status) { continue } // only send event if someone's listening select { case ch <- status: default: } if status == SubscriptionClosed { close(ch) } } } func containsStatus(statuses []SubStatus, status SubStatus) bool { for _, s := range statuses { if s == status { return true } } return false } // changeSubStatus changes subscription status and sends events // to all listeners. Lock should be held entering. func (s *Subscription) changeSubStatus(status SubStatus) { if s == nil { return } s.sendStatusEvent(status) s.status = status } // Unsubscribe will remove interest in the given subject. 
// // For a JetStream subscription, if the library has created the JetStream // consumer, it will send a DeleteConsumer request to the server (if the // unsubscribe itself was successful). If the delete operation fails, the // error will be returned. // If you do not wish the JetStream consumer to be automatically deleted, // ensure that the consumer is not created by the library, which means // create the consumer with AddConsumer and bind to this consumer (using // the nats.Bind() option). func (s *Subscription) Unsubscribe() error { if s == nil { return ErrBadSubscription } s.mu.Lock() conn := s.conn closed := s.closed dc := s.jsi != nil && s.jsi.dc s.mu.Unlock() if conn == nil || conn.IsClosed() { return ErrConnectionClosed } if closed { return ErrBadSubscription } if conn.IsDraining() { return ErrConnectionDraining } err := conn.unsubscribe(s, 0, false) if err == nil && dc { err = s.deleteConsumer() } return err } // checkDrained will watch for a subscription to be fully drained // and then remove it. func (nc *Conn) checkDrained(sub *Subscription) { if nc == nil || sub == nil { return } defer func() { sub.mu.Lock() defer sub.mu.Unlock() sub.draining = false }() // This allows us to know that whatever we have in the client pending // is correct and the server will not send additional information. nc.Flush() sub.mu.Lock() // For JS subscriptions, check if we are going to delete the // JS consumer when drain completes. dc := sub.jsi != nil && sub.jsi.dc sub.mu.Unlock() // Once we are here we just wait for Pending to reach 0 or // any other state to exit this go routine. for { // check connection is still valid. 
if nc.IsClosed() { return } // Check subscription state sub.mu.Lock() conn := sub.conn closed := sub.closed pMsgs := sub.pMsgs sub.mu.Unlock() if conn == nil || closed || pMsgs == 0 { nc.mu.Lock() nc.removeSub(sub) nc.mu.Unlock() if dc { if err := sub.deleteConsumer(); err != nil { nc.mu.Lock() if errCB := nc.Opts.AsyncErrorCB; errCB != nil { nc.ach.push(func() { errCB(nc, sub, err) }) } nc.mu.Unlock() } } return } time.Sleep(100 * time.Millisecond) } } // AutoUnsubscribe will issue an automatic Unsubscribe that is // processed by the server when max messages have been received. // This can be useful when sending a request to an unknown number // of subscribers. func (s *Subscription) AutoUnsubscribe(max int) error { if s == nil { return ErrBadSubscription } s.mu.Lock() conn := s.conn closed := s.closed s.mu.Unlock() if conn == nil || closed { return ErrBadSubscription } return conn.unsubscribe(s, max, false) } // SetClosedHandler will set the closed handler for when a subscription // is closed (either unsubscribed or drained). func (s *Subscription) SetClosedHandler(handler func(subject string)) { s.mu.Lock() s.pDone = handler s.mu.Unlock() } // unsubscribe performs the low level unsubscribe to the server. // Use Subscription.Unsubscribe() func (nc *Conn) unsubscribe(sub *Subscription, max int, drainMode bool) error { var maxStr string if max > 0 { sub.mu.Lock() sub.max = uint64(max) if sub.delivered < sub.max { maxStr = strconv.Itoa(max) } sub.mu.Unlock() } nc.mu.Lock() // ok here, but defer is expensive defer nc.mu.Unlock() if nc.isClosed() { return ErrConnectionClosed } nc.subsMu.RLock() s := nc.subs[sub.sid] nc.subsMu.RUnlock() // Already unsubscribed if s == nil { return nil } if maxStr == _EMPTY_ && !drainMode { nc.removeSub(s) } if drainMode { s.mu.Lock() s.draining = true sub.changeSubStatus(SubscriptionDraining) s.mu.Unlock() go nc.checkDrained(sub) } // We will send these for all subs when we reconnect // so that we can suppress here. 
if !nc.isReconnecting() { nc.bw.appendString(fmt.Sprintf(unsubProto, s.sid, maxStr)) nc.kickFlusher() } // For JetStream subscriptions cancel the attached context if there is any. var cancel func() sub.mu.Lock() jsi := sub.jsi if jsi != nil { cancel = jsi.cancel jsi.cancel = nil } sub.mu.Unlock() if cancel != nil { cancel() } return nil } // NextMsg will return the next message available to a synchronous subscriber // or block until one is available. An error is returned if the subscription is invalid (ErrBadSubscription), // the connection is closed (ErrConnectionClosed), the timeout is reached (ErrTimeout), // or if there were no responders (ErrNoResponders) when used in the context of a request/reply. func (s *Subscription) NextMsg(timeout time.Duration) (*Msg, error) { if s == nil { return nil, ErrBadSubscription } s.mu.Lock() err := s.validateNextMsgState(false) if err != nil { s.mu.Unlock() return nil, err } // snapshot mch := s.mch s.mu.Unlock() var ok bool var msg *Msg // If something is available right away, let's optimize that case. select { case msg, ok = <-mch: if !ok { return nil, s.getNextMsgErr() } if err := s.processNextMsgDelivered(msg); err != nil { return nil, err } else { return msg, nil } default: } // If we are here a message was not immediately available, so lets loop // with a timeout. t := globalTimerPool.Get(timeout) defer globalTimerPool.Put(t) if s.errCh != nil { select { case msg, ok = <-mch: if !ok { return nil, s.getNextMsgErr() } if err := s.processNextMsgDelivered(msg); err != nil { return nil, err } case err := <-s.errCh: return nil, err case <-t.C: return nil, ErrTimeout } } else { select { case msg, ok = <-mch: if !ok { return nil, s.getNextMsgErr() } if err := s.processNextMsgDelivered(msg); err != nil { return nil, err } case <-t.C: return nil, ErrTimeout } } return msg, nil } // nextMsgNoTimeout works similarly to Subscription.NextMsg() but will not // time out. It is only used internally for non-timeout subscription iterator. 
func (s *Subscription) nextMsgNoTimeout() (*Msg, error) { if s == nil { return nil, ErrBadSubscription } s.mu.Lock() err := s.validateNextMsgState(false) if err != nil { s.mu.Unlock() return nil, err } // snapshot mch := s.mch s.mu.Unlock() var ok bool var msg *Msg // If something is available right away, let's optimize that case. select { case msg, ok = <-mch: if !ok { return nil, s.getNextMsgErr() } if err := s.processNextMsgDelivered(msg); err != nil { return nil, err } else { return msg, nil } default: } if s.errCh != nil { select { case msg, ok = <-mch: if !ok { return nil, s.getNextMsgErr() } if err := s.processNextMsgDelivered(msg); err != nil { return nil, err } case err := <-s.errCh: return nil, err } } else { msg, ok = <-mch if !ok { return nil, s.getNextMsgErr() } if err := s.processNextMsgDelivered(msg); err != nil { return nil, err } } return msg, nil } // validateNextMsgState checks whether the subscription is in a valid // state to call NextMsg and be delivered another message synchronously. // This should be called while holding the lock. func (s *Subscription) validateNextMsgState(pullSubInternal bool) error { if s.connClosed { return ErrConnectionClosed } if s.mch == nil { if s.max > 0 && s.delivered >= s.max { return ErrMaxMessages } else if s.closed { return ErrBadSubscription } } if s.mcb != nil { return ErrSyncSubRequired } // if this subscription previously had a permissions error // and no reconnect has been attempted, return the permissions error // since the subscription does not exist on the server if s.conn.Opts.PermissionErrOnSubscribe && s.permissionsErr != nil { return s.permissionsErr } if s.sc { s.changeSubStatus(SubscriptionActive) s.sc = false return ErrSlowConsumer } // Unless this is from an internal call, reject use of this API. // Users should use Fetch() instead. if !pullSubInternal && s.jsi != nil && s.jsi.pull { return ErrTypeSubscription } return nil } // This is called when the sync channel has been closed. 
// The error returned will be either connection or subscription // closed depending on what caused NextMsg() to fail. func (s *Subscription) getNextMsgErr() error { s.mu.Lock() defer s.mu.Unlock() if s.connClosed { return ErrConnectionClosed } return ErrBadSubscription } // processNextMsgDelivered takes a message and applies the needed // accounting to the stats from the subscription, returning an // error in case we have the maximum number of messages have been // delivered already. It should not be called while holding the lock. func (s *Subscription) processNextMsgDelivered(msg *Msg) error { s.mu.Lock() nc := s.conn max := s.max var fcReply string // Update some stats. s.delivered++ delivered := s.delivered if s.jsi != nil { fcReply = s.checkForFlowControlResponse() } if s.typ == SyncSubscription { s.pMsgs-- s.pBytes -= len(msg.Data) } s.mu.Unlock() if fcReply != _EMPTY_ { nc.Publish(fcReply, nil) } if max > 0 { if delivered > max { return ErrMaxMessages } // Remove subscription if we have reached max. if delivered == max { nc.mu.Lock() nc.removeSub(s) nc.mu.Unlock() } } if len(msg.Data) == 0 && msg.Header.Get(statusHdr) == noResponders { return ErrNoResponders } return nil } // Queued returns the number of queued messages in the client for this subscription. // // Deprecated: Use Pending() func (s *Subscription) QueuedMsgs() (int, error) { m, _, err := s.Pending() return int(m), err } // Pending returns the number of queued messages and queued bytes in the client for this subscription. func (s *Subscription) Pending() (int, int, error) { if s == nil { return -1, -1, ErrBadSubscription } s.mu.Lock() defer s.mu.Unlock() if s.conn == nil || s.closed { return -1, -1, ErrBadSubscription } if s.typ == ChanSubscription { return -1, -1, ErrTypeSubscription } return s.pMsgs, s.pBytes, nil } // MaxPending returns the maximum number of queued messages and queued bytes seen so far. 
func (s *Subscription) MaxPending() (int, int, error) { if s == nil { return -1, -1, ErrBadSubscription } s.mu.Lock() defer s.mu.Unlock() if s.conn == nil || s.closed { return -1, -1, ErrBadSubscription } if s.typ == ChanSubscription { return -1, -1, ErrTypeSubscription } return s.pMsgsMax, s.pBytesMax, nil } // ClearMaxPending resets the maximums seen so far. func (s *Subscription) ClearMaxPending() error { if s == nil { return ErrBadSubscription } s.mu.Lock() defer s.mu.Unlock() if s.conn == nil || s.closed { return ErrBadSubscription } if s.typ == ChanSubscription { return ErrTypeSubscription } s.pMsgsMax, s.pBytesMax = 0, 0 return nil } // Pending Limits const ( // DefaultSubPendingMsgsLimit will be 512k msgs. DefaultSubPendingMsgsLimit = 512 * 1024 // DefaultSubPendingBytesLimit is 64MB DefaultSubPendingBytesLimit = 64 * 1024 * 1024 ) // PendingLimits returns the current limits for this subscription. // If no error is returned, a negative value indicates that the // given metric is not limited. func (s *Subscription) PendingLimits() (int, int, error) { if s == nil { return -1, -1, ErrBadSubscription } s.mu.Lock() defer s.mu.Unlock() if s.conn == nil || s.closed { return -1, -1, ErrBadSubscription } if s.typ == ChanSubscription { return -1, -1, ErrTypeSubscription } return s.pMsgsLimit, s.pBytesLimit, nil } // SetPendingLimits sets the limits for pending msgs and bytes for this subscription. // Zero is not allowed. Any negative value means that the given metric is not limited. func (s *Subscription) SetPendingLimits(msgLimit, bytesLimit int) error { if s == nil { return ErrBadSubscription } s.mu.Lock() defer s.mu.Unlock() if s.conn == nil || s.closed { return ErrBadSubscription } if s.typ == ChanSubscription { return ErrTypeSubscription } if msgLimit == 0 || bytesLimit == 0 { return ErrInvalidArg } s.pMsgsLimit, s.pBytesLimit = msgLimit, bytesLimit return nil } // Delivered returns the number of delivered messages for this subscription. 
func (s *Subscription) Delivered() (int64, error) { if s == nil { return -1, ErrBadSubscription } s.mu.Lock() defer s.mu.Unlock() if s.conn == nil || s.closed { return -1, ErrBadSubscription } return int64(s.delivered), nil } // Dropped returns the number of known dropped messages for this subscription. // This will correspond to messages dropped by violations of PendingLimits. If // the server declares the connection a SlowConsumer, this number may not be // valid. func (s *Subscription) Dropped() (int, error) { if s == nil { return -1, ErrBadSubscription } s.mu.Lock() defer s.mu.Unlock() if s.conn == nil || s.closed { return -1, ErrBadSubscription } return s.dropped, nil } // Respond allows a convenient way to respond to requests in service based subscriptions. func (m *Msg) Respond(data []byte) error { if m == nil || m.Sub == nil { return ErrMsgNotBound } if m.Reply == "" { return ErrMsgNoReply } m.Sub.mu.Lock() nc := m.Sub.conn m.Sub.mu.Unlock() // No need to check the connection here since the call to publish will do all the checking. return nc.Publish(m.Reply, data) } // RespondMsg allows a convenient way to respond to requests in service based subscriptions that might include headers func (m *Msg) RespondMsg(msg *Msg) error { if m == nil || m.Sub == nil { return ErrMsgNotBound } if m.Reply == "" { return ErrMsgNoReply } msg.Subject = m.Reply m.Sub.mu.Lock() nc := m.Sub.conn m.Sub.mu.Unlock() // No need to check the connection here since the call to publish will do all the checking. return nc.PublishMsg(msg) } // FIXME: This is a hack // removeFlushEntry is needed when we need to discard queued up responses // for our pings as part of a flush call. This happens when we have a flush // call outstanding and we call close. 
func (nc *Conn) removeFlushEntry(ch chan struct{}) bool { nc.mu.Lock() defer nc.mu.Unlock() if nc.pongs == nil { return false } for i, c := range nc.pongs { if c == ch { nc.pongs[i] = nil return true } } return false } // The lock must be held entering this function. func (nc *Conn) sendPing(ch chan struct{}) { nc.pongs = append(nc.pongs, ch) nc.bw.appendString(pingProto) // Flush in place. nc.bw.flush() } // This will fire periodically and send a client origin // ping to the server. Will also check that we have received // responses from the server. func (nc *Conn) processPingTimer() { nc.mu.Lock() if nc.status != CONNECTED { nc.mu.Unlock() return } // Check for violation nc.pout++ if nc.pout > nc.Opts.MaxPingsOut { nc.mu.Unlock() if shouldClose := nc.processOpErr(ErrStaleConnection); shouldClose { nc.close(CLOSED, true, nil) } return } nc.sendPing(nil) nc.ptmr.Reset(nc.Opts.PingInterval) nc.mu.Unlock() } // FlushTimeout allows a Flush operation to have an associated timeout. func (nc *Conn) FlushTimeout(timeout time.Duration) (err error) { if nc == nil { return ErrInvalidConnection } if timeout <= 0 { return ErrBadTimeout } nc.mu.Lock() if nc.isClosed() { nc.mu.Unlock() return ErrConnectionClosed } t := globalTimerPool.Get(timeout) defer globalTimerPool.Put(t) // Create a buffered channel to prevent chan send to block // in processPong() if this code here times out just when // PONG was received. ch := make(chan struct{}, 1) nc.sendPing(ch) nc.mu.Unlock() select { case _, ok := <-ch: if !ok { err = ErrConnectionClosed } else { close(ch) } case <-t.C: err = ErrTimeout } if err != nil { nc.removeFlushEntry(ch) } return } // RTT calculates the round trip time between this client and the server. 
func (nc *Conn) RTT() (time.Duration, error) { if nc.IsClosed() { return 0, ErrConnectionClosed } if nc.IsReconnecting() { return 0, ErrDisconnected } start := time.Now() if err := nc.FlushTimeout(10 * time.Second); err != nil { return 0, err } return time.Since(start), nil } // Flush will perform a round trip to the server and return when it // receives the internal reply. func (nc *Conn) Flush() error { return nc.FlushTimeout(10 * time.Second) } // Buffered will return the number of bytes buffered to be sent to the server. // FIXME(dlc) take into account disconnected state. func (nc *Conn) Buffered() (int, error) { nc.mu.RLock() defer nc.mu.RUnlock() if nc.isClosed() || nc.bw == nil { return -1, ErrConnectionClosed } return nc.bw.buffered(), nil } // resendSubscriptions will send our subscription state back to the // server. Used in reconnects func (nc *Conn) resendSubscriptions() { // Since we are going to send protocols to the server, we don't want to // be holding the subsMu lock (which is used in processMsg). So copy // the subscriptions in a temporary array. nc.subsMu.RLock() subs := make([]*Subscription, 0, len(nc.subs)) for _, s := range nc.subs { subs = append(subs, s) } nc.subsMu.RUnlock() for _, s := range subs { adjustedMax := uint64(0) s.mu.Lock() // when resending subscriptions, the permissions error should be cleared // since the user may have fixed the permissions issue s.permissionsErr = nil if s.max > 0 { if s.delivered < s.max { adjustedMax = s.max - s.delivered } // adjustedMax could be 0 here if the number of delivered msgs // reached the max, if so unsubscribe. 
if adjustedMax == 0 { s.mu.Unlock() nc.bw.writeDirect(fmt.Sprintf(unsubProto, s.sid, _EMPTY_)) continue } } subj, queue, sid := s.Subject, s.Queue, s.sid s.mu.Unlock() nc.bw.writeDirect(fmt.Sprintf(subProto, subj, queue, sid)) if adjustedMax > 0 { maxStr := strconv.Itoa(int(adjustedMax)) nc.bw.writeDirect(fmt.Sprintf(unsubProto, sid, maxStr)) } } } // This will clear any pending flush calls and release pending calls. // Lock is assumed to be held by the caller. func (nc *Conn) clearPendingFlushCalls() { // Clear any queued pongs, e.g. pending flush calls. for _, ch := range nc.pongs { if ch != nil { close(ch) } } nc.pongs = nil } // This will clear any pending Request calls. // Lock is assumed to be held by the caller. func (nc *Conn) clearPendingRequestCalls() { for key, ch := range nc.respMap { if ch != nil { close(ch) delete(nc.respMap, key) } } } // Low level close call that will do correct cleanup and set // desired status. Also controls whether user defined callbacks // will be triggered. The lock should not be held entering this // function. This function will handle the locking manually. func (nc *Conn) close(status Status, doCBs bool, err error) { nc.mu.Lock() if nc.isClosed() { nc.status = status nc.mu.Unlock() return } nc.status = CLOSED // Kick the Go routines so they fall out. nc.kickFlusher() // If the reconnect timer is waiting between a reconnect attempt, // this will kick it out. if nc.rqch != nil { close(nc.rqch) nc.rqch = nil } // Clear any queued pongs, e.g. pending flush calls. nc.clearPendingFlushCalls() // Clear any queued and blocking Requests. nc.clearPendingRequestCalls() // Stop ping timer if set. nc.stopPingTimer() nc.ptmr = nil // Need to close and set TCP conn to nil if reconnect loop has stopped, // otherwise we would incorrectly invoke Disconnect handler (if set) // down below. 
if nc.ar && nc.conn != nil { nc.conn.Close() nc.conn = nil } else if nc.conn != nil { // Go ahead and make sure we have flushed the outbound nc.bw.flush() defer nc.conn.Close() } // Close sync subscriber channels and release any // pending NextMsg() calls. nc.subsMu.Lock() for _, s := range nc.subs { s.mu.Lock() // Release callers on NextMsg for SyncSubscription only if s.mch != nil && s.typ == SyncSubscription { close(s.mch) } s.mch = nil // Mark as invalid, for signaling to waitForMsgs s.closed = true // Mark connection closed in subscription s.connClosed = true // If we have an async subscription, signals it to exit if s.typ == AsyncSubscription && s.pCond != nil { s.pCond.Signal() } s.mu.Unlock() } nc.subs = nil nc.subsMu.Unlock() nc.changeConnStatus(status) // Perform appropriate callback if needed for a disconnect. if doCBs { if nc.conn != nil { if disconnectedErrCB := nc.Opts.DisconnectedErrCB; disconnectedErrCB != nil { nc.ach.push(func() { disconnectedErrCB(nc, err) }) } else if disconnectedCB := nc.Opts.DisconnectedCB; disconnectedCB != nil { nc.ach.push(func() { disconnectedCB(nc) }) } } if nc.Opts.ClosedCB != nil { nc.ach.push(func() { nc.Opts.ClosedCB(nc) }) } } // If this is terminal, then we have to notify the asyncCB handler that // it can exit once all async callbacks have been dispatched. if status == CLOSED { nc.ach.close() } nc.mu.Unlock() } // Close will close the connection to the server. This call will release // all blocking calls, such as Flush() and NextMsg() func (nc *Conn) Close() { if nc != nil { // This will be a no-op if the connection was not websocket. // We do this here as opposed to inside close() because we want // to do this only for the final user-driven close of the client. // Otherwise, we would need to change close() to pass a boolean // indicating that this is the case. nc.wsClose() nc.close(CLOSED, !nc.Opts.NoCallbacksAfterClientClose, nil) } } // IsClosed tests if a Conn has been closed. 
func (nc *Conn) IsClosed() bool {
	nc.mu.RLock()
	defer nc.mu.RUnlock()
	return nc.isClosed()
}

// IsReconnecting tests if a Conn is reconnecting.
func (nc *Conn) IsReconnecting() bool {
	nc.mu.RLock()
	defer nc.mu.RUnlock()
	return nc.isReconnecting()
}

// IsConnected tests if a Conn is connected.
func (nc *Conn) IsConnected() bool {
	nc.mu.RLock()
	defer nc.mu.RUnlock()
	return nc.isConnected()
}

// drainConnection will run in a separate Go routine and will
// flush all publishes and drain all active subscriptions.
func (nc *Conn) drainConnection() {
	// Snapshot subs list.
	nc.mu.Lock()

	// Check again here if we are in a state to not process.
	if nc.isClosed() {
		nc.mu.Unlock()
		return
	}
	if nc.isConnecting() || nc.isReconnecting() {
		nc.mu.Unlock()
		// Move to closed state.
		nc.Close()
		return
	}

	subs := make([]*Subscription, 0, len(nc.subs))
	for _, s := range nc.subs {
		if s == nc.respMux {
			// Skip since might be in use while messages
			// are being processed (can miss responses).
			continue
		}
		subs = append(subs, s)
	}
	errCB := nc.Opts.AsyncErrorCB
	drainWait := nc.Opts.DrainTimeout
	respMux := nc.respMux
	nc.mu.Unlock()

	// for pushing errors with context.
	pushErr := func(err error) {
		nc.mu.Lock()
		nc.err = err
		if errCB != nil {
			nc.ach.push(func() { errCB(nc, nil, err) })
		}
		nc.mu.Unlock()
	}

	// Do subs first, skip request handler if present.
	for _, s := range subs {
		if err := s.Drain(); err != nil {
			// We will notify about these but continue.
			pushErr(err)
		}
	}

	// Wait for the subscriptions to drop to zero.
	// If the request/response mux is still alive it counts as one
	// remaining subscription, hence min == 1 in that case.
	timeout := time.Now().Add(drainWait)
	var min int
	if respMux != nil {
		min = 1
	} else {
		min = 0
	}
	for time.Now().Before(timeout) {
		if nc.NumSubscriptions() == min {
			break
		}
		time.Sleep(10 * time.Millisecond)
	}

	// In case there was a request/response handler
	// then need to call drain at the end.
	// Note: reuses the same overall deadline computed above.
	if respMux != nil {
		if err := respMux.Drain(); err != nil {
			// We will notify about these but continue.
			pushErr(err)
		}
		for time.Now().Before(timeout) {
			if nc.NumSubscriptions() == 0 {
				break
			}
			time.Sleep(10 * time.Millisecond)
		}
	}

	// Check if we timed out.
	if nc.NumSubscriptions() != 0 {
		pushErr(ErrDrainTimeout)
	}

	// Flip State
	nc.mu.Lock()
	nc.changeConnStatus(DRAINING_PUBS)
	nc.mu.Unlock()

	// Do publish drain via Flush() call.
	err := nc.FlushTimeout(5 * time.Second)
	if err != nil {
		pushErr(err)
	}

	// Move to closed state.
	nc.Close()
}

// Drain will put a connection into a drain state. All subscriptions will
// immediately be put into a drain state. Upon completion, the publishers
// will be drained and can not publish any additional messages. Upon draining
// of the publishers, the connection will be closed. Use the ClosedCB
// option to know when the connection has moved from draining to closed.
//
// See note in Subscription.Drain for JetStream subscriptions.
func (nc *Conn) Drain() error {
	nc.mu.Lock()
	if nc.isClosed() {
		nc.mu.Unlock()
		return ErrConnectionClosed
	}
	if nc.isConnecting() || nc.isReconnecting() {
		nc.mu.Unlock()
		nc.Close()
		return ErrConnectionReconnecting
	}
	if nc.isDraining() {
		nc.mu.Unlock()
		return nil
	}
	nc.changeConnStatus(DRAINING_SUBS)
	// The actual work happens asynchronously; Drain returns immediately.
	go nc.drainConnection()
	nc.mu.Unlock()
	return nil
}

// IsDraining tests if a Conn is in the draining state.
func (nc *Conn) IsDraining() bool {
	nc.mu.RLock()
	defer nc.mu.RUnlock()
	return nc.isDraining()
}

// caller must lock
func (nc *Conn) getServers(implicitOnly bool) []string {
	poolSize := len(nc.srvPool)
	servers := make([]string, 0)
	for i := 0; i < poolSize; i++ {
		if implicitOnly && !nc.srvPool[i].isImplicit {
			continue
		}
		url := nc.srvPool[i].url
		servers = append(servers, fmt.Sprintf("%s://%s", url.Scheme, url.Host))
	}
	return servers
}

// Servers returns the list of known server urls, including additional
// servers discovered after a connection has been established.  If
// authentication is enabled, use UserInfo or Token when connecting with
// these urls.
func (nc *Conn) Servers() []string { nc.mu.RLock() defer nc.mu.RUnlock() return nc.getServers(false) } // DiscoveredServers returns only the server urls that have been discovered // after a connection has been established. If authentication is enabled, // use UserInfo or Token when connecting with these urls. func (nc *Conn) DiscoveredServers() []string { nc.mu.RLock() defer nc.mu.RUnlock() return nc.getServers(true) } // Status returns the current state of the connection. func (nc *Conn) Status() Status { nc.mu.RLock() defer nc.mu.RUnlock() return nc.status } // Test if Conn has been closed Lock is assumed held. func (nc *Conn) isClosed() bool { return nc.status == CLOSED } // Test if Conn is in the process of connecting func (nc *Conn) isConnecting() bool { return nc.status == CONNECTING } // Test if Conn is being reconnected. func (nc *Conn) isReconnecting() bool { return nc.status == RECONNECTING } // Test if Conn is connected or connecting. func (nc *Conn) isConnected() bool { return nc.status == CONNECTED || nc.isDraining() } // Test if Conn is in the draining state. func (nc *Conn) isDraining() bool { return nc.status == DRAINING_SUBS || nc.status == DRAINING_PUBS } // Test if Conn is in the draining state for pubs. func (nc *Conn) isDrainingPubs() bool { return nc.status == DRAINING_PUBS } // Stats will return a race safe copy of the Statistics section for the connection. func (nc *Conn) Stats() Statistics { // Stats are updated either under connection's mu or with atomic operations // for inbound stats in processMsg(). nc.mu.Lock() stats := Statistics{ InMsgs: atomic.LoadUint64(&nc.InMsgs), InBytes: atomic.LoadUint64(&nc.InBytes), OutMsgs: nc.OutMsgs, OutBytes: nc.OutBytes, Reconnects: nc.Reconnects, } nc.mu.Unlock() return stats } // MaxPayload returns the size limit that a message payload can have. // This is set by the server configuration and delivered to the client // upon connect. 
func (nc *Conn) MaxPayload() int64 { nc.mu.RLock() defer nc.mu.RUnlock() return nc.info.MaxPayload } // HeadersSupported will return if the server supports headers func (nc *Conn) HeadersSupported() bool { nc.mu.RLock() defer nc.mu.RUnlock() return nc.info.Headers } // AuthRequired will return if the connected server requires authorization. func (nc *Conn) AuthRequired() bool { nc.mu.RLock() defer nc.mu.RUnlock() return nc.info.AuthRequired } // TLSRequired will return if the connected server requires TLS connections. func (nc *Conn) TLSRequired() bool { nc.mu.RLock() defer nc.mu.RUnlock() return nc.info.TLSRequired } // Barrier schedules the given function `f` to all registered asynchronous // subscriptions. // Only the last subscription to see this barrier will invoke the function. // If no subscription is registered at the time of this call, `f()` is invoked // right away. // ErrConnectionClosed is returned if the connection is closed prior to // the call. func (nc *Conn) Barrier(f func()) error { nc.mu.Lock() if nc.isClosed() { nc.mu.Unlock() return ErrConnectionClosed } nc.subsMu.Lock() // Need to figure out how many non chan subscriptions there are numSubs := 0 for _, sub := range nc.subs { if sub.typ == AsyncSubscription { numSubs++ } } if numSubs == 0 { nc.subsMu.Unlock() nc.mu.Unlock() f() return nil } barrier := &barrierInfo{refs: int64(numSubs), f: f} for _, sub := range nc.subs { sub.mu.Lock() if sub.mch == nil { msg := &Msg{barrier: barrier} // Push onto the async pList if sub.pTail != nil { sub.pTail.next = msg } else { sub.pHead = msg sub.pCond.Signal() } sub.pTail = msg } sub.mu.Unlock() } nc.subsMu.Unlock() nc.mu.Unlock() return nil } // GetClientIP returns the client IP as known by the server. // Supported as of server version 2.1.6. 
func (nc *Conn) GetClientIP() (net.IP, error) {
	nc.mu.RLock()
	defer nc.mu.RUnlock()
	if nc.isClosed() {
		return nil, ErrConnectionClosed
	}
	if nc.info.ClientIP == "" {
		return nil, ErrClientIPNotSupported
	}
	ip := net.ParseIP(nc.info.ClientIP)
	return ip, nil
}

// GetClientID returns the client ID assigned by the server to which
// the client is currently connected to. Note that the value may change if
// the client reconnects.
// This function returns ErrClientIDNotSupported if the server is of a
// version prior to 1.2.0.
func (nc *Conn) GetClientID() (uint64, error) {
	nc.mu.RLock()
	defer nc.mu.RUnlock()
	if nc.isClosed() {
		return 0, ErrConnectionClosed
	}
	if nc.info.CID == 0 {
		return 0, ErrClientIDNotSupported
	}
	return nc.info.CID, nil
}

// StatusChanged returns a channel on which given list of connection status changes will be reported.
// If no statuses are provided, defaults will be used: CONNECTED, RECONNECTING, DISCONNECTED, CLOSED.
func (nc *Conn) StatusChanged(statuses ...Status) chan Status {
	if len(statuses) == 0 {
		statuses = []Status{CONNECTED, RECONNECTING, DISCONNECTED, CLOSED}
	}
	// Buffered so that slow consumers do not immediately drop events.
	ch := make(chan Status, 10)
	nc.mu.Lock()
	defer nc.mu.Unlock()
	for _, s := range statuses {
		nc.registerStatusChangeListener(s, ch)
	}
	return ch
}

// registerStatusChangeListener registers a channel waiting for a specific status change event.
// Status change events are non-blocking - if no receiver is waiting for the status change,
// it will not be sent on the channel. Closed channels are ignored.
// The lock should be held entering.
func (nc *Conn) registerStatusChangeListener(status Status, ch chan Status) {
	// Lazily allocate the listener map and per-status slice.
	if nc.statListeners == nil {
		nc.statListeners = make(map[Status][]chan Status)
	}
	if _, ok := nc.statListeners[status]; !ok {
		nc.statListeners[status] = make([]chan Status, 0)
	}
	nc.statListeners[status] = append(nc.statListeners[status], ch)
}

// sendStatusEvent sends connection status event to all channels.
// If channel is closed, or there is no listener, sendStatusEvent
// will not block. Lock should be held entering.
func (nc *Conn) sendStatusEvent(s Status) {
Loop:
	for i := 0; i < len(nc.statListeners[s]); i++ {
		// make sure channel is not closed
		select {
		case <-nc.statListeners[s][i]:
			// if chan is closed, remove it
			// (swap-remove: overwrite with last element, shrink, and
			// re-examine the same index on the next iteration).
			nc.statListeners[s][i] = nc.statListeners[s][len(nc.statListeners[s])-1]
			nc.statListeners[s] = nc.statListeners[s][:len(nc.statListeners[s])-1]
			i--
			continue Loop
		default:
		}
		// only send event if someone's listening
		select {
		case nc.statListeners[s][i] <- s:
		default:
		}
	}
}

// changeConnStatus changes connections status and sends events
// to all listeners. Lock should be held entering.
func (nc *Conn) changeConnStatus(status Status) {
	if nc == nil {
		return
	}
	// Notify listeners before the status field is updated.
	nc.sendStatusEvent(status)
	nc.status = status
}

// NkeyOptionFromSeed will load an nkey pair from a seed file.
// It will return the NKey Option and will handle
// signing of nonce challenges from the server. It will take
// care to not hold keys in memory and to wipe memory.
func NkeyOptionFromSeed(seedFile string) (Option, error) {
	kp, err := nkeyPairFromSeedFile(seedFile)
	if err != nil {
		return nil, err
	}
	// Wipe our key on exit.
	defer kp.Wipe()

	pub, err := kp.PublicKey()
	if err != nil {
		return nil, err
	}
	if !nkeys.IsValidPublicUserKey(pub) {
		return nil, errors.New("nats: Not a valid nkey user seed")
	}
	// Re-read the seed from disk on every signature request so the
	// private key is never kept resident in memory.
	sigCB := func(nonce []byte) ([]byte, error) {
		return sigHandler(nonce, seedFile)
	}
	return Nkey(string(pub), sigCB), nil
}

// Just wipe slice with 'x', for clearing contents of creds or nkey seed file.
func wipeSlice(buf []byte) { for i := range buf { buf[i] = 'x' } } func userFromFile(userFile string) (string, error) { path, err := expandPath(userFile) if err != nil { return _EMPTY_, fmt.Errorf("nats: %w", err) } contents, err := os.ReadFile(path) if err != nil { return _EMPTY_, fmt.Errorf("nats: %w", err) } defer wipeSlice(contents) return nkeys.ParseDecoratedJWT(contents) } func homeDir() (string, error) { if runtime.GOOS == "windows" { homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH") userProfile := os.Getenv("USERPROFILE") var home string if homeDrive == "" || homePath == "" { if userProfile == "" { return _EMPTY_, errors.New("nats: failed to get home dir, require %HOMEDRIVE% and %HOMEPATH% or %USERPROFILE%") } home = userProfile } else { home = filepath.Join(homeDrive, homePath) } return home, nil } home := os.Getenv("HOME") if home == "" { return _EMPTY_, errors.New("nats: failed to get home dir, require $HOME") } return home, nil } func expandPath(p string) (string, error) { p = os.ExpandEnv(p) if !strings.HasPrefix(p, "~") { return p, nil } home, err := homeDir() if err != nil { return _EMPTY_, err } return filepath.Join(home, p[1:]), nil } func nkeyPairFromSeedFile(seedFile string) (nkeys.KeyPair, error) { contents, err := os.ReadFile(seedFile) if err != nil { return nil, fmt.Errorf("nats: %w", err) } defer wipeSlice(contents) return nkeys.ParseDecoratedNKey(contents) } // Sign authentication challenges from the server. // Do not keep private seed in memory. func sigHandler(nonce []byte, seedFile string) ([]byte, error) { kp, err := nkeyPairFromSeedFile(seedFile) if err != nil { return nil, fmt.Errorf("unable to extract key pair from file %q: %w", seedFile, err) } // Wipe our key on exit. defer kp.Wipe() sig, _ := kp.Sign(nonce) return sig, nil } type timeoutWriter struct { timeout time.Duration conn net.Conn err error } // Write implements the io.Writer interface. 
func (tw *timeoutWriter) Write(p []byte) (int, error) { if tw.err != nil { return 0, tw.err } var n int tw.conn.SetWriteDeadline(time.Now().Add(tw.timeout)) n, tw.err = tw.conn.Write(p) tw.conn.SetWriteDeadline(time.Time{}) return n, tw.err } nats.go-1.41.0/nats_iter.go000066400000000000000000000040611477351342400155000ustar00rootroot00000000000000// Copyright 2012-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build go1.23 package nats import ( "errors" "iter" "time" ) // Msgs returns an iter.Seq2[*Msg, error] that can be used to iterate over // messages. It can only be used with a subscription that has been created with // SubscribeSync or QueueSubscribeSync, otherwise it will return an error on the // first iteration. // // The iterator will block until a message is available. The // subscription will not be closed when the iterator is done. func (sub *Subscription) Msgs() iter.Seq2[*Msg, error] { return func(yield func(*Msg, error) bool) { for { msg, err := sub.nextMsgNoTimeout() if err != nil { yield(nil, err) return } if !yield(msg, nil) { return } } } } // MsgsTimeout returns an iter.Seq2[*Msg, error] that can be used to iterate // over messages. It can only be used with a subscription that has been created // with SubscribeSync or QueueSubscribeSync, otherwise it will return an error // on the first iteration. // // The iterator will block until a message is available or the timeout is // reached. 
If the timeout is reached, the iterator will return nats.ErrTimeout // but it will not be closed. func (sub *Subscription) MsgsTimeout(timeout time.Duration) iter.Seq2[*Msg, error] { return func(yield func(*Msg, error) bool) { for { msg, err := sub.NextMsg(timeout) if err != nil { if !yield(nil, err) { return } if !errors.Is(err, ErrTimeout) { return } } if !yield(msg, nil) { return } } } } nats.go-1.41.0/nats_test.go000066400000000000000000001433721477351342400155250ustar00rootroot00000000000000// Copyright 2012-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package nats //////////////////////////////////////////////////////////////////////////////// // Package scoped specific tests here.. //////////////////////////////////////////////////////////////////////////////// import ( "bufio" "bytes" "encoding/json" "errors" "fmt" "net" "net/http" "os" "reflect" "regexp" "runtime" "strconv" "strings" "sync" "sync/atomic" "testing" "time" "github.com/nats-io/nkeys" ) func TestVersion(t *testing.T) { // Semantic versioning verRe := regexp.MustCompile(`\d+.\d+.\d+(-\S+)?`) if !verRe.MatchString(Version) { t.Fatalf("Version not compatible with semantic versioning: %q", Version) } } // Dumb wait program to sync on callbacks, etc... 
Will timeout func Wait(ch chan bool) error { return WaitTime(ch, 5*time.Second) } func WaitTime(ch chan bool, timeout time.Duration) error { select { case <-ch: return nil case <-time.After(timeout): } return errors.New("timeout") } func stackFatalf(t *testing.T, f string, args ...any) { lines := make([]string, 0, 32) msg := fmt.Sprintf(f, args...) lines = append(lines, msg) // Generate the Stack of callers: Skip us and verify* frames. for i := 1; true; i++ { _, file, line, ok := runtime.Caller(i) if !ok { break } msg := fmt.Sprintf("%d - %s:%d", i, file, line) lines = append(lines, msg) } t.Fatalf("%s", strings.Join(lines, "\n")) } // Check the error channel for an error and if one is present, // calls t.Fatal(e.Error()). Note that this supports tests that // send nil to the error channel and so report error only if // e is != nil. func checkErrChannel(t *testing.T, errCh chan error) { t.Helper() select { case e := <-errCh: if e != nil { t.Fatal(e.Error()) } default: } } func TestVersionMatchesTag(t *testing.T) { refType := os.Getenv("GITHUB_REF_TYPE") if refType != "tag" { t.SkipNow() } tag := os.Getenv("GITHUB_REF_NAME") // We expect a tag of the form vX.Y.Z. If that's not the case, // we need someone to have a look. So fail if first letter is not // a `v` if tag[0] != 'v' { t.Fatalf("Expect tag to start with `v`, tag is: %s", tag) } // Strip the `v` from the tag for the version comparison. if Version != tag[1:] { t.Fatalf("Version (%s) does not match tag (%s)", Version, tag[1:]) } } func TestExpandPath(t *testing.T) { if runtime.GOOS == "windows" { origUserProfile := os.Getenv("USERPROFILE") origHomeDrive, origHomePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH") defer func() { os.Setenv("USERPROFILE", origUserProfile) os.Setenv("HOMEDRIVE", origHomeDrive) os.Setenv("HOMEPATH", origHomePath) }() cases := []struct { path string userProfile string homeDrive string homePath string wantPath string wantErr bool }{ // Missing HOMEDRIVE and HOMEPATH. 
{path: "/Foo/Bar", userProfile: `C:\Foo\Bar`, wantPath: "/Foo/Bar"}, {path: "Foo/Bar", userProfile: `C:\Foo\Bar`, wantPath: "Foo/Bar"}, {path: "~/Fizz", userProfile: `C:\Foo\Bar`, wantPath: `C:\Foo\Bar\Fizz`}, // Missing USERPROFILE. {path: "~/Fizz", homeDrive: "X:", homePath: `\Foo\Bar`, wantPath: `X:\Foo\Bar\Fizz`}, // Set all environment variables. HOMEDRIVE and HOMEPATH take // precedence. {path: "~/Fizz", userProfile: `C:\Foo\Bar`, homeDrive: "X:", homePath: `\Foo\Bar`, wantPath: `X:\Foo\Bar\Fizz`}, // Missing all environment variables. {path: "~/Fizz", wantErr: true}, } for i, c := range cases { t.Run(fmt.Sprintf("windows case %d", i), func(t *testing.T) { os.Setenv("USERPROFILE", c.userProfile) os.Setenv("HOMEDRIVE", c.homeDrive) os.Setenv("HOMEPATH", c.homePath) gotPath, err := expandPath(c.path) if !c.wantErr && err != nil { t.Fatalf("unexpected error: got=%v; want=%v", err, nil) } else if c.wantErr && err == nil { t.Fatalf("unexpected success: got=%v; want=%v", nil, "err") } if gotPath != c.wantPath { t.Fatalf("unexpected path: got=%v; want=%v", gotPath, c.wantPath) } }) } return } // Unix tests origHome := os.Getenv("HOME") defer os.Setenv("HOME", origHome) cases := []struct { path string home string testEnv string wantPath string wantErr bool }{ {path: "/foo/bar", home: "/fizz/buzz", wantPath: "/foo/bar"}, {path: "foo/bar", home: "/fizz/buzz", wantPath: "foo/bar"}, {path: "~/fizz", home: "/foo/bar", wantPath: "/foo/bar/fizz"}, {path: "$HOME/fizz", home: "/foo/bar", wantPath: "/foo/bar/fizz"}, // missing HOME env var {path: "~/fizz", wantErr: true}, } for i, c := range cases { t.Run(fmt.Sprintf("unix case %d", i), func(t *testing.T) { os.Setenv("HOME", c.home) gotPath, err := expandPath(c.path) if !c.wantErr && err != nil { t.Fatalf("unexpected error: got=%v; want=%v", err, nil) } else if c.wantErr && err == nil { t.Fatalf("unexpected success: got=%v; want=%v", nil, "err") } if gotPath != c.wantPath { t.Fatalf("unexpected path: got=%v; want=%v", gotPath, 
c.wantPath) } }) } } //////////////////////////////////////////////////////////////////////////////// // ServerPool tests //////////////////////////////////////////////////////////////////////////////// var testServers = []string{ "nats://localhost:1222", "nats://localhost:1223", "nats://localhost:1224", "nats://localhost:1225", "nats://localhost:1226", "nats://localhost:1227", "nats://localhost:1228", } func TestSimplifiedURLs(t *testing.T) { for _, test := range []struct { name string servers []string expected []string }{ { "nats", []string{ "nats://host1:1234/", "nats://host1:1234", "nats://host2:", "nats://host3", "host4:1234", "host5:", "host6", "nats://[1:2:3:4]:1234", "nats://[5:6:7:8]:", "nats://[9:10:11:12]", "[13:14:15:16]:", "[17:18:19:20]:1234", }, []string{ "nats://host1:1234/", "nats://host1:1234", "nats://host2:4222", "nats://host3:4222", "nats://host4:1234", "nats://host5:4222", "nats://host6:4222", "nats://[1:2:3:4]:1234", "nats://[5:6:7:8]:4222", "nats://[9:10:11:12]:4222", "nats://[13:14:15:16]:4222", "nats://[17:18:19:20]:1234", }, }, { "ws", []string{ "ws://host1:1234", "ws://host2:", "ws://host3", "ws://[1:2:3:4]:1234", "ws://[5:6:7:8]:", "ws://[9:10:11:12]", }, []string{ "ws://host1:1234", "ws://host2:80", "ws://host3:80", "ws://[1:2:3:4]:1234", "ws://[5:6:7:8]:80", "ws://[9:10:11:12]:80", }, }, { "wss", []string{ "wss://host1:1234", "wss://host2:", "wss://host3", "wss://[1:2:3:4]:1234", "wss://[5:6:7:8]:", "wss://[9:10:11:12]", }, []string{ "wss://host1:1234", "wss://host2:443", "wss://host3:443", "wss://[1:2:3:4]:1234", "wss://[5:6:7:8]:443", "wss://[9:10:11:12]:443", }, }, } { t.Run(test.name, func(t *testing.T) { opts := GetDefaultOptions() opts.NoRandomize = true opts.Servers = test.servers nc := &Conn{Opts: opts} if err := nc.setupServerPool(); err != nil { t.Fatalf("Problem setting up Server Pool: %v\n", err) } // Check server pool directly for i, u := range nc.srvPool { if u.url.String() != test.expected[i] { t.Fatalf("Expected url 
%q, got %q", test.expected[i], u.url.String())
				}
			}
		})
	}
}

// TestServersRandomize verifies that setupServerPool shuffles the configured
// server list by default, preserves order when Options.NoRandomize is set,
// and always keeps Options.Url first even when randomization is enabled.
func TestServersRandomize(t *testing.T) {
	opts := GetDefaultOptions()
	opts.Servers = testServers
	nc := &Conn{Opts: opts}
	if err := nc.setupServerPool(); err != nil {
		t.Fatalf("Problem setting up Server Pool: %v\n", err)
	}
	// Build []string from srvPool
	clientServers := []string{}
	for _, s := range nc.srvPool {
		clientServers = append(clientServers, s.url.String())
	}
	// In theory this could happen..
	if reflect.DeepEqual(testServers, clientServers) {
		t.Fatalf("ServerPool list not randomized\n")
	}

	// Now test that we do not randomize if proper flag is set.
	opts = GetDefaultOptions()
	opts.Servers = testServers
	opts.NoRandomize = true
	nc = &Conn{Opts: opts}
	if err := nc.setupServerPool(); err != nil {
		t.Fatalf("Problem setting up Server Pool: %v\n", err)
	}
	// Build []string from srvPool
	clientServers = []string{}
	for _, s := range nc.srvPool {
		clientServers = append(clientServers, s.url.String())
	}
	if !reflect.DeepEqual(testServers, clientServers) {
		t.Fatalf("ServerPool list should not be randomized\n")
	}

	// Although the original intent was that if Opts.Url is
	// set, Opts.Servers is not (and vice versa), the behavior
	// is that Opts.Url is always first, even when randomization
	// is enabled. So make sure that this is still the case.
	opts = GetDefaultOptions()
	opts.Url = DefaultURL
	opts.Servers = testServers
	nc = &Conn{Opts: opts}
	if err := nc.setupServerPool(); err != nil {
		t.Fatalf("Problem setting up Server Pool: %v\n", err)
	}
	// Build []string from srvPool
	clientServers = []string{}
	for _, s := range nc.srvPool {
		clientServers = append(clientServers, s.url.String())
	}
	// In theory this could happen..
	if reflect.DeepEqual(testServers, clientServers) {
		t.Fatalf("ServerPool list not randomized\n")
	}
	if clientServers[0] != DefaultURL {
		t.Fatalf("Options.Url should be first in the array, got %v", clientServers[0])
	}
}

// TestSelectNextServer verifies selectNextServer rotation: the current server
// is pushed to the back of the pool, and servers that exhausted MaxReconnect
// attempts are dropped from the pool entirely.
func TestSelectNextServer(t *testing.T) {
	opts := GetDefaultOptions()
	opts.Servers = testServers
	opts.NoRandomize = true
	nc := &Conn{Opts: opts}
	if err := nc.setupServerPool(); err != nil {
		t.Fatalf("Problem setting up Server Pool: %v\n", err)
	}
	if nc.current != nc.srvPool[0] {
		t.Fatalf("Wrong default selection: %v\n", nc.current.url)
	}

	sel, err := nc.selectNextServer()
	if err != nil {
		t.Fatalf("Got an err: %v\n", err)
	}
	// Check that we are now looking at #2, and current is now last.
	if len(nc.srvPool) != len(testServers) {
		t.Fatalf("List is incorrect size: %d vs %d\n", len(nc.srvPool), len(testServers))
	}
	if nc.current.url.String() != testServers[1] {
		t.Fatalf("Selection incorrect: %v vs %v\n", nc.current.url, testServers[1])
	}
	if nc.srvPool[len(nc.srvPool)-1].url.String() != testServers[0] {
		t.Fatalf("Did not push old to last position\n")
	}
	if sel != nc.srvPool[0] {
		t.Fatalf("Did not return correct server: %v vs %v\n", sel.url, nc.srvPool[0].url)
	}

	// Test that we do not keep servers where we have tried to reconnect past our limit.
	nc.srvPool[0].reconnects = int(opts.MaxReconnect)
	if _, err := nc.selectNextServer(); err != nil {
		t.Fatalf("Got an err: %v\n", err)
	}
	// Check that we are now looking at #3, and current is not in the list.
	if len(nc.srvPool) != len(testServers)-1 {
		t.Fatalf("List is incorrect size: %d vs %d\n", len(nc.srvPool), len(testServers)-1)
	}
	if nc.current.url.String() != testServers[2] {
		t.Fatalf("Selection incorrect: %v vs %v\n", nc.current.url, testServers[2])
	}
	if nc.srvPool[len(nc.srvPool)-1].url.String() == testServers[1] {
		t.Fatalf("Did not throw away the last server correctly\n")
	}
}

// This will test that comma separated url strings work properly for
// the Connect() command.
func TestUrlArgument(t *testing.T) {
	check := func(url string, expected []string) {
		if !reflect.DeepEqual(processUrlString(url), expected) {
			t.Fatalf("Got wrong response processing URL: %q, RES: %#v\n", url, processUrlString(url))
		}
	}
	// This is normal case
	oneExpected := []string{"nats://localhost:1222"}

	check("nats://localhost:1222", oneExpected)
	check("nats://localhost:1222 ", oneExpected)
	check(" nats://localhost:1222", oneExpected)
	check(" nats://localhost:1222 ", oneExpected)
	check("nats://localhost:1222/", oneExpected)

	var multiExpected = []string{
		"nats://localhost:1222",
		"nats://localhost:1223",
		"nats://localhost:1224",
	}

	check("nats://localhost:1222,nats://localhost:1223,nats://localhost:1224", multiExpected)
	check("nats://localhost:1222, nats://localhost:1223, nats://localhost:1224", multiExpected)
	check(" nats://localhost:1222, nats://localhost:1223, nats://localhost:1224 ", multiExpected)
	check("nats://localhost:1222, nats://localhost:1223 ,nats://localhost:1224", multiExpected)
	check("nats://localhost:1222/,nats://localhost:1223/,nats://localhost:1224/", multiExpected)
}

// TestParserPing feeds the PING protocol to the parser one byte at a time and
// checks the expected state transitions, then verifies the parser tolerates
// whitespace between PING and the terminating newline.
func TestParserPing(t *testing.T) {
	c := &Conn{}
	c.newReaderWriter()
	c.bw.switchToPending()

	c.ps = &parseState{}

	if c.ps.state != OP_START {
		t.Fatalf("Expected OP_START vs %d\n", c.ps.state)
	}
	ping := []byte("PING\r\n")
	err := c.parse(ping[:1])
	if err != nil || c.ps.state != OP_P {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}
	err = c.parse(ping[1:2])
	if err != nil || c.ps.state != OP_PI {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}
	err = c.parse(ping[2:3])
	if err != nil || c.ps.state != OP_PIN {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}
	err = c.parse(ping[3:4])
	if err != nil || c.ps.state != OP_PING {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}
	err = c.parse(ping[4:5])
	if err != nil || c.ps.state != OP_PING {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}
	err = c.parse(ping[5:6])
	if err != nil || c.ps.state != OP_START {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}
	err = c.parse(ping)
	if err != nil || c.ps.state != OP_START {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}
	// Should tolerate spaces
	ping = []byte("PING \r")
	err = c.parse(ping)
	if err != nil || c.ps.state != OP_PING {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}
	c.ps.state = OP_START
	ping = []byte("PING \r \n")
	err = c.parse(ping)
	if err != nil || c.ps.state != OP_START {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}
}

// TestParserErr drives the -ERR protocol through the parser byte by byte,
// including a split argument buffer, and checks the captured error text.
func TestParserErr(t *testing.T) {
	c := &Conn{}
	c.status = CLOSED
	c.newReaderWriter()
	c.bw.switchToPending()

	c.ps = &parseState{}

	// This test focuses on the parser only, not how the error is
	// actually processed by the upper layer.

	if c.ps.state != OP_START {
		t.Fatalf("Expected OP_START vs %d\n", c.ps.state)
	}

	expectedError := "'Any kind of error'"
	// Two spaces after -ERR: the byte-at-a-time assertions below require the
	// parser to remain in OP_MINUS_ERR_SPC for both bytes 4 and 5.
	errProto := []byte("-ERR  " + expectedError + "\r\n")
	err := c.parse(errProto[:1])
	if err != nil || c.ps.state != OP_MINUS {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}
	err = c.parse(errProto[1:2])
	if err != nil || c.ps.state != OP_MINUS_E {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}
	err = c.parse(errProto[2:3])
	if err != nil || c.ps.state != OP_MINUS_ER {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}
	err = c.parse(errProto[3:4])
	if err != nil || c.ps.state != OP_MINUS_ERR {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}
	err = c.parse(errProto[4:5])
	if err != nil || c.ps.state != OP_MINUS_ERR_SPC {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}
	err = c.parse(errProto[5:6])
	if err != nil || c.ps.state != OP_MINUS_ERR_SPC {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}

	// Check with split arg buffer
	err = c.parse(errProto[6:7])
	if err != nil || c.ps.state != MINUS_ERR_ARG {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}
	err = c.parse(errProto[7:10])
	if err != nil || c.ps.state != MINUS_ERR_ARG {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}
	err = c.parse(errProto[10 : len(errProto)-2])
	if err != nil || c.ps.state != MINUS_ERR_ARG {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}
	if c.ps.argBuf == nil {
		t.Fatal("ArgBuf should not be nil")
	}
	s := string(c.ps.argBuf)
	if s != expectedError {
		t.Fatalf("Expected %v, got %v", expectedError, s)
	}
	err = c.parse(errProto[len(errProto)-2:])
	if err != nil || c.ps.state != OP_START {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}

	// Check without split arg buffer
	errProto = []byte("-ERR 'Any error'\r\n")
	err = c.parse(errProto)
	if err != nil || c.ps.state != OP_START {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}
}

// TestParserOK checks the +OK protocol state transitions; anything after the
// "+OK" prefix up to the newline is tolerated.
func TestParserOK(t *testing.T) {
	c := &Conn{}
	c.ps = &parseState{}

	if c.ps.state != OP_START {
		t.Fatalf("Expected OP_START vs %d\n", c.ps.state)
	}
	errProto := []byte("+OKay\r\n")
	err := c.parse(errProto[:1])
	if err != nil || c.ps.state != OP_PLUS {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}
	err = c.parse(errProto[1:2])
	if err != nil || c.ps.state != OP_PLUS_O {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}
	err = c.parse(errProto[2:3])
	if err != nil || c.ps.state != OP_PLUS_OK {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}
	err = c.parse(errProto[3:])
	if err != nil || c.ps.state != OP_START {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}
}

// TestParserShouldFail feeds malformed protocol lines to the parser and
// expects each of them to produce a parse error.
func TestParserShouldFail(t *testing.T) {
	c := &Conn{}
	c.ps = &parseState{}

	if err := c.parse([]byte(" PING")); err == nil {
		t.Fatal("Should have received a parse error")
	}
	c.ps.state = OP_START
	if err := c.parse([]byte("POO")); err == nil {
		t.Fatal("Should have received a parse error")
	}
	c.ps.state = OP_START
	if err := c.parse([]byte("Px")); err == nil {
		t.Fatal("Should have received a parse error")
	}
	c.ps.state = OP_START
	if err := c.parse([]byte("PIx")); err == nil {
		t.Fatal("Should have received a parse error")
	}
	c.ps.state = OP_START
	if err := c.parse([]byte("PINx")); err == nil {
		t.Fatal("Should have received a parse error")
	}
	// Stop here because 'PING' protos are tolerant for anything between PING and \n
	c.ps.state = OP_START
	if err := c.parse([]byte("POx")); err == nil {
		t.Fatal("Should have received a parse error")
	}
	c.ps.state = OP_START
	if err := c.parse([]byte("PONx")); err == nil {
		t.Fatal("Should have received a parse error")
	}
	// Stop here because 'PONG' protos are tolerant for anything between PONG and \n
	c.ps.state = OP_START
	if err := c.parse([]byte("ZOO")); err == nil {
		t.Fatal("Should have received a parse error")
	}
	c.ps.state = OP_START
	if err := c.parse([]byte("Mx\r\n")); err == nil {
		t.Fatal("Should have received a parse error")
	}
	c.ps.state = OP_START
	if err := c.parse([]byte("MSx\r\n")); err == nil {
		t.Fatal("Should have received a parse error")
	}
	c.ps.state = OP_START
	if err := c.parse([]byte("MSGx\r\n")); err == nil {
		t.Fatal("Should have received a parse error")
	}
	c.ps.state = OP_START
	if err := c.parse([]byte("MSG  foo\r\n")); err == nil {
		t.Fatal("Should have received a parse error")
	}
	c.ps.state = OP_START
	if err := c.parse([]byte("MSG \r\n")); err == nil {
		t.Fatal("Should have received a parse error")
	}
	c.ps.state = OP_START
	if err := c.parse([]byte("MSG foo 1\r\n")); err == nil {
		t.Fatal("Should have received a parse error")
	}
	c.ps.state = OP_START
	if err := c.parse([]byte("MSG foo bar 1\r\n")); err == nil {
		t.Fatal("Should have received a parse error")
	}
	c.ps.state = OP_START
	if err := c.parse([]byte("MSG foo bar 1 baz\r\n")); err == nil {
		t.Fatal("Should have received a parse error")
	}
	c.ps.state = OP_START
	if err := c.parse([]byte("MSG foo 1 bar baz\r\n")); err == nil {
		t.Fatal("Should have received a parse error")
	}
	c.ps.state = OP_START
	if err := c.parse([]byte("+x\r\n")); err == nil {
		t.Fatal("Should have received a parse error")
	}
	c.ps.state = OP_START
	if err := c.parse([]byte("+Ox\r\n")); err == nil {
		t.Fatal("Should have received a parse error")
	}
	c.ps.state = OP_START
	if err := c.parse([]byte("-x\r\n")); err == nil {
		t.Fatal("Should have received a parse error")
	}
	c.ps.state = OP_START
	if err := c.parse([]byte("-Ex\r\n")); err == nil {
		t.Fatal("Should have received a parse error")
	}
	c.ps.state = OP_START
	if err := c.parse([]byte("-ERx\r\n")); err == nil {
		t.Fatal("Should have received a parse error")
	}
	c.ps.state = OP_START
	if err := c.parse([]byte("-ERRx\r\n")); err == nil {
		t.Fatal("Should have received a parse error")
	}
}

// TestParserSplitMsg verifies MSG parsing when the protocol line and payload
// are delivered across multiple reads, including payloads larger than the
// parser's scratch buffer, and checks InMsgs/InBytes accounting.
func TestParserSplitMsg(t *testing.T) {
	nc := &Conn{}
	nc.ps = &parseState{}

	buf := []byte("MSG a\r\n")
	err := nc.parse(buf)
	if err == nil {
		t.Fatal("Expected an error")
	}
	nc.ps = &parseState{}

	buf = []byte("MSG a b c\r\n")
	err = nc.parse(buf)
	if err == nil {
		t.Fatal("Expected an error")
	}
	nc.ps = &parseState{}

	expectedCount := uint64(1)
	expectedSize := uint64(3)

	buf = []byte("MSG a")
	err = nc.parse(buf)
	if err != nil {
		t.Fatalf("Parser error: %v", err)
	}
	if nc.ps.argBuf == nil {
		t.Fatal("Arg buffer should have been created")
	}

	buf = []byte(" 1 3\r\nf")
	err = nc.parse(buf)
	if err != nil {
		t.Fatalf("Parser error: %v", err)
	}
	if nc.ps.ma.size != 3 {
		t.Fatalf("Wrong msg size: %d instead of 3", nc.ps.ma.size)
	}
	if nc.ps.ma.sid != 1 {
		t.Fatalf("Wrong sid: %d instead of 1", nc.ps.ma.sid)
	}
	if string(nc.ps.ma.subject) != "a" {
		t.Fatalf("Wrong subject: '%s' instead of 'a'", string(nc.ps.ma.subject))
	}
	if nc.ps.msgBuf == nil {
		t.Fatal("Msg buffer should have been created")
	}

	buf = []byte("oo\r\n")
	err = nc.parse(buf)
	if err != nil {
		t.Fatalf("Parser error: %v", err)
	}
	if (nc.Statistics.InMsgs != expectedCount) || (nc.Statistics.InBytes != expectedSize) {
		t.Fatalf("Wrong stats: %d - %d instead of %d - %d", nc.Statistics.InMsgs, nc.Statistics.InBytes, expectedCount, expectedSize)
	}
	if (nc.ps.argBuf != nil) || (nc.ps.msgBuf != nil) {
		t.Fatal("Buffers should be nil now")
	}

	buf = []byte("MSG a 1 3\r\nfo")
	err = nc.parse(buf)
	if err != nil {
		t.Fatalf("Parser error: %v", err)
	}
	if nc.ps.ma.size != 3 {
		t.Fatalf("Wrong msg size: %d instead of 3", nc.ps.ma.size)
	}
	if nc.ps.ma.sid != 1 {
		t.Fatalf("Wrong sid: %d instead of 1", nc.ps.ma.sid)
	}
	if string(nc.ps.ma.subject) != "a" {
		t.Fatalf("Wrong subject: '%s' instead of 'a'", string(nc.ps.ma.subject))
	}
	if nc.ps.argBuf == nil {
		t.Fatal("Arg buffer should have been created")
	}
	if nc.ps.msgBuf == nil {
		t.Fatal("Msg buffer should have been created")
	}

	expectedCount++
	expectedSize += 3

	buf = []byte("o\r\n")
	err = nc.parse(buf)
	if err != nil {
		t.Fatalf("Parser error: %v", err)
	}
	if (nc.Statistics.InMsgs != expectedCount) || (nc.Statistics.InBytes != expectedSize) {
		t.Fatalf("Wrong stats: %d - %d instead of %d - %d", nc.Statistics.InMsgs, nc.Statistics.InBytes, expectedCount, expectedSize)
	}
	if (nc.ps.argBuf != nil) || (nc.ps.msgBuf != nil) {
		t.Fatal("Buffers should be nil now")
	}

	buf = []byte("MSG a 1 6\r\nfo")
	err = nc.parse(buf)
	if err != nil {
		t.Fatalf("Parser error: %v", err)
	}
	if nc.ps.ma.size != 6 {
		t.Fatalf("Wrong msg size: %d instead of 6", nc.ps.ma.size)
	}
	if nc.ps.ma.sid != 1 {
		t.Fatalf("Wrong sid: %d instead of 1", nc.ps.ma.sid)
	}
	if string(nc.ps.ma.subject) != "a" {
		t.Fatalf("Wrong subject: '%s' instead of 'a'", string(nc.ps.ma.subject))
	}
	if nc.ps.argBuf == nil {
		t.Fatal("Arg buffer should have been created")
	}
	if nc.ps.msgBuf == nil {
		t.Fatal("Msg buffer should have been created")
	}

	buf = []byte("ob")
	err = nc.parse(buf)
	if err != nil {
		t.Fatalf("Parser error: %v", err)
	}

	expectedCount++
	expectedSize += 6

	buf = []byte("ar\r\n")
	err = nc.parse(buf)
	if err != nil {
		t.Fatalf("Parser error: %v", err)
	}
	if (nc.Statistics.InMsgs != expectedCount) || (nc.Statistics.InBytes != expectedSize) {
		t.Fatalf("Wrong stats: %d - %d instead of %d - %d", nc.Statistics.InMsgs, nc.Statistics.InBytes, expectedCount, expectedSize)
	}
	if (nc.ps.argBuf != nil) || (nc.ps.msgBuf != nil) {
		t.Fatal("Buffers should be nil now")
	}

	// Let's have a msg that is bigger than the parser's scratch size.
	// Since we prepopulate the msg with 'foo', adding 3 to the size.
	msgSize := cap(nc.ps.scratch) + 100 + 3
	buf = []byte(fmt.Sprintf("MSG a 1 b %d\r\nfoo", msgSize))
	err = nc.parse(buf)
	if err != nil {
		t.Fatalf("Parser error: %v", err)
	}
	if nc.ps.ma.size != msgSize {
		t.Fatalf("Wrong msg size: %d instead of %d", nc.ps.ma.size, msgSize)
	}
	if nc.ps.ma.sid != 1 {
		t.Fatalf("Wrong sid: %d instead of 1", nc.ps.ma.sid)
	}
	if string(nc.ps.ma.subject) != "a" {
		t.Fatalf("Wrong subject: '%s' instead of 'a'", string(nc.ps.ma.subject))
	}
	if string(nc.ps.ma.reply) != "b" {
		t.Fatalf("Wrong reply: '%s' instead of 'b'", string(nc.ps.ma.reply))
	}
	if nc.ps.argBuf == nil {
		t.Fatal("Arg buffer should have been created")
	}
	if nc.ps.msgBuf == nil {
		t.Fatal("Msg buffer should have been created")
	}

	expectedCount++
	expectedSize += uint64(msgSize)

	bufSize := msgSize - 3
	buf = make([]byte, bufSize)
	for i := 0; i < bufSize; i++ {
		buf[i] = byte('a' + (i % 26))
	}
	err = nc.parse(buf)
	if err != nil {
		t.Fatalf("Parser error: %v", err)
	}
	if nc.ps.state != MSG_PAYLOAD {
		t.Fatalf("Wrong state: %v instead of %v", nc.ps.state, MSG_PAYLOAD)
	}
	if nc.ps.ma.size != msgSize {
		t.Fatalf("Wrong (ma) msg size: %d instead of %d", nc.ps.ma.size, msgSize)
	}
	if len(nc.ps.msgBuf) != msgSize {
		t.Fatalf("Wrong msg size: %d instead of %d", len(nc.ps.msgBuf), msgSize)
	}
	// Check content:
	if string(nc.ps.msgBuf[0:3]) != "foo" {
		t.Fatalf("Wrong msg content: %s", string(nc.ps.msgBuf))
	}
	for k := 3; k < nc.ps.ma.size; k++ {
		if nc.ps.msgBuf[k] != byte('a'+((k-3)%26)) {
			t.Fatalf("Wrong msg content: %s", string(nc.ps.msgBuf))
		}
	}

	buf = []byte("\r\n")
	if err := nc.parse(buf); err != nil {
		t.Fatalf("Unexpected error during parsing: %v", err)
	}
	if (nc.Statistics.InMsgs != expectedCount) || (nc.Statistics.InBytes != expectedSize) {
		t.Fatalf("Wrong stats: %d - %d instead of %d - %d", nc.Statistics.InMsgs, nc.Statistics.InBytes, expectedCount, expectedSize)
	}
	if (nc.ps.argBuf != nil) || (nc.ps.msgBuf != nil) {
		t.Fatal("Buffers should be nil now")
	}
	if nc.ps.state != OP_START {
		t.Fatalf("Wrong state: %v", nc.ps.state)
	}
}

// TestNormalizeError checks that normalizeErr strips the -ERR prefix, trims
// surrounding whitespace, and removes matched (or unmatched) single quotes.
func TestNormalizeError(t *testing.T) {
	expected := "Typical Error"
	if s := normalizeErr("-ERR '" + expected + "'"); s != expected {
		t.Fatalf("Expected '%s', got '%s'", expected, s)
	}

	expected = "Trim Surrounding Spaces"
	if s := normalizeErr("-ERR '" + expected + "' "); s != expected {
		t.Fatalf("Expected '%s', got '%s'", expected, s)
	}

	expected = "Trim Surrounding Spaces Without Quotes"
	if s := normalizeErr("-ERR " + expected + " "); s != expected {
		t.Fatalf("Expected '%s', got '%s'", expected, s)
	}

	expected = "Error Without Quotes"
	if s := normalizeErr("-ERR " + expected); s != expected {
		t.Fatalf("Expected '%s', got '%s'", expected, s)
	}

	expected = "Error With Quote Only On Left"
	if s := normalizeErr("-ERR '" + expected); s != expected {
		t.Fatalf("Expected '%s', got '%s'", expected, s)
	}

	expected = "Error With Quote Only On Right"
	if s := normalizeErr("-ERR " + expected + "'"); s != expected {
		t.Fatalf("Expected '%s', got '%s'", expected, s)
	}
}

// TestAsyncINFO verifies parsing of asynchronous INFO protocol messages,
// including split buffers, connect_urls handling (dedup, append-at-end),
// and that randomization on setup does not reorder the first URL.
func TestAsyncINFO(t *testing.T) {
	opts := GetDefaultOptions()
	c := &Conn{Opts: opts}

	c.ps = &parseState{}

	if c.ps.state != OP_START {
		t.Fatalf("Expected OP_START vs %d\n", c.ps.state)
	}

	info := []byte("INFO {}\r\n")
	if c.ps.state != OP_START {
		t.Fatalf("Expected OP_START vs %d\n", c.ps.state)
	}
	err := c.parse(info[:1])
	if err != nil || c.ps.state != OP_I {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}
	err = c.parse(info[1:2])
	if err != nil || c.ps.state != OP_IN {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}
	err = c.parse(info[2:3])
	if err != nil || c.ps.state != OP_INF {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}
	err = c.parse(info[3:4])
	if err != nil || c.ps.state != OP_INFO {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}
	err = c.parse(info[4:5])
	if err != nil || c.ps.state != OP_INFO_SPC {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}
	err = c.parse(info[5:])
	if err != nil || c.ps.state != OP_START {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}

	// All at once
	err = c.parse(info)
	if err != nil || c.ps.state != OP_START {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}

	// Server pool needs to be setup
	c.setupServerPool()

	// Partials requiring argBuf
	expectedServer := serverInfo{
		ID:           "test",
		Host:         "localhost",
		Port:         4222,
		AuthRequired: true,
		TLSRequired:  true,
		MaxPayload:   2 * 1024 * 1024,
		ConnectURLs:  []string{"localhost:5222", "localhost:6222"},
	}
	// Set NoRandomize so that the check with expectedServer info
	// matches.
	c.Opts.NoRandomize = true

	b, _ := json.Marshal(expectedServer)
	info = []byte(fmt.Sprintf("INFO %s\r\n", b))
	if c.ps.state != OP_START {
		t.Fatalf("Expected OP_START vs %d\n", c.ps.state)
	}
	err = c.parse(info[:9])
	if err != nil || c.ps.state != INFO_ARG || c.ps.argBuf == nil {
		t.Fatalf("Unexpected: %d err: %v argBuf: %v\n", c.ps.state, err, c.ps.argBuf)
	}
	err = c.parse(info[9:11])
	if err != nil || c.ps.state != INFO_ARG || c.ps.argBuf == nil {
		t.Fatalf("Unexpected: %d err: %v argBuf: %v\n", c.ps.state, err, c.ps.argBuf)
	}
	err = c.parse(info[11:])
	if err != nil || c.ps.state != OP_START || c.ps.argBuf != nil {
		t.Fatalf("Unexpected: %d err: %v argBuf: %v\n", c.ps.state, err, c.ps.argBuf)
	}
	if !reflect.DeepEqual(c.info, expectedServer) {
		t.Fatalf("Expected server info to be: %v, got: %v", expectedServer, c.info)
	}

	// Good INFOs (second entry has two spaces after INFO on purpose).
	good := []string{"INFO {}\r\n", "INFO  {}\r\n", "INFO {} \r\n", "INFO { \"server_id\": \"test\" } \r\n", "INFO {\"connect_urls\":[]}\r\n"}
	for _, gi := range good {
		c.ps = &parseState{}
		err = c.parse([]byte(gi))
		if err != nil || c.ps.state != OP_START {
			t.Fatalf("Protocol %q should be fine. Err=%v state=%v", gi, err, c.ps.state)
		}
	}

	// Wrong INFOs
	wrong := []string{"IxNFO {}\r\n", "INxFO {}\r\n", "INFxO {}\r\n", "INFOx {}\r\n", "INFO{}\r\n", "INFO {}"}
	for _, wi := range wrong {
		c.ps = &parseState{}
		err = c.parse([]byte(wi))
		if err == nil && c.ps.state == OP_START {
			t.Fatalf("Protocol %q should have failed", wi)
		}
	}

	checkPool := func(urls ...string) {
		// Check both pool and urls map
		if len(c.srvPool) != len(urls) {
			stackFatalf(t, "Pool should have %d elements, has %d", len(urls), len(c.srvPool))
		}
		if len(c.urls) != len(urls) {
			stackFatalf(t, "Map should have %d elements, has %d", len(urls), len(c.urls))
		}
		for _, url := range urls {
			if _, present := c.urls[url]; !present {
				stackFatalf(t, "Pool should have %q", url)
			}
		}
	}

	// Now test the decoding of "connect_urls"

	// Reset the pool
	c.setupServerPool()
	// Reinitialize the parser
	c.ps = &parseState{}

	info = []byte("INFO {\"connect_urls\":[\"localhost:4222\", \"localhost:5222\"]}\r\n")
	err = c.parse(info)
	if err != nil || c.ps.state != OP_START {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}
	// Pool now should contain 127.0.0.1:4222 (the default URL), localhost:4222 and localhost:5222
	checkPool("127.0.0.1:4222", "localhost:4222", "localhost:5222")

	// Make sure that if client receives the same, it is not added again.
	err = c.parse(info)
	if err != nil || c.ps.state != OP_START {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}
	// Pool should still contain 127.0.0.1:4222 (the default URL), localhost:4222 and localhost:5222
	checkPool("127.0.0.1:4222", "localhost:4222", "localhost:5222")

	// Receive a new URL
	info = []byte("INFO {\"connect_urls\":[\"localhost:4222\", \"localhost:5222\", \"localhost:6222\"]}\r\n")
	err = c.parse(info)
	if err != nil || c.ps.state != OP_START {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}
	// Pool now should contain 127.0.0.1:4222 (the default URL), localhost:4222, localhost:5222 and localhost:6222
	checkPool("127.0.0.1:4222", "localhost:4222", "localhost:5222", "localhost:6222")

	// Check that pool may be randomized on setup, but new URLs are always
	// added at end of pool.
	c.Opts.NoRandomize = false
	c.Opts.Servers = testServers
	// Reset the pool
	c.setupServerPool()
	// Reinitialize the parser
	c.ps = &parseState{}
	// Capture the pool sequence after randomization
	urlsAfterPoolSetup := make([]string, 0, len(c.srvPool))
	for _, srv := range c.srvPool {
		urlsAfterPoolSetup = append(urlsAfterPoolSetup, srv.url.Host)
	}
	checkNewURLsAddedRandomly := func() {
		t.Helper()
		var ok bool
		for i := 0; i < len(urlsAfterPoolSetup); i++ {
			if c.srvPool[i].url.Host != urlsAfterPoolSetup[i] {
				ok = true
				break
			}
		}
		if !ok {
			t.Fatalf("New URLs were not added randomly: %q", c.Servers())
		}
	}
	// Add new urls
	newURLs := "\"impA:4222\", \"impB:4222\", \"impC:4222\", " +
		"\"impD:4222\", \"impE:4222\", \"impF:4222\", \"impG:4222\", " +
		"\"impH:4222\", \"impI:4222\", \"impJ:4222\""
	info = []byte("INFO {\"connect_urls\":[" + newURLs + "]}\r\n")
	err = c.parse(info)
	if err != nil || c.ps.state != OP_START {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}
	checkNewURLsAddedRandomly()
	// Check that we have not moved the first URL
	if u := c.srvPool[0].url.Host; u != urlsAfterPoolSetup[0] {
		t.Fatalf("Expected first URL to be %q, got %q", urlsAfterPoolSetup[0], u)
	}
}

func
TestConnServers(t *testing.T) {
	opts := GetDefaultOptions()
	c := &Conn{Opts: opts}
	c.ps = &parseState{}
	c.setupServerPool()

	validateURLs := func(serverUrls []string, expectedUrls ...string) {
		var found bool
		if len(serverUrls) != len(expectedUrls) {
			stackFatalf(t, "Array should have %d elements, has %d", len(expectedUrls), len(serverUrls))
		}

		for _, ev := range expectedUrls {
			found = false
			for _, av := range serverUrls {
				if ev == av {
					found = true
					break
				}
			}
			if !found {
				stackFatalf(t, "array is missing %q in %v", ev, serverUrls)
			}
		}
	}

	// check the default url
	validateURLs(c.Servers(), "nats://127.0.0.1:4222")
	if len(c.DiscoveredServers()) != 0 {
		t.Fatalf("Expected no discovered servers")
	}

	// Add a new URL
	err := c.parse([]byte("INFO {\"connect_urls\":[\"localhost:5222\"]}\r\n"))
	if err != nil {
		t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
	}
	// Server list should now contain both the default and the new url.
	validateURLs(c.Servers(), "nats://127.0.0.1:4222", "nats://localhost:5222")
	// Discovered servers should only contain the new url.
	validateURLs(c.DiscoveredServers(), "nats://localhost:5222")

	// verify user credentials are stripped out.
	opts.Servers = []string{"nats://user:pass@localhost:4333", "nats://token@localhost:4444"}
	c = &Conn{Opts: opts}
	c.ps = &parseState{}
	c.setupServerPool()
	validateURLs(c.Servers(), "nats://localhost:4333", "nats://localhost:4444")
}

// TestNoEchoOldServer verifies that requesting NoEcho against a server that
// does not advertise proto >= 1 makes connectProto fail.
func TestNoEchoOldServer(t *testing.T) {
	opts := GetDefaultOptions()
	opts.Url = DefaultURL
	opts.NoEcho = true

	nc := &Conn{Opts: opts}
	if err := nc.setupServerPool(); err != nil {
		t.Fatalf("Problem setting up Server Pool: %v\n", err)
	}

	// Old style with no proto, meaning 0. We need Proto:1 for NoEcho support.
	oldInfo := "{\"server_id\":\"22\",\"version\":\"1.1.0\",\"go\":\"go1.10.2\",\"port\":4222,\"max_payload\":1048576}"

	err := nc.processInfo(oldInfo)
	if err != nil {
		t.Fatalf("Error processing old style INFO: %v\n", err)
	}

	// Make sure connectProto generates an error.
	_, err = nc.connectProto()
	if err == nil {
		t.Fatalf("Expected an error but got none\n")
	}
}

func TestExpiredAuthentication(t *testing.T) {
	// The goal of these tests was to check how a client with an expiring JWT
	// behaves. It should receive an async -ERR indicating that the auth
	// has expired, which will trigger reconnects. There, the lib should
	// received -ERR for auth violation in response to the CONNECT (instead
	// of the PONG). The library should close the connection after receiving
	// twice the same auth error.
	// If we use an actual JWT that expires, the way the JWT library expires
	// a JWT cause the server to send the async -ERR first but then accepts
	// the CONNECT (since JWT lib does not say that it has expired), but
	// when the server sets up the expire callback, that callback fires right
	// away and so client receives async -ERR again.
	// So for a deterministic test, we won't use an actual NATS Server.
	// Instead, we will use a mock that simply returns appropriate -ERR and
	// ensure the client behaves as expected.
	for _, test := range []struct {
		name          string
		expectedProto string
		expectedErr   error
		ignoreAbort   bool
	}{
		{"expired users credentials", AUTHENTICATION_EXPIRED_ERR, ErrAuthExpired, false},
		{"revoked users credentials", AUTHENTICATION_REVOKED_ERR, ErrAuthRevoked, false},
		{"expired account", ACCOUNT_AUTHENTICATION_EXPIRED_ERR, ErrAccountAuthExpired, false},
		{"expired users credentials", AUTHENTICATION_EXPIRED_ERR, ErrAuthExpired, true},
		{"revoked users credentials", AUTHENTICATION_REVOKED_ERR, ErrAuthRevoked, true},
		{"expired account", ACCOUNT_AUTHENTICATION_EXPIRED_ERR, ErrAccountAuthExpired, true},
	} {
		t.Run(test.name, func(t *testing.T) {
			l, e := net.Listen("tcp", "127.0.0.1:0")
			if e != nil {
				t.Fatal("Could not listen on an ephemeral port")
			}
			tl := l.(*net.TCPListener)
			defer tl.Close()

			addr := tl.Addr().(*net.TCPAddr)

			wg := sync.WaitGroup{}
			wg.Add(1)
			go func() {
				defer wg.Done()
				connect := 0
				for {
					conn, err := l.Accept()
					if err != nil {
						return
					}
					defer conn.Close()

					info := "INFO {\"server_id\":\"foobar\",\"nonce\":\"anonce\"}\r\n"
					conn.Write([]byte(info))

					// Read connect and ping commands sent from the client
					br := bufio.NewReaderSize(conn, 10*1024)
					br.ReadLine()
					br.ReadLine()

					// First connection: accept the CONNECT, then push the async
					// auth error. Subsequent reconnects get -ERR right away.
					if connect++; connect == 1 {
						conn.Write([]byte(fmt.Sprintf("%s%s", _PONG_OP_, _CRLF_)))
						time.Sleep(300 * time.Millisecond)
						conn.Write([]byte(fmt.Sprintf("-ERR '%s'\r\n", test.expectedProto)))
					} else {
						conn.Write([]byte(fmt.Sprintf("-ERR '%s'\r\n", AUTHORIZATION_ERR)))
					}
					conn.Close()
				}
			}()

			ch := make(chan bool)
			errCh := make(chan error, 10)

			url := fmt.Sprintf("nats://127.0.0.1:%d", addr.Port)
			opts := []Option{
				ReconnectWait(25 * time.Millisecond),
				ReconnectJitter(0, 0),
				MaxReconnects(-1),
				ErrorHandler(func(_ *Conn, _ *Subscription, e error) {
					select {
					case errCh <- e:
					default:
					}
				}),
				ClosedHandler(func(nc *Conn) {
					ch <- true
				}),
			}
			if test.ignoreAbort {
				opts = append(opts, IgnoreAuthErrorAbort())
			}
			nc, err := Connect(url, opts...)
			if err != nil {
				t.Fatalf("Expected to connect, got %v", err)
			}
			defer nc.Close()

			if test.ignoreAbort {
				// We expect more than 3 errors, as the connect attempt should not be aborted after 2 failed attempts.
				for i := 0; i < 4; i++ {
					select {
					case e := <-errCh:
						if i == 0 && e != test.expectedErr {
							t.Fatalf("Expected error %q, got %q", test.expectedErr, e)
						} else if i > 0 && e != ErrAuthorization {
							t.Fatalf("Expected error %q, got %q", ErrAuthorization, e)
						}
					case <-time.After(time.Second):
						if i == 0 {
							t.Fatalf("Missing %q error", test.expectedErr)
						} else {
							t.Fatalf("Missing %q error", ErrAuthorization)
						}
					}
				}
				return
			}
			// We should give up since we get the same error on both tries.
			if err := WaitTime(ch, 2*time.Second); err != nil {
				t.Fatal("Should have closed after multiple failed attempts.")
			}
			if stats := nc.Stats(); stats.Reconnects > 2 {
				t.Fatalf("Expected at most 2 reconnects, got %d", stats.Reconnects)
			}
			// We expect 3 errors, the expired auth/revoke error, then 2 AUTHORIZATION_ERR
			// before the connection is closed.
			for i := 0; i < 3; i++ {
				select {
				case e := <-errCh:
					if i == 0 && e != test.expectedErr {
						t.Fatalf("Expected error %q, got %q", test.expectedErr, e)
					} else if i > 0 && e != ErrAuthorization {
						t.Fatalf("Expected error %q, got %q", ErrAuthorization, e)
					}
				default:
					if i == 0 {
						t.Fatalf("Missing %q error", test.expectedErr)
					} else {
						t.Fatalf("Missing %q error", ErrAuthorization)
					}
				}
			}
			// We should not have any more error
			select {
			case e := <-errCh:
				t.Fatalf("Extra error: %v", e)
			default:
			}
			// Close the listener and wait for go routine to end.
			l.Close()
			wg.Wait()
		})
	}
}

// createTmpFile writes content to a fresh temporary file and returns its
// name. The caller is responsible for removing the file.
func createTmpFile(t *testing.T, content []byte) string {
	t.Helper()
	conf, err := os.CreateTemp("", "")
	if err != nil {
		t.Fatalf("Error creating conf file: %v", err)
	}
	fName := conf.Name()
	conf.Close()
	if err := os.WriteFile(fName, content, 0666); err != nil {
		os.Remove(fName)
		t.Fatalf("Error writing conf file: %v", err)
	}
	return fName
}

// TestNKeyOptionFromSeed verifies that NkeyOptionFromSeed rejects missing or
// malformed seed files, and that a valid seed produces a signed CONNECT
// (checked by a mock server). It also checks that the seed file is re-read
// at connect time, so corrupting it later makes Connect fail.
func TestNKeyOptionFromSeed(t *testing.T) {
	if _, err := NkeyOptionFromSeed("file_that_does_not_exist"); err == nil {
		t.Fatal("Expected error got none")
	}

	seedFile := createTmpFile(t, []byte(`
		# No seed
		THIS_NOT_A_NKEY_SEED
	`))
	defer os.Remove(seedFile)

	if _, err := NkeyOptionFromSeed(seedFile); err == nil || !strings.Contains(err.Error(), "seed found") {
		t.Fatalf("Expected error about seed not found, got %v", err)
	}

	os.Remove(seedFile)
	seedFile = createTmpFile(t, []byte(`
		# Invalid seed
		SUBADSEED
	`))
	// Make sure that we detect SU (trim space) but it still fails because
	// this is not a valid NKey.
	if _, err := NkeyOptionFromSeed(seedFile); err == nil || strings.Contains(err.Error(), "seed found") {
		t.Fatalf("Expected error about invalid key, got %v", err)
	}

	os.Remove(seedFile)
	kp, _ := nkeys.CreateUser()
	seed, _ := kp.Seed()
	seedFile = createTmpFile(t, seed)
	opt, err := NkeyOptionFromSeed(seedFile)
	if err != nil {
		t.Fatalf("Error: %v", err)
	}

	l, e := net.Listen("tcp", "127.0.0.1:0")
	if e != nil {
		t.Fatal("Could not listen on an ephemeral port")
	}
	tl := l.(*net.TCPListener)
	defer tl.Close()

	addr := tl.Addr().(*net.TCPAddr)

	ch := make(chan bool, 1)
	errCh := make(chan error, 1)

	rs := func(ch chan bool) {
		conn, err := l.Accept()
		if err != nil {
			errCh <- fmt.Errorf("error accepting client connection: %v", err)
			return
		}
		defer conn.Close()
		info := "INFO {\"server_id\":\"foobar\",\"nonce\":\"anonce\"}\r\n"
		conn.Write([]byte(info))

		// Read connect and ping commands sent from the client
		br := bufio.NewReaderSize(conn, 10*1024)
		line, _, err := br.ReadLine()
		if err != nil {
			errCh <- fmt.Errorf("expected CONNECT and PING from client, got: %s", err)
			return
		}
		// If client got an error reading the seed, it will not send it
		if bytes.Contains(line, []byte(`"sig":`)) {
			conn.Write([]byte("PONG\r\n"))
		} else {
			// Interpreted string so \r\n is a real CRLF terminator (the
			// previous raw string sent literal backslash characters).
			conn.Write([]byte("-ERR go away\r\n"))
			conn.Close()
		}
		// Now wait to be notified that we can finish
		<-ch
		errCh <- nil
	}
	go rs(ch)

	nc, err := Connect(fmt.Sprintf("nats://127.0.0.1:%d", addr.Port), opt)
	if err != nil {
		t.Fatalf("Error on connect: %v", err)
	}
	nc.Close()
	close(ch)
	checkErrChannel(t, errCh)

	// Now that option is already created, change content of file
	os.WriteFile(seedFile, []byte(`xxxxx`), 0666)
	ch = make(chan bool, 1)
	go rs(ch)
	if _, err := Connect(fmt.Sprintf("nats://127.0.0.1:%d", addr.Port), opt); err == nil {
		t.Fatal("Expected error, got none")
	}
	close(ch)
	checkErrChannel(t, errCh)
}

// TestNoPanicOnSrvPoolSizeChanging reproduces a pool-shrink during reconnect:
// mock servers first advertise 3 connect_urls and reject the client, then a
// later INFO advertises only 2 urls, shrinking srvPool while it is in use.
func TestNoPanicOnSrvPoolSizeChanging(t *testing.T) {
	listeners := []net.Listener{}
	ports := []int{}

	for i := 0; i < 3; i++ {
		l, err := net.Listen("tcp", "127.0.0.1:0")
		if err != nil {
			t.Fatalf("Could not listen on an ephemeral port: %v", err)
		}
		defer l.Close()
		tl := l.(*net.TCPListener)
		ports = append(ports, tl.Addr().(*net.TCPAddr).Port)
		listeners = append(listeners, l)
	}
	wg := sync.WaitGroup{}
	wg.Add(len(listeners))
	connect := int32(0)
	srv := func(l net.Listener) {
		defer wg.Done()
		for {
			conn, err := l.Accept()
			if err != nil {
				return
			}
			defer conn.Close()
			var info string
			reject := atomic.AddInt32(&connect, 1) <= 2
			if reject {
				// Sends a list of 3 servers, where the second does not actually run.
				// This server is going to reject the connect (with auth error), so
				// client will move to 2nd, fail, then go to third...
				info = fmt.Sprintf("INFO {\"server_id\":\"foobar\",\"connect_urls\":[\"127.0.0.1:%d\",\"127.0.0.1:%d\",\"127.0.0.1:%d\"]}\r\n", ports[0], ports[1], ports[2])
			} else {
				// This third server will return the INFO with only the original server
				// and the third one, which will make the srvPool size shrink down to 2.
				info = fmt.Sprintf("INFO {\"server_id\":\"foobar\",\"connect_urls\":[\"127.0.0.1:%d\",\"127.0.0.1:%d\"]}\r\n", ports[0], ports[2])
			}
			conn.Write([]byte(info))

			// Read connect and ping commands sent from the client
			br := bufio.NewReaderSize(conn, 10*1024)
			br.ReadLine()
			br.ReadLine()

			if reject {
				conn.Write([]byte(fmt.Sprintf("-ERR '%s'\r\n", AUTHORIZATION_ERR)))
				conn.Close()
			} else {
				conn.Write([]byte(pongProto))
				br.ReadLine()
			}
		}
	}
	for _, l := range listeners {
		go srv(l)
	}
	time.Sleep(250 * time.Millisecond)

	nc, err := Connect(fmt.Sprintf("nats://127.0.0.1:%d", ports[0]))
	if err != nil {
		t.Fatalf("Error on connect: %v", err)
	}
	nc.Close()
	for _, l := range listeners {
		l.Close()
	}
	wg.Wait()
}

// TestHeaderParser checks DecodeHeadersMsg rejection of truncated header
// blocks and the decoding of inline status codes and descriptions.
func TestHeaderParser(t *testing.T) {
	shouldErr := func(hdr string) {
		t.Helper()
		if _, err := DecodeHeadersMsg([]byte(hdr)); err == nil {
			t.Fatalf("Expected an error")
		}
	}
	shouldErr("NATS/1.0")
	shouldErr("NATS/1.0\r\n")
	shouldErr("NATS/1.0\r\nk1:v1")
	shouldErr("NATS/1.0\r\nk1:v1\r\n")

	// Check that we can do inline status and descriptions
	checkStatus := func(hdr string, status int, description string) {
		t.Helper()
		hdrs, err := DecodeHeadersMsg([]byte(hdr + "\r\n\r\n"))
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		if code, err := strconv.Atoi(hdrs.Get(statusHdr)); err != nil || code != status {
			t.Fatalf("Expected status of %d, got %s", status, hdrs.Get(statusHdr))
		}
		if len(description) > 0 {
			// Note: previously this also re-tested the (always nil here)
			// outer err from DecodeHeadersMsg; that stale check was removed.
			if descr := hdrs.Get(descrHdr); descr != description {
				t.Fatalf("Expected description of %q, got %q", description, descr)
			}
		}
	}

	checkStatus("NATS/1.0 503", 503, "")
	checkStatus("NATS/1.0 503 No Responders", 503, "No Responders")
	checkStatus("NATS/1.0 404 No Messages", 404, "No Messages")
}

// TestHeaderMultiLine verifies that multi-value headers are emitted one line
// per value, and that keys set through http.Header are canonicalized while
// keys set directly on the map keep their original case.
func TestHeaderMultiLine(t *testing.T) {
	m := NewMsg("foo")
	m.Header = Header{
		"CorrelationID": []string{"123"},
		"Msg-ID":        []string{"456"},
		"X-NATS-Keys":   []string{"A", "B", "C"},
		"X-Test-Keys":   []string{"D", "E", "F"},
	}
	// Users can opt-in to canonicalize like http.Header does
	// by using http.Header#Set or http.Header#Add.
	http.Header(m.Header).Set("accept-encoding", "json")
	http.Header(m.Header).Add("AUTHORIZATION", "s3cr3t")

	// Multi Value Header becomes represented as multi-lines in the wire
	// since internally using same Write from http stdlib.
	m.Header.Set("X-Test", "First")
	m.Header.Add("X-Test", "Second")
	m.Header.Add("X-Test", "Third")

	b, err := m.headerBytes()
	if err != nil {
		t.Fatal(err)
	}
	result := string(b)

	expectedHeader := `NATS/1.0
Accept-Encoding: json
Authorization: s3cr3t
CorrelationID: 123
Msg-ID: 456
X-NATS-Keys: A
X-NATS-Keys: B
X-NATS-Keys: C
X-Test: First
X-Test: Second
X-Test: Third
X-Test-Keys: D
X-Test-Keys: E
X-Test-Keys: F

`
	if strings.Replace(expectedHeader, "\n", "\r\n", -1) != result {
		t.Fatalf("Expected: %q, got: %q", expectedHeader, result)
	}
}

// TestLameDuckMode verifies the client's handling of a server INFO that
// carries "ldm":true (with and without connect_urls).
func TestLameDuckMode(t *testing.T) {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatalf("Could not listen on an ephemeral port: %v", err)
	}
	tl := l.(*net.TCPListener)
	defer tl.Close()

	addr := tl.Addr().(*net.TCPAddr)

	wg := sync.WaitGroup{}
	wg.Add(1)
	go func() {
		defer wg.Done()
		ldmInfos := []string{"INFO {\"ldm\":true}\r\n", "INFO {\"connect_urls\":[\"127.0.0.1:1234\"],\"ldm\":true}\r\n"}
		for _, ldmInfo := range ldmInfos {
			conn, err := l.Accept()
			if err != nil {
				return
			}
			defer conn.Close()
			info := "INFO {\"server_id\":\"foobar\"}\r\n"
			conn.Write([]byte(info))
			// Read connect and ping commands sent from the client
			br := bufio.NewReaderSize(conn, 10*1024)
			br.ReadLine()
			br.ReadLine()
			conn.Write([]byte(pongProto))
			// Wait a bit and then send a INFO with LDM
			time.Sleep(100 * time.Millisecond)
			conn.Write([]byte(ldmInfo))
			br.ReadLine()
			conn.Close()
		}
	}()
	url := fmt.Sprintf("nats://127.0.0.1:%d", addr.Port)
	time.Sleep(100 * time.Millisecond)
	for _, test := range []struct {
		name  string
		curls bool
	}{
		{"without connect urls", false},
		{"with connect urls", true},
	} {
		t.Run(test.name, func(t *testing.T) {
			ch := make(chan bool, 1)
			errCh := make(chan error, 1)
			nc, err
:= Connect(url, DiscoveredServersHandler(func(nc *Conn) { ds := nc.DiscoveredServers() if !reflect.DeepEqual(ds, []string{"nats://127.0.0.1:1234"}) { errCh <- fmt.Errorf("wrong discovered servers: %q", ds) } else { errCh <- nil } }), LameDuckModeHandler(func(_ *Conn) { ch <- true }), ) if err != nil { t.Fatalf("Expected to connect, got %v", err) } defer nc.Close() select { case <-ch: case <-time.After(2 * time.Second): t.Fatal("should have been notified of LDM") } select { case e := <-errCh: if !test.curls { t.Fatal("should not have received connect urls") } else if e != nil { t.Fatal(e.Error()) } default: if test.curls { t.Fatal("should have received notification about discovered servers") } } nc.Close() }) } wg.Wait() } func BenchmarkHeaderDecode(b *testing.B) { benchmarks := []struct { name string header Header }{ {"Small - 25", Header{ "Msg-ID": []string{"123"}}, }, {"Medium - 141", Header{ "CorrelationID": []string{"123"}, "Msg-ID": []string{"456"}, "X-NATS-Keys": []string{"A", "B", "C"}, "X-Test-Keys": []string{"D", "E", "F"}, }}, {"Large - 368", Header{ "CorrelationID": []string{"123"}, "Msg-ID": []string{"456"}, "X-NATS-Keys": []string{"A", "B", "C"}, "X-Test-Keys": []string{"D", "E", "F"}, "X-A-Long-Header-1": []string{strings.Repeat("A", 100)}, "X-A-Long-Header-2": []string{strings.Repeat("A", 100)}, }}, } for _, bm := range benchmarks { b.Run(bm.name, func(b *testing.B) { b.ReportAllocs() m := NewMsg("foo") m.Header = bm.header hdr, err := m.headerBytes() if err != nil { b.Fatalf("Unexpected error: %v", err) } for i := 0; i < b.N; i++ { if _, err := DecodeHeadersMsg(hdr); err != nil { b.Fatalf("Unexpected error: %v", err) } } }) } } nats.go-1.41.0/netchan.go000066400000000000000000000073651477351342400151420ustar00rootroot00000000000000// Copyright 2013-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package nats

import (
	"errors"
	"reflect"
)

// This allows the functionality for network channels by binding send and receive Go chans
// to subjects and optionally queue groups.
// Data will be encoded and decoded via the EncodedConn and its associated encoders.

// BindSendChan binds a channel for send operations to NATS.
// Every value received from the channel is encoded and published to subject;
// the publishing loop runs on its own goroutine until the channel is closed
// or a publish fails.
//
// Deprecated: Encoded connections are no longer supported.
func (c *EncodedConn) BindSendChan(subject string, channel any) error {
	chVal := reflect.ValueOf(channel)
	if chVal.Kind() != reflect.Chan {
		return ErrChanArg
	}
	go chPublish(c, chVal, subject)
	return nil
}

// Publish all values that arrive on the channel until it is closed or we
// encounter an error. chVal must be the reflect.Value of a Go channel; each
// received element is published (encoded) to subject on c.
func chPublish(c *EncodedConn, chVal reflect.Value, subject string) {
	for {
		val, ok := chVal.Recv()
		if !ok {
			// Channel has most likely been closed.
			return
		}
		if e := c.Publish(subject, val.Interface()); e != nil {
			// Do this under lock.
			// The defer-in-loop is safe here: the function returns
			// unconditionally at the end of this branch.
			c.Conn.mu.Lock()
			defer c.Conn.mu.Unlock()

			if c.Conn.Opts.AsyncErrorCB != nil {
				// FIXME(dlc) - Not sure this is the right thing to do.
				// FIXME(ivan) - If the connection is not yet closed, try to schedule the callback
				if c.Conn.isClosed() {
					// Connection is closed, its async dispatcher may be gone:
					// invoke the callback on a fresh goroutine instead.
					go c.Conn.Opts.AsyncErrorCB(c.Conn, nil, e)
				} else {
					c.Conn.ach.push(func() { c.Conn.Opts.AsyncErrorCB(c.Conn, nil, e) })
				}
			}
			return
		}
	}
}

// BindRecvChan binds a channel for receive operations from NATS.
//
// Deprecated: Encoded connections are no longer supported.
func (c *EncodedConn) BindRecvChan(subject string, channel any) (*Subscription, error) {
	return c.bindRecvChan(subject, _EMPTY_, channel)
}

// BindRecvQueueChan binds a channel for queue-based receive operations from NATS.
//
// Deprecated: Encoded connections are no longer supported.
func (c *EncodedConn) BindRecvQueueChan(subject, queue string, channel any) (*Subscription, error) {
	return c.bindRecvChan(subject, queue, channel)
}

// Internal function to bind receive operations for a channel.
// Subscribes to subject (optionally in queue group queue) and, for every
// message, decodes the payload into a freshly allocated value of the
// channel's element type and sends it on the channel.
func (c *EncodedConn) bindRecvChan(subject, queue string, channel any) (*Subscription, error) {
	chVal := reflect.ValueOf(channel)
	if chVal.Kind() != reflect.Chan {
		return nil, ErrChanArg
	}
	argType := chVal.Type().Elem()

	cb := func(m *Msg) {
		var oPtr reflect.Value
		// Decode always needs a pointer target; allocate accordingly for
		// pointer and non-pointer element types.
		if argType.Kind() != reflect.Ptr {
			oPtr = reflect.New(argType)
		} else {
			oPtr = reflect.New(argType.Elem())
		}
		if err := c.Enc.Decode(m.Subject, m.Data, oPtr.Interface()); err != nil {
			c.Conn.err = errors.New("nats: Got an error trying to unmarshal: " + err.Error())
			if c.Conn.Opts.AsyncErrorCB != nil {
				c.Conn.ach.push(func() { c.Conn.Opts.AsyncErrorCB(c.Conn, m.Sub, c.Conn.err) })
			}
			return
		}
		if argType.Kind() != reflect.Ptr {
			// Dereference so the channel receives a value, not a pointer.
			oPtr = reflect.Indirect(oPtr)
		}

		// This is a bit hacky, but in this instance we may be trying to send to a closed channel.
		// and the user does not know when it is safe to close the channel.
		defer func() {
			// If we have panicked, recover and close the subscription.
			if r := recover(); r != nil {
				m.Sub.Unsubscribe()
			}
		}()
		// Actually do the send to the channel.
		chVal.Send(oPtr)
	}

	return c.Conn.subscribe(subject, queue, cb, nil, nil, false, nil)
}
nats.go-1.41.0/object.go000066400000000000000000001145331477351342400147640ustar00rootroot00000000000000// Copyright 2021-2023 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package nats

import (
	"bytes"
	"context"
	"crypto/sha256"
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"hash"
	"io"
	"net"
	"os"
	"strings"
	"sync"
	"time"

	"github.com/nats-io/nats.go/internal/parser"
	"github.com/nats-io/nuid"
)

// ObjectStoreManager creates, loads and deletes Object Stores
type ObjectStoreManager interface {
	// ObjectStore will look up and bind to an existing object store instance.
	ObjectStore(bucket string) (ObjectStore, error)
	// CreateObjectStore will create an object store.
	CreateObjectStore(cfg *ObjectStoreConfig) (ObjectStore, error)
	// DeleteObjectStore will delete the underlying stream for the named object.
	DeleteObjectStore(bucket string) error
	// ObjectStoreNames is used to retrieve a list of bucket names
	ObjectStoreNames(opts ...ObjectOpt) <-chan string
	// ObjectStores is used to retrieve a list of bucket statuses
	ObjectStores(opts ...ObjectOpt) <-chan ObjectStoreStatus
}

// ObjectStore is a blob store capable of storing large objects efficiently in
// JetStream streams
type ObjectStore interface {
	// Put will place the contents from the reader into a new object.
	Put(obj *ObjectMeta, reader io.Reader, opts ...ObjectOpt) (*ObjectInfo, error)
	// Get will pull the named object from the object store.
	Get(name string, opts ...GetObjectOpt) (ObjectResult, error)

	// PutBytes is convenience function to put a byte slice into this object store.
	PutBytes(name string, data []byte, opts ...ObjectOpt) (*ObjectInfo, error)
	// GetBytes is a convenience function to pull an object from this object store and return it as a byte slice.
	GetBytes(name string, opts ...GetObjectOpt) ([]byte, error)

	// PutString is convenience function to put a string into this object store.
	PutString(name string, data string, opts ...ObjectOpt) (*ObjectInfo, error)
	// GetString is a convenience function to pull an object from this object store and return it as a string.
	GetString(name string, opts ...GetObjectOpt) (string, error)

	// PutFile is convenience function to put a file into this object store.
	PutFile(file string, opts ...ObjectOpt) (*ObjectInfo, error)
	// GetFile is a convenience function to pull an object from this object store and place it in a file.
	GetFile(name, file string, opts ...GetObjectOpt) error

	// GetInfo will retrieve the current information for the object.
	GetInfo(name string, opts ...GetObjectInfoOpt) (*ObjectInfo, error)
	// UpdateMeta will update the metadata for the object.
	UpdateMeta(name string, meta *ObjectMeta) error

	// Delete will delete the named object.
	Delete(name string) error

	// AddLink will add a link to another object.
	AddLink(name string, obj *ObjectInfo) (*ObjectInfo, error)

	// AddBucketLink will add a link to another object store.
	AddBucketLink(name string, bucket ObjectStore) (*ObjectInfo, error)

	// Seal will seal the object store, no further modifications will be allowed.
	Seal() error

	// Watch for changes in the underlying store and receive meta information updates.
	Watch(opts ...WatchOpt) (ObjectWatcher, error)

	// List will list all the objects in this store.
	List(opts ...ListObjectsOpt) ([]*ObjectInfo, error)

	// Status retrieves run-time status about the backing store of the bucket.
	Status() (ObjectStoreStatus, error)
}

// ObjectOpt is a functional option accepted by object-store operations.
type ObjectOpt interface {
	configureObject(opts *objOpts) error
}

// objOpts holds the resolved option values for an object-store call.
type objOpts struct {
	// ctx is an optional cancellation/deadline context.
	ctx context.Context
}

// For nats.Context() support.
func (ctx ContextOpt) configureObject(opts *objOpts) error {
	opts.ctx = ctx
	return nil
}

// ObjectWatcher is what is returned when doing a watch.
type ObjectWatcher interface {
	// Updates returns a channel to read any updates to entries.
	Updates() <-chan *ObjectInfo
	// Stop will stop this watcher.
	Stop() error
}

// Sentinel errors returned by the object-store API; callers compare with
// errors.Is (or direct equality, as this package predates wrapped errors).
var (
	ErrObjectConfigRequired = errors.New("nats: object-store config required")
	ErrBadObjectMeta        = errors.New("nats: object-store meta information invalid")
	ErrObjectNotFound       = errors.New("nats: object not found")
	ErrInvalidStoreName     = errors.New("nats: invalid object-store name")
	ErrDigestMismatch       = errors.New("nats: received a corrupt object, digests do not match")
	ErrInvalidDigestFormat  = errors.New("nats: object digest hash has invalid format")
	ErrNoObjectsFound       = errors.New("nats: no objects found")
	ErrObjectAlreadyExists  = errors.New("nats: an object already exists with that name")
	ErrNameRequired         = errors.New("nats: name is required")
	ErrNeeds262             = errors.New("nats: object-store requires at least server version 2.6.2")
	ErrLinkNotAllowed       = errors.New("nats: link cannot be set when putting the object in bucket")
	ErrObjectRequired       = errors.New("nats: object required")
	ErrNoLinkToDeleted      = errors.New("nats: not allowed to link to a deleted object")
	ErrNoLinkToLink         = errors.New("nats: not allowed to link to another link")
	ErrCantGetBucket        = errors.New("nats: invalid Get, object is a link to a bucket")
	ErrBucketRequired       = errors.New("nats: bucket required")
	ErrBucketMalformed      = errors.New("nats: bucket malformed")
	ErrUpdateMetaDeleted    = errors.New("nats: cannot update meta for a deleted object")
)

// ObjectStoreConfig is the config for the object store.
type ObjectStoreConfig struct {
	// Bucket is the object store's name; it must match validBucketRe.
	Bucket      string        `json:"bucket"`
	Description string        `json:"description,omitempty"`
	// TTL is the maximum age of objects (maps to the stream's max_age).
	TTL       time.Duration `json:"max_age,omitempty"`
	MaxBytes  int64         `json:"max_bytes,omitempty"`
	Storage   StorageType   `json:"storage,omitempty"`
	Replicas  int           `json:"num_replicas,omitempty"`
	Placement *Placement    `json:"placement,omitempty"`

	// Bucket-specific metadata
	// NOTE: Metadata requires nats-server v2.10.0+
	Metadata map[string]string `json:"metadata,omitempty"`
	// Enable underlying stream compression.
	// NOTE: Compression is supported for nats-server 2.10.0+
	Compression bool `json:"compression,omitempty"`
}

type ObjectStoreStatus interface {
	// Bucket is the name of the bucket
	Bucket() string
	// Description is the description supplied when creating the bucket
	Description() string
	// TTL indicates how long objects are kept in the bucket
	TTL() time.Duration
	// Storage indicates the underlying JetStream storage technology used to store data
	Storage() StorageType
	// Replicas indicates how many storage replicas are kept for the data in the bucket
	Replicas() int
	// Sealed indicates the stream is sealed and cannot be modified in any way
	Sealed() bool
	// Size is the combined size of all data in the bucket including metadata, in bytes
	Size() uint64
	// BackingStore provides details about the underlying storage
	BackingStore() string
	// Metadata is the user supplied metadata for the bucket
	Metadata() map[string]string
	// IsCompressed indicates if the data is compressed on disk
	IsCompressed() bool
}

// ObjectMetaOptions holds optional, per-object settings such as a link target
// or a custom chunk size.
type ObjectMetaOptions struct {
	Link      *ObjectLink `json:"link,omitempty"`
	ChunkSize uint32      `json:"max_chunk_size,omitempty"`
}

// ObjectMeta is high level information about an object.
type ObjectMeta struct {
	Name        string `json:"name"`
	Description string `json:"description,omitempty"`
	Headers     Header `json:"headers,omitempty"`
	Metadata    map[string]string `json:"metadata,omitempty"`

	// Optional options.
	Opts *ObjectMetaOptions `json:"options,omitempty"`
}

// ObjectInfo is meta plus instance information.
type ObjectInfo struct {
	ObjectMeta
	Bucket  string    `json:"bucket"`
	NUID    string    `json:"nuid"`
	Size    uint64    `json:"size"`
	ModTime time.Time `json:"mtime"`
	Chunks  uint32    `json:"chunks"`
	Digest  string    `json:"digest,omitempty"`
	Deleted bool      `json:"deleted,omitempty"`
}

// ObjectLink is used to embed links to other buckets and objects.
type ObjectLink struct {
	// Bucket is the name of the other object store.
	Bucket string `json:"bucket"`
	// Name can be used to link to a single object.
	// If empty means this is a link to the whole store, like a directory.
	Name string `json:"name,omitempty"`
}

// ObjectResult will return the underlying stream info and also be an io.ReadCloser.
type ObjectResult interface {
	io.ReadCloser
	Info() (*ObjectInfo, error)
	Error() error
}

const (
	objNameTmpl         = "OBJ_%s"     // OBJ_<bucket> // stream name
	objAllChunksPreTmpl = "$O.%s.C.>"  // $O.<bucket>.C.> // chunk stream subject
	objAllMetaPreTmpl   = "$O.%s.M.>"  // $O.<bucket>.M.> // meta stream subject
	objChunksPreTmpl    = "$O.%s.C.%s" // $O.<bucket>.C.<object-nuid> // chunk message subject
	objMetaPreTmpl      = "$O.%s.M.%s" // $O.<bucket>.M.<name-encoded> // meta message subject
	objNoPending        = "0"
	objDefaultChunkSize = uint32(128 * 1024) // 128k
	objDigestType       = "SHA-256="
	objDigestTmpl       = objDigestType + "%s"
)

// obs is the concrete ObjectStore implementation bound to one bucket.
type obs struct {
	name   string // bucket name
	stream string // backing stream name (OBJ_<bucket>)
	js     *js
}

// CreateObjectStore will create an object store.
func (js *js) CreateObjectStore(cfg *ObjectStoreConfig) (ObjectStore, error) {
	if !js.nc.serverMinVersion(2, 6, 2) {
		return nil, ErrNeeds262
	}
	if cfg == nil {
		return nil, ErrObjectConfigRequired
	}
	if !validBucketRe.MatchString(cfg.Bucket) {
		return nil, ErrInvalidStoreName
	}

	name := cfg.Bucket
	chunks := fmt.Sprintf(objAllChunksPreTmpl, name)
	meta := fmt.Sprintf(objAllMetaPreTmpl, name)

	// We will set explicitly some values so that we can do comparison
	// if we get an "already in use" error and need to check if it is same.
	// See kv
	replicas := cfg.Replicas
	if replicas == 0 {
		replicas = 1
	}
	maxBytes := cfg.MaxBytes
	if maxBytes == 0 {
		maxBytes = -1
	}
	var compression StoreCompression
	if cfg.Compression {
		compression = S2Compression
	}
	scfg := &StreamConfig{
		Name:        fmt.Sprintf(objNameTmpl, name),
		Description: cfg.Description,
		Subjects:    []string{chunks, meta},
		MaxAge:      cfg.TTL,
		MaxBytes:    maxBytes,
		Storage:     cfg.Storage,
		Replicas:    replicas,
		Placement:   cfg.Placement,
		Discard:     DiscardNew,
		AllowRollup: true,
		AllowDirect: true,
		Metadata:    cfg.Metadata,
		Compression: compression,
	}

	// Create our stream.
	_, err := js.AddStream(scfg)
	if err != nil {
		return nil, err
	}

	return &obs{name: name, stream: scfg.Name, js: js}, nil
}

// ObjectStore will look up and bind to an existing object store instance.
func (js *js) ObjectStore(bucket string) (ObjectStore, error) {
	if !validBucketRe.MatchString(bucket) {
		return nil, ErrInvalidStoreName
	}
	if !js.nc.serverMinVersion(2, 6, 2) {
		return nil, ErrNeeds262
	}

	stream := fmt.Sprintf(objNameTmpl, bucket)
	si, err := js.StreamInfo(stream)
	if err != nil {
		return nil, err
	}
	return &obs{name: bucket, stream: si.Config.Name, js: js}, nil
}

// DeleteObjectStore will delete the underlying stream for the named object.
func (js *js) DeleteObjectStore(bucket string) error {
	stream := fmt.Sprintf(objNameTmpl, bucket)
	return js.DeleteStream(stream)
}

// encodeName makes an arbitrary object name safe for use as a subject token
// by base64-url encoding it.
func encodeName(name string) string {
	return base64.URLEncoding.EncodeToString([]byte(name))
}

// Put will place the contents from the reader into this object-store.
// Put uploads the reader's contents as chunk messages published async to the
// bucket's chunk subject under a fresh NUID, then writes a rollup meta
// message. On any failure the partially published chunks are purged.
func (obs *obs) Put(meta *ObjectMeta, r io.Reader, opts ...ObjectOpt) (*ObjectInfo, error) {
	if meta == nil || meta.Name == "" {
		return nil, ErrBadObjectMeta
	}

	if meta.Opts == nil {
		meta.Opts = &ObjectMetaOptions{ChunkSize: objDefaultChunkSize}
	} else if meta.Opts.Link != nil {
		return nil, ErrLinkNotAllowed
	} else if meta.Opts.ChunkSize == 0 {
		meta.Opts.ChunkSize = objDefaultChunkSize
	}

	var o objOpts
	for _, opt := range opts {
		if opt != nil {
			if err := opt.configureObject(&o); err != nil {
				return nil, err
			}
		}
	}
	ctx := o.ctx

	// Create the new nuid so chunks go on a new subject if the name is re-used
	newnuid := nuid.Next()

	// These will be used in more than one place
	chunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, newnuid)

	// Grab existing meta info (einfo). Ok to be found or not found, any other error is a problem
	// Chunks on the old nuid can be cleaned up at the end
	einfo, err := obs.GetInfo(meta.Name, GetObjectInfoShowDeleted()) // GetInfo will encode the name
	if err != nil && err != ErrObjectNotFound {
		return nil, err
	}

	// For async error handling
	var perr error
	var mu sync.Mutex
	setErr := func(err error) {
		mu.Lock()
		defer mu.Unlock()
		perr = err
	}
	getErr := func() error {
		mu.Lock()
		defer mu.Unlock()
		return perr
	}

	// Create our own JS context to handle errors etc.
	jetStream, err := obs.js.nc.JetStream(PublishAsyncErrHandler(func(js JetStream, _ *Msg, err error) {
		setErr(err)
	}))
	if err != nil {
		return nil, err
	}
	defer jetStream.(*js).cleanupReplySub()

	// purgePartial removes the chunks published so far under the new nuid.
	purgePartial := func() error {
		// wait until all pubs are complete or up to default timeout before attempting purge
		select {
		case <-jetStream.PublishAsyncComplete():
		case <-time.After(obs.js.opts.wait):
		}

		if err := obs.js.purgeStream(obs.stream, &StreamPurgeRequest{Subject: chunkSubj}); err != nil {
			return fmt.Errorf("could not cleanup bucket after erroneous put operation: %w", err)
		}
		return nil
	}

	m, h := NewMsg(chunkSubj), sha256.New()
	chunk, sent, total := make([]byte, meta.Opts.ChunkSize), 0, uint64(0)

	// set up the info object. The chunk upload sets the size and digest
	info := &ObjectInfo{Bucket: obs.name, NUID: newnuid, ObjectMeta: *meta}

	for r != nil {
		if ctx != nil {
			// Non-blocking poll of the context between chunks.
			select {
			case <-ctx.Done():
				if ctx.Err() == context.Canceled {
					err = ctx.Err()
				} else {
					err = ErrTimeout
				}
			default:
			}
			if err != nil {
				if purgeErr := purgePartial(); purgeErr != nil {
					return nil, errors.Join(err, purgeErr)
				}
				return nil, err
			}
		}

		// Actual read.
		// TODO(dlc) - Deadline?
		n, readErr := r.Read(chunk)

		// Handle all non EOF errors
		if readErr != nil && readErr != io.EOF {
			if purgeErr := purgePartial(); purgeErr != nil {
				return nil, errors.Join(readErr, purgeErr)
			}
			return nil, readErr
		}

		// Add chunk only if we received data
		if n > 0 {
			// Chunk processing.
			m.Data = chunk[:n]
			h.Write(m.Data)

			// Send msg itself.
			if _, err := jetStream.PublishMsgAsync(m); err != nil {
				if purgeErr := purgePartial(); purgeErr != nil {
					return nil, errors.Join(err, purgeErr)
				}
				return nil, err
			}
			if err := getErr(); err != nil {
				if purgeErr := purgePartial(); purgeErr != nil {
					return nil, errors.Join(err, purgeErr)
				}
				return nil, err
			}
			// Update totals.
			sent++
			total += uint64(n)
		}

		// EOF Processing.
		if readErr == io.EOF {
			// Place meta info.
			info.Size, info.Chunks = uint64(total), uint32(sent)
			info.Digest = GetObjectDigestValue(h)
			break
		}
	}

	// Prepare the meta message
	metaSubj := fmt.Sprintf(objMetaPreTmpl, obs.name, encodeName(meta.Name))
	mm := NewMsg(metaSubj)
	// Rollup so only the latest meta message is retained per object.
	mm.Header.Set(MsgRollup, MsgRollupSubject)
	mm.Data, err = json.Marshal(info)
	if err != nil {
		if r != nil {
			if purgeErr := purgePartial(); purgeErr != nil {
				return nil, errors.Join(err, purgeErr)
			}
		}
		return nil, err
	}

	// Publish the meta message.
	_, err = jetStream.PublishMsgAsync(mm)
	if err != nil {
		if r != nil {
			if purgeErr := purgePartial(); purgeErr != nil {
				return nil, errors.Join(err, purgeErr)
			}
		}
		return nil, err
	}

	// Wait for all to be processed.
	select {
	case <-jetStream.PublishAsyncComplete():
		if err := getErr(); err != nil {
			if r != nil {
				if purgeErr := purgePartial(); purgeErr != nil {
					return nil, errors.Join(err, purgeErr)
				}
			}
			return nil, err
		}
	case <-time.After(obs.js.opts.wait):
		return nil, ErrTimeout
	}

	info.ModTime = time.Now().UTC() // This time is not actually the correct time

	// Delete any original chunks.
	if einfo != nil && !einfo.Deleted {
		echunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, einfo.NUID)
		if err := obs.js.purgeStream(obs.stream, &StreamPurgeRequest{Subject: echunkSubj}); err != nil {
			return info, err
		}
	}

	// TODO would it be okay to do this to return the info with the correct time?
	// With the understanding that it is an extra call to the server.
	// Otherwise the time the user gets back is the client time, not the server time.
	// return obs.GetInfo(info.Name)

	return info, nil
}

// GetObjectDigestValue calculates the base64 value of hashed data
func GetObjectDigestValue(data hash.Hash) string {
	sha := data.Sum(nil)
	return fmt.Sprintf(objDigestTmpl, base64.URLEncoding.EncodeToString(sha[:]))
}

// DecodeObjectDigest decodes base64 hash
func DecodeObjectDigest(data string) ([]byte, error) {
	// Split on the first '=' only: "SHA-256=<base64>"; the base64 payload may
	// itself contain '=' padding.
	digest := strings.SplitN(data, "=", 2)
	if len(digest) != 2 {
		return nil, ErrInvalidDigestFormat
	}
	return base64.URLEncoding.DecodeString(digest[1])
}

// ObjectResult impl.
type objResult struct {
	sync.Mutex
	info        *ObjectInfo
	r           io.ReadCloser // read side of the chunk pipe
	err         error         // first error seen while streaming
	ctx         context.Context
	digest      hash.Hash // running SHA-256 of streamed data
	readTimeout time.Duration
}

// isLink reports whether the object is a link to another object or bucket.
func (info *ObjectInfo) isLink() bool {
	return info.ObjectMeta.Opts != nil && info.ObjectMeta.Opts.Link != nil
}

// GetObjectOpt is a functional option for Get.
type GetObjectOpt interface {
	configureGetObject(opts *getObjectOpts) error
}

type getObjectOpts struct {
	ctx context.Context
	// Include deleted object in the result.
	showDeleted bool
}

type getObjectFn func(opts *getObjectOpts) error

func (opt getObjectFn) configureGetObject(opts *getObjectOpts) error {
	return opt(opts)
}

// GetObjectShowDeleted makes Get() return object if it was marked as deleted.
func GetObjectShowDeleted() GetObjectOpt {
	return getObjectFn(func(opts *getObjectOpts) error {
		opts.showDeleted = true
		return nil
	})
}

// For nats.Context() support.
func (ctx ContextOpt) configureGetObject(opts *getObjectOpts) error {
	opts.ctx = ctx
	return nil
}

// Get will pull the object from the underlying stream.
// Chunks are consumed through an ordered consumer and streamed to the caller
// through a pipe; links are resolved by recursing into the target store.
func (obs *obs) Get(name string, opts ...GetObjectOpt) (ObjectResult, error) {
	var o getObjectOpts
	for _, opt := range opts {
		if opt != nil {
			if err := opt.configureGetObject(&o); err != nil {
				return nil, err
			}
		}
	}
	ctx := o.ctx
	infoOpts := make([]GetObjectInfoOpt, 0)
	if ctx != nil {
		infoOpts = append(infoOpts, Context(ctx))
	}
	if o.showDeleted {
		infoOpts = append(infoOpts, GetObjectInfoShowDeleted())
	}

	// Grab meta info.
	info, err := obs.GetInfo(name, infoOpts...)
	if err != nil {
		return nil, err
	}
	if info.NUID == _EMPTY_ {
		return nil, ErrBadObjectMeta
	}

	// Check for object links. If single objects we do a pass through.
	if info.isLink() {
		if info.ObjectMeta.Opts.Link.Name == _EMPTY_ {
			return nil, ErrCantGetBucket
		}

		// is the link in the same bucket?
		lbuck := info.ObjectMeta.Opts.Link.Bucket
		if lbuck == obs.name {
			return obs.Get(info.ObjectMeta.Opts.Link.Name)
		}

		// different bucket
		lobs, err := obs.js.ObjectStore(lbuck)
		if err != nil {
			return nil, err
		}
		return lobs.Get(info.ObjectMeta.Opts.Link.Name)
	}

	result := &objResult{info: info, ctx: ctx, readTimeout: obs.js.opts.wait}
	if info.Size == 0 {
		return result, nil
	}

	// NOTE(review): net.Pipe (rather than io.Pipe) presumably chosen for its
	// deadline support on the read side — confirm against objResult.Read.
	pr, pw := net.Pipe()
	result.r = pr

	gotErr := func(m *Msg, err error) {
		pw.Close()
		m.Sub.Unsubscribe()
		result.setErr(err)
	}

	// For calculating sum256
	result.digest = sha256.New()

	processChunk := func(m *Msg) {
		var err error
		if ctx != nil {
			select {
			case <-ctx.Done():
				if errors.Is(ctx.Err(), context.Canceled) {
					err = ctx.Err()
				} else {
					err = ErrTimeout
				}
			default:
			}
			if err != nil {
				gotErr(m, err)
				return
			}
		}

		tokens, err := parser.GetMetadataFields(m.Reply)
		if err != nil {
			gotErr(m, err)
			return
		}

		// Write to our pipe.
		for b := m.Data; len(b) > 0; {
			n, err := pw.Write(b)
			if err != nil {
				gotErr(m, err)
				return
			}
			b = b[n:]
		}
		// Update sha256
		result.digest.Write(m.Data)

		// Check if we are done.
		if tokens[parser.AckNumPendingTokenPos] == objNoPending {
			pw.Close()
			m.Sub.Unsubscribe()
		}
	}

	chunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, info.NUID)
	streamName := fmt.Sprintf(objNameTmpl, obs.name)
	subscribeOpts := []SubOpt{
		OrderedConsumer(),
		BindStream(streamName),
	}
	_, err = obs.js.Subscribe(chunkSubj, processChunk, subscribeOpts...)
	if err != nil {
		return nil, err
	}

	return result, nil
}

// Delete will delete the object.
func (obs *obs) Delete(name string) error {
	// Grab meta info.
	info, err := obs.GetInfo(name, GetObjectInfoShowDeleted())
	if err != nil {
		return err
	}
	if info.NUID == _EMPTY_ {
		return ErrBadObjectMeta
	}

	// Place a rollup delete marker and publish the info
	info.Deleted = true
	info.Size, info.Chunks, info.Digest = 0, 0, _EMPTY_

	if err = publishMeta(info, obs.js); err != nil {
		return err
	}

	// Purge chunks for the object.
	chunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, info.NUID)
	return obs.js.purgeStream(obs.stream, &StreamPurgeRequest{Subject: chunkSubj})
}

// publishMeta publishes info as a rollup meta message for its object,
// zeroing ModTime on the wire and resetting it to client time afterwards.
func publishMeta(info *ObjectInfo, js JetStreamContext) error {
	// marshal the object into json, don't store an actual time
	info.ModTime = time.Time{}
	data, err := json.Marshal(info)
	if err != nil {
		return err
	}

	// Prepare and publish the message.
	mm := NewMsg(fmt.Sprintf(objMetaPreTmpl, info.Bucket, encodeName(info.ObjectMeta.Name)))
	mm.Header.Set(MsgRollup, MsgRollupSubject)
	mm.Data = data
	if _, err := js.PublishMsg(mm); err != nil {
		return err
	}

	// set the ModTime in case it's returned to the user, even though it's not the correct time.
	info.ModTime = time.Now().UTC()
	return nil
}

// AddLink will add a link to another object if it's not deleted and not another link
// name is the name of this link object
// obj is what is being linked too
func (obs *obs) AddLink(name string, obj *ObjectInfo) (*ObjectInfo, error) {
	if name == "" {
		return nil, ErrNameRequired
	}

	// TODO Handle stale info

	if obj == nil || obj.Name == "" {
		return nil, ErrObjectRequired
	}
	if obj.Deleted {
		return nil, ErrNoLinkToDeleted
	}
	if obj.isLink() {
		return nil, ErrNoLinkToLink
	}

	// If object with link's name is found, error.
	// If link with link's name is found, that's okay to overwrite.
	// If there was an error that was not ErrObjectNotFound, error.
	einfo, err := obs.GetInfo(name, GetObjectInfoShowDeleted())
	if einfo != nil {
		if !einfo.isLink() {
			return nil, ErrObjectAlreadyExists
		}
	} else if err != ErrObjectNotFound {
		return nil, err
	}

	// create the meta for the link
	meta := &ObjectMeta{
		Name: name,
		Opts: &ObjectMetaOptions{Link: &ObjectLink{Bucket: obj.Bucket, Name: obj.Name}},
	}
	info := &ObjectInfo{Bucket: obs.name, NUID: nuid.Next(), ModTime: time.Now().UTC(), ObjectMeta: *meta}

	// put the link object
	if err = publishMeta(info, obs.js); err != nil {
		return nil, err
	}

	return info, nil
}

// AddBucketLink will add a link to another object store.
func (ob *obs) AddBucketLink(name string, bucket ObjectStore) (*ObjectInfo, error) {
	if name == "" {
		return nil, ErrNameRequired
	}
	if bucket == nil {
		return nil, ErrBucketRequired
	}
	bos, ok := bucket.(*obs)
	if !ok {
		return nil, ErrBucketMalformed
	}

	// If object with link's name is found, error.
	// If link with link's name is found, that's okay to overwrite.
	// If there was an error that was not ErrObjectNotFound, error.
	einfo, err := ob.GetInfo(name, GetObjectInfoShowDeleted())
	if einfo != nil {
		if !einfo.isLink() {
			return nil, ErrObjectAlreadyExists
		}
	} else if err != ErrObjectNotFound {
		return nil, err
	}

	// create the meta for the link
	meta := &ObjectMeta{
		Name: name,
		Opts: &ObjectMetaOptions{Link: &ObjectLink{Bucket: bos.name}},
	}
	info := &ObjectInfo{Bucket: ob.name, NUID: nuid.Next(), ObjectMeta: *meta}

	// put the link object
	err = publishMeta(info, ob.js)
	if err != nil {
		return nil, err
	}

	return info, nil
}

// PutBytes is convenience function to put a byte slice into this object store.
func (obs *obs) PutBytes(name string, data []byte, opts ...ObjectOpt) (*ObjectInfo, error) {
	return obs.Put(&ObjectMeta{Name: name}, bytes.NewReader(data), opts...)
}

// GetBytes is a convenience function to pull an object from this object store and return it as a byte slice.
func (obs *obs) GetBytes(name string, opts ...GetObjectOpt) ([]byte, error) { result, err := obs.Get(name, opts...) if err != nil { return nil, err } defer result.Close() var b bytes.Buffer if _, err := b.ReadFrom(result); err != nil { return nil, err } return b.Bytes(), nil } // PutString is convenience function to put a string into this object store. func (obs *obs) PutString(name string, data string, opts ...ObjectOpt) (*ObjectInfo, error) { return obs.Put(&ObjectMeta{Name: name}, strings.NewReader(data), opts...) } // GetString is a convenience function to pull an object from this object store and return it as a string. func (obs *obs) GetString(name string, opts ...GetObjectOpt) (string, error) { result, err := obs.Get(name, opts...) if err != nil { return _EMPTY_, err } defer result.Close() var b bytes.Buffer if _, err := b.ReadFrom(result); err != nil { return _EMPTY_, err } return b.String(), nil } // PutFile is convenience function to put a file into an object store. func (obs *obs) PutFile(file string, opts ...ObjectOpt) (*ObjectInfo, error) { f, err := os.Open(file) if err != nil { return nil, err } defer f.Close() return obs.Put(&ObjectMeta{Name: file}, f, opts...) } // GetFile is a convenience function to pull and object and place in a file. func (obs *obs) GetFile(name, file string, opts ...GetObjectOpt) error { // Expect file to be new. f, err := os.OpenFile(file, os.O_WRONLY|os.O_CREATE, 0600) if err != nil { return err } defer f.Close() result, err := obs.Get(name, opts...) if err != nil { os.Remove(f.Name()) return err } defer result.Close() // Stream copy to the file. _, err = io.Copy(f, result) return err } type GetObjectInfoOpt interface { configureGetInfo(opts *getObjectInfoOpts) error } type getObjectInfoOpts struct { ctx context.Context // Include deleted object in the result. 
showDeleted bool } type getObjectInfoFn func(opts *getObjectInfoOpts) error func (opt getObjectInfoFn) configureGetInfo(opts *getObjectInfoOpts) error { return opt(opts) } // GetObjectInfoShowDeleted makes GetInfo() return object if it was marked as deleted. func GetObjectInfoShowDeleted() GetObjectInfoOpt { return getObjectInfoFn(func(opts *getObjectInfoOpts) error { opts.showDeleted = true return nil }) } // For nats.Context() support. func (ctx ContextOpt) configureGetInfo(opts *getObjectInfoOpts) error { opts.ctx = ctx return nil } // GetInfo will retrieve the current information for the object. func (obs *obs) GetInfo(name string, opts ...GetObjectInfoOpt) (*ObjectInfo, error) { // Grab last meta value we have. if name == "" { return nil, ErrNameRequired } var o getObjectInfoOpts for _, opt := range opts { if opt != nil { if err := opt.configureGetInfo(&o); err != nil { return nil, err } } } metaSubj := fmt.Sprintf(objMetaPreTmpl, obs.name, encodeName(name)) // used as data in a JS API call stream := fmt.Sprintf(objNameTmpl, obs.name) m, err := obs.js.GetLastMsg(stream, metaSubj) if err != nil { if errors.Is(err, ErrMsgNotFound) { err = ErrObjectNotFound } return nil, err } var info ObjectInfo if err := json.Unmarshal(m.Data, &info); err != nil { return nil, ErrBadObjectMeta } if !o.showDeleted && info.Deleted { return nil, ErrObjectNotFound } info.ModTime = m.Time return &info, nil } // UpdateMeta will update the meta for the object. func (obs *obs) UpdateMeta(name string, meta *ObjectMeta) error { if meta == nil { return ErrBadObjectMeta } // Grab the current meta. info, err := obs.GetInfo(name) if err != nil { if errors.Is(err, ErrObjectNotFound) { return ErrUpdateMetaDeleted } return err } // If the new name is different from the old, and it exists, error // If there was an error that was not ErrObjectNotFound, error. 
if name != meta.Name { existingInfo, err := obs.GetInfo(meta.Name, GetObjectInfoShowDeleted()) if err != nil && !errors.Is(err, ErrObjectNotFound) { return err } if err == nil && !existingInfo.Deleted { return ErrObjectAlreadyExists } } // Update Meta prevents update of ObjectMetaOptions (Link, ChunkSize) // These should only be updated internally when appropriate. info.Name = meta.Name info.Description = meta.Description info.Headers = meta.Headers info.Metadata = meta.Metadata // Prepare the meta message if err = publishMeta(info, obs.js); err != nil { return err } // did the name of this object change? We just stored the meta under the new name // so delete the meta from the old name via purge stream for subject if name != meta.Name { metaSubj := fmt.Sprintf(objMetaPreTmpl, obs.name, encodeName(name)) return obs.js.purgeStream(obs.stream, &StreamPurgeRequest{Subject: metaSubj}) } return nil } // Seal will seal the object store, no further modifications will be allowed. func (obs *obs) Seal() error { stream := fmt.Sprintf(objNameTmpl, obs.name) si, err := obs.js.StreamInfo(stream) if err != nil { return err } // Seal the stream from being able to take on more messages. cfg := si.Config cfg.Sealed = true _, err = obs.js.UpdateStream(&cfg) return err } // Implementation for Watch type objWatcher struct { updates chan *ObjectInfo sub *Subscription } // Updates returns the interior channel. func (w *objWatcher) Updates() <-chan *ObjectInfo { if w == nil { return nil } return w.updates } // Stop will unsubscribe from the watcher. func (w *objWatcher) Stop() error { if w == nil { return nil } return w.sub.Unsubscribe() } // Watch for changes in the underlying store and receive meta information updates. 
func (obs *obs) Watch(opts ...WatchOpt) (ObjectWatcher, error) { var o watchOpts for _, opt := range opts { if opt != nil { if err := opt.configureWatcher(&o); err != nil { return nil, err } } } var initDoneMarker bool w := &objWatcher{updates: make(chan *ObjectInfo, 32)} update := func(m *Msg) { var info ObjectInfo if err := json.Unmarshal(m.Data, &info); err != nil { return // TODO(dlc) - Communicate this upwards? } meta, err := m.Metadata() if err != nil { return } if !o.ignoreDeletes || !info.Deleted { info.ModTime = meta.Timestamp w.updates <- &info } // if UpdatesOnly is set, no not send nil to the channel // as it would always be triggered after initializing the watcher if !initDoneMarker && meta.NumPending == 0 { initDoneMarker = true w.updates <- nil } } allMeta := fmt.Sprintf(objAllMetaPreTmpl, obs.name) _, err := obs.js.GetLastMsg(obs.stream, allMeta) // if there are no messages on the stream and we are not watching // updates only, send nil to the channel to indicate that the initial // watch is done if !o.updatesOnly { if errors.Is(err, ErrMsgNotFound) { initDoneMarker = true w.updates <- nil } } else { // if UpdatesOnly was used, mark initialization as complete initDoneMarker = true } // Used ordered consumer to deliver results. streamName := fmt.Sprintf(objNameTmpl, obs.name) subOpts := []SubOpt{OrderedConsumer(), BindStream(streamName)} if !o.includeHistory { subOpts = append(subOpts, DeliverLastPerSubject()) } if o.updatesOnly { subOpts = append(subOpts, DeliverNew()) } sub, err := obs.js.Subscribe(allMeta, update, subOpts...) if err != nil { return nil, err } // Set us up to close when the waitForMessages func returns. sub.pDone = func(_ string) { close(w.updates) } w.sub = sub return w, nil } type ListObjectsOpt interface { configureListObjects(opts *listObjectOpts) error } type listObjectOpts struct { ctx context.Context // Include deleted objects in the result channel. 
showDeleted bool } type listObjectsFn func(opts *listObjectOpts) error func (opt listObjectsFn) configureListObjects(opts *listObjectOpts) error { return opt(opts) } // ListObjectsShowDeleted makes ListObjects() return deleted objects. func ListObjectsShowDeleted() ListObjectsOpt { return listObjectsFn(func(opts *listObjectOpts) error { opts.showDeleted = true return nil }) } // For nats.Context() support. func (ctx ContextOpt) configureListObjects(opts *listObjectOpts) error { opts.ctx = ctx return nil } // List will list all the objects in this store. func (obs *obs) List(opts ...ListObjectsOpt) ([]*ObjectInfo, error) { var o listObjectOpts for _, opt := range opts { if opt != nil { if err := opt.configureListObjects(&o); err != nil { return nil, err } } } watchOpts := make([]WatchOpt, 0) if !o.showDeleted { watchOpts = append(watchOpts, IgnoreDeletes()) } watcher, err := obs.Watch(watchOpts...) if err != nil { return nil, err } defer watcher.Stop() if o.ctx == nil { o.ctx = context.Background() } var objs []*ObjectInfo updates := watcher.Updates() Updates: for { select { case entry := <-updates: if entry == nil { break Updates } objs = append(objs, entry) case <-o.ctx.Done(): return nil, o.ctx.Err() } } if len(objs) == 0 { return nil, ErrNoObjectsFound } return objs, nil } // ObjectBucketStatus represents status of a Bucket, implements ObjectStoreStatus type ObjectBucketStatus struct { nfo *StreamInfo bucket string } // Bucket is the name of the bucket func (s *ObjectBucketStatus) Bucket() string { return s.bucket } // Description is the description supplied when creating the bucket func (s *ObjectBucketStatus) Description() string { return s.nfo.Config.Description } // TTL indicates how long objects are kept in the bucket func (s *ObjectBucketStatus) TTL() time.Duration { return s.nfo.Config.MaxAge } // Storage indicates the underlying JetStream storage technology used to store data func (s *ObjectBucketStatus) Storage() StorageType { return 
s.nfo.Config.Storage } // Replicas indicates how many storage replicas are kept for the data in the bucket func (s *ObjectBucketStatus) Replicas() int { return s.nfo.Config.Replicas } // Sealed indicates the stream is sealed and cannot be modified in any way func (s *ObjectBucketStatus) Sealed() bool { return s.nfo.Config.Sealed } // Size is the combined size of all data in the bucket including metadata, in bytes func (s *ObjectBucketStatus) Size() uint64 { return s.nfo.State.Bytes } // BackingStore indicates what technology is used for storage of the bucket func (s *ObjectBucketStatus) BackingStore() string { return "JetStream" } // Metadata is the metadata supplied when creating the bucket func (s *ObjectBucketStatus) Metadata() map[string]string { return s.nfo.Config.Metadata } // StreamInfo is the stream info retrieved to create the status func (s *ObjectBucketStatus) StreamInfo() *StreamInfo { return s.nfo } // IsCompressed indicates if the data is compressed on disk func (s *ObjectBucketStatus) IsCompressed() bool { return s.nfo.Config.Compression != NoCompression } // Status retrieves run-time status about a bucket func (obs *obs) Status() (ObjectStoreStatus, error) { nfo, err := obs.js.StreamInfo(obs.stream) if err != nil { return nil, err } status := &ObjectBucketStatus{ nfo: nfo, bucket: obs.name, } return status, nil } // Read impl. 
func (o *objResult) Read(p []byte) (n int, err error) { o.Lock() defer o.Unlock() readDeadline := time.Now().Add(o.readTimeout) if ctx := o.ctx; ctx != nil { if deadline, ok := ctx.Deadline(); ok { readDeadline = deadline } select { case <-ctx.Done(): if ctx.Err() == context.Canceled { o.err = ctx.Err() } else { o.err = ErrTimeout } default: } } if o.err != nil { return 0, o.err } if o.r == nil { return 0, io.EOF } r := o.r.(net.Conn) r.SetReadDeadline(readDeadline) n, err = r.Read(p) if err, ok := err.(net.Error); ok && err.Timeout() { if ctx := o.ctx; ctx != nil { select { case <-ctx.Done(): if ctx.Err() == context.Canceled { return 0, ctx.Err() } else { return 0, ErrTimeout } default: err = nil } } } if err == io.EOF { // Make sure the digest matches. sha := o.digest.Sum(nil) rsha, decodeErr := DecodeObjectDigest(o.info.Digest) if decodeErr != nil { o.err = decodeErr return 0, o.err } if !bytes.Equal(sha[:], rsha) { o.err = ErrDigestMismatch return 0, o.err } } return n, err } // Close impl. 
func (o *objResult) Close() error { o.Lock() defer o.Unlock() if o.r == nil { return nil } return o.r.Close() } func (o *objResult) setErr(err error) { o.Lock() defer o.Unlock() o.err = err } func (o *objResult) Info() (*ObjectInfo, error) { o.Lock() defer o.Unlock() return o.info, o.err } func (o *objResult) Error() error { o.Lock() defer o.Unlock() return o.err } // ObjectStoreNames is used to retrieve a list of bucket names func (js *js) ObjectStoreNames(opts ...ObjectOpt) <-chan string { var o objOpts for _, opt := range opts { if opt != nil { if err := opt.configureObject(&o); err != nil { return nil } } } ch := make(chan string) var cancel context.CancelFunc if o.ctx == nil { o.ctx, cancel = context.WithTimeout(context.Background(), defaultRequestWait) } l := &streamLister{js: js} l.js.opts.streamListSubject = fmt.Sprintf(objAllChunksPreTmpl, "*") l.js.opts.ctx = o.ctx go func() { if cancel != nil { defer cancel() } defer close(ch) for l.Next() { for _, info := range l.Page() { if !strings.HasPrefix(info.Config.Name, "OBJ_") { continue } select { case ch <- info.Config.Name: case <-o.ctx.Done(): return } } } }() return ch } // ObjectStores is used to retrieve a list of bucket statuses func (js *js) ObjectStores(opts ...ObjectOpt) <-chan ObjectStoreStatus { var o objOpts for _, opt := range opts { if opt != nil { if err := opt.configureObject(&o); err != nil { return nil } } } ch := make(chan ObjectStoreStatus) var cancel context.CancelFunc if o.ctx == nil { o.ctx, cancel = context.WithTimeout(context.Background(), defaultRequestWait) } l := &streamLister{js: js} l.js.opts.streamListSubject = fmt.Sprintf(objAllChunksPreTmpl, "*") l.js.opts.ctx = o.ctx go func() { if cancel != nil { defer cancel() } defer close(ch) for l.Next() { for _, info := range l.Page() { if !strings.HasPrefix(info.Config.Name, "OBJ_") { continue } select { case ch <- &ObjectBucketStatus{ nfo: info, bucket: strings.TrimPrefix(info.Config.Name, "OBJ_"), }: case <-o.ctx.Done(): return } } } 
}() return ch } nats.go-1.41.0/parser.go000066400000000000000000000265761477351342400150230ustar00rootroot00000000000000// Copyright 2012-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package nats import ( "fmt" ) type msgArg struct { subject []byte reply []byte sid int64 hdr int size int } const MAX_CONTROL_LINE_SIZE = 4096 type parseState struct { state int as int drop int hdr int ma msgArg argBuf []byte msgBuf []byte msgCopied bool scratch [MAX_CONTROL_LINE_SIZE]byte } const ( OP_START = iota OP_PLUS OP_PLUS_O OP_PLUS_OK OP_MINUS OP_MINUS_E OP_MINUS_ER OP_MINUS_ERR OP_MINUS_ERR_SPC MINUS_ERR_ARG OP_M OP_MS OP_MSG OP_MSG_SPC MSG_ARG MSG_PAYLOAD MSG_END OP_H OP_P OP_PI OP_PIN OP_PING OP_PO OP_PON OP_PONG OP_I OP_IN OP_INF OP_INFO OP_INFO_SPC INFO_ARG ) // parse is the fast protocol parser engine. 
func (nc *Conn) parse(buf []byte) error { var i int var b byte // Move to loop instead of range syntax to allow jumping of i for i = 0; i < len(buf); i++ { b = buf[i] switch nc.ps.state { case OP_START: switch b { case 'M', 'm': nc.ps.state = OP_M nc.ps.hdr = -1 nc.ps.ma.hdr = -1 case 'H', 'h': nc.ps.state = OP_H nc.ps.hdr = 0 nc.ps.ma.hdr = 0 case 'P', 'p': nc.ps.state = OP_P case '+': nc.ps.state = OP_PLUS case '-': nc.ps.state = OP_MINUS case 'I', 'i': nc.ps.state = OP_I default: goto parseErr } case OP_H: switch b { case 'M', 'm': nc.ps.state = OP_M default: goto parseErr } case OP_M: switch b { case 'S', 's': nc.ps.state = OP_MS default: goto parseErr } case OP_MS: switch b { case 'G', 'g': nc.ps.state = OP_MSG default: goto parseErr } case OP_MSG: switch b { case ' ', '\t': nc.ps.state = OP_MSG_SPC default: goto parseErr } case OP_MSG_SPC: switch b { case ' ', '\t': continue default: nc.ps.state = MSG_ARG nc.ps.as = i } case MSG_ARG: switch b { case '\r': nc.ps.drop = 1 case '\n': var arg []byte if nc.ps.argBuf != nil { arg = nc.ps.argBuf } else { arg = buf[nc.ps.as : i-nc.ps.drop] } if err := nc.processMsgArgs(arg); err != nil { return err } nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, MSG_PAYLOAD // jump ahead with the index. If this overruns // what is left we fall out and process a split buffer. i = nc.ps.as + nc.ps.ma.size - 1 default: if nc.ps.argBuf != nil { nc.ps.argBuf = append(nc.ps.argBuf, b) } } case MSG_PAYLOAD: if nc.ps.msgBuf != nil { if len(nc.ps.msgBuf) >= nc.ps.ma.size { nc.processMsg(nc.ps.msgBuf) nc.ps.argBuf, nc.ps.msgBuf, nc.ps.msgCopied, nc.ps.state = nil, nil, false, MSG_END } else { // copy as much as we can to the buffer and skip ahead. toCopy := nc.ps.ma.size - len(nc.ps.msgBuf) avail := len(buf) - i if avail < toCopy { toCopy = avail } if toCopy > 0 { start := len(nc.ps.msgBuf) // This is needed for copy to work. 
nc.ps.msgBuf = nc.ps.msgBuf[:start+toCopy] copy(nc.ps.msgBuf[start:], buf[i:i+toCopy]) // Update our index i = (i + toCopy) - 1 } else { nc.ps.msgBuf = append(nc.ps.msgBuf, b) } } } else if i-nc.ps.as >= nc.ps.ma.size { nc.processMsg(buf[nc.ps.as:i]) nc.ps.argBuf, nc.ps.msgBuf, nc.ps.msgCopied, nc.ps.state = nil, nil, false, MSG_END } case MSG_END: switch b { case '\n': nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START default: continue } case OP_PLUS: switch b { case 'O', 'o': nc.ps.state = OP_PLUS_O default: goto parseErr } case OP_PLUS_O: switch b { case 'K', 'k': nc.ps.state = OP_PLUS_OK default: goto parseErr } case OP_PLUS_OK: switch b { case '\n': nc.processOK() nc.ps.drop, nc.ps.state = 0, OP_START } case OP_MINUS: switch b { case 'E', 'e': nc.ps.state = OP_MINUS_E default: goto parseErr } case OP_MINUS_E: switch b { case 'R', 'r': nc.ps.state = OP_MINUS_ER default: goto parseErr } case OP_MINUS_ER: switch b { case 'R', 'r': nc.ps.state = OP_MINUS_ERR default: goto parseErr } case OP_MINUS_ERR: switch b { case ' ', '\t': nc.ps.state = OP_MINUS_ERR_SPC default: goto parseErr } case OP_MINUS_ERR_SPC: switch b { case ' ', '\t': continue default: nc.ps.state = MINUS_ERR_ARG nc.ps.as = i } case MINUS_ERR_ARG: switch b { case '\r': nc.ps.drop = 1 case '\n': var arg []byte if nc.ps.argBuf != nil { arg = nc.ps.argBuf nc.ps.argBuf = nil } else { arg = buf[nc.ps.as : i-nc.ps.drop] } nc.processErr(string(arg)) nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START default: if nc.ps.argBuf != nil { nc.ps.argBuf = append(nc.ps.argBuf, b) } } case OP_P: switch b { case 'I', 'i': nc.ps.state = OP_PI case 'O', 'o': nc.ps.state = OP_PO default: goto parseErr } case OP_PO: switch b { case 'N', 'n': nc.ps.state = OP_PON default: goto parseErr } case OP_PON: switch b { case 'G', 'g': nc.ps.state = OP_PONG default: goto parseErr } case OP_PONG: switch b { case '\n': nc.processPong() nc.ps.drop, nc.ps.state = 0, OP_START } case OP_PI: switch b { case 'N', 'n': nc.ps.state = 
OP_PIN default: goto parseErr } case OP_PIN: switch b { case 'G', 'g': nc.ps.state = OP_PING default: goto parseErr } case OP_PING: switch b { case '\n': nc.processPing() nc.ps.drop, nc.ps.state = 0, OP_START } case OP_I: switch b { case 'N', 'n': nc.ps.state = OP_IN default: goto parseErr } case OP_IN: switch b { case 'F', 'f': nc.ps.state = OP_INF default: goto parseErr } case OP_INF: switch b { case 'O', 'o': nc.ps.state = OP_INFO default: goto parseErr } case OP_INFO: switch b { case ' ', '\t': nc.ps.state = OP_INFO_SPC default: goto parseErr } case OP_INFO_SPC: switch b { case ' ', '\t': continue default: nc.ps.state = INFO_ARG nc.ps.as = i } case INFO_ARG: switch b { case '\r': nc.ps.drop = 1 case '\n': var arg []byte if nc.ps.argBuf != nil { arg = nc.ps.argBuf nc.ps.argBuf = nil } else { arg = buf[nc.ps.as : i-nc.ps.drop] } nc.processAsyncInfo(arg) nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START default: if nc.ps.argBuf != nil { nc.ps.argBuf = append(nc.ps.argBuf, b) } } default: goto parseErr } } // Check for split buffer scenarios if (nc.ps.state == MSG_ARG || nc.ps.state == MINUS_ERR_ARG || nc.ps.state == INFO_ARG) && nc.ps.argBuf == nil { nc.ps.argBuf = nc.ps.scratch[:0] nc.ps.argBuf = append(nc.ps.argBuf, buf[nc.ps.as:i-nc.ps.drop]...) // FIXME, check max len } // Check for split msg if nc.ps.state == MSG_PAYLOAD && nc.ps.msgBuf == nil { // We need to clone the msgArg if it is still referencing the // read buffer and we are not able to process the msg. if nc.ps.argBuf == nil { nc.cloneMsgArg() } // If we will overflow the scratch buffer, just create a // new buffer to hold the split message. if nc.ps.ma.size > cap(nc.ps.scratch)-len(nc.ps.argBuf) { lrem := len(buf[nc.ps.as:]) nc.ps.msgBuf = make([]byte, lrem, nc.ps.ma.size) copy(nc.ps.msgBuf, buf[nc.ps.as:]) nc.ps.msgCopied = true } else { nc.ps.msgBuf = nc.ps.scratch[len(nc.ps.argBuf):len(nc.ps.argBuf)] nc.ps.msgBuf = append(nc.ps.msgBuf, (buf[nc.ps.as:])...) 
} } return nil parseErr: return fmt.Errorf("nats: Parse Error [%d]: '%s'", nc.ps.state, buf[i:]) } // cloneMsgArg is used when the split buffer scenario has the pubArg in the existing read buffer, but // we need to hold onto it into the next read. func (nc *Conn) cloneMsgArg() { nc.ps.argBuf = nc.ps.scratch[:0] nc.ps.argBuf = append(nc.ps.argBuf, nc.ps.ma.subject...) nc.ps.argBuf = append(nc.ps.argBuf, nc.ps.ma.reply...) nc.ps.ma.subject = nc.ps.argBuf[:len(nc.ps.ma.subject)] if nc.ps.ma.reply != nil { nc.ps.ma.reply = nc.ps.argBuf[len(nc.ps.ma.subject):] } } const argsLenMax = 4 func (nc *Conn) processMsgArgs(arg []byte) error { // Use separate function for header based messages. if nc.ps.hdr >= 0 { return nc.processHeaderMsgArgs(arg) } // Unroll splitArgs to avoid runtime/heap issues a := [argsLenMax][]byte{} args := a[:0] start := -1 for i, b := range arg { switch b { case ' ', '\t', '\r', '\n': if start >= 0 { args = append(args, arg[start:i]) start = -1 } default: if start < 0 { start = i } } } if start >= 0 { args = append(args, arg[start:]) } switch len(args) { case 3: nc.ps.ma.subject = args[0] nc.ps.ma.sid = parseInt64(args[1]) nc.ps.ma.reply = nil nc.ps.ma.size = int(parseInt64(args[2])) case 4: nc.ps.ma.subject = args[0] nc.ps.ma.sid = parseInt64(args[1]) nc.ps.ma.reply = args[2] nc.ps.ma.size = int(parseInt64(args[3])) default: return fmt.Errorf("nats: processMsgArgs Parse Error: '%s'", arg) } if nc.ps.ma.sid < 0 { return fmt.Errorf("nats: processMsgArgs Bad or Missing Sid: '%s'", arg) } if nc.ps.ma.size < 0 { return fmt.Errorf("nats: processMsgArgs Bad or Missing Size: '%s'", arg) } return nil } // processHeaderMsgArgs is for a header based message. 
func (nc *Conn) processHeaderMsgArgs(arg []byte) error { // Unroll splitArgs to avoid runtime/heap issues a := [argsLenMax][]byte{} args := a[:0] start := -1 for i, b := range arg { switch b { case ' ', '\t', '\r', '\n': if start >= 0 { args = append(args, arg[start:i]) start = -1 } default: if start < 0 { start = i } } } if start >= 0 { args = append(args, arg[start:]) } switch len(args) { case 4: nc.ps.ma.subject = args[0] nc.ps.ma.sid = parseInt64(args[1]) nc.ps.ma.reply = nil nc.ps.ma.hdr = int(parseInt64(args[2])) nc.ps.ma.size = int(parseInt64(args[3])) case 5: nc.ps.ma.subject = args[0] nc.ps.ma.sid = parseInt64(args[1]) nc.ps.ma.reply = args[2] nc.ps.ma.hdr = int(parseInt64(args[3])) nc.ps.ma.size = int(parseInt64(args[4])) default: return fmt.Errorf("nats: processHeaderMsgArgs Parse Error: '%s'", arg) } if nc.ps.ma.sid < 0 { return fmt.Errorf("nats: processHeaderMsgArgs Bad or Missing Sid: '%s'", arg) } if nc.ps.ma.hdr < 0 || nc.ps.ma.hdr > nc.ps.ma.size { return fmt.Errorf("nats: processHeaderMsgArgs Bad or Missing Header Size: '%s'", arg) } if nc.ps.ma.size < 0 { return fmt.Errorf("nats: processHeaderMsgArgs Bad or Missing Size: '%s'", arg) } return nil } // ASCII numbers 0-9 const ( ascii_0 = 48 ascii_9 = 57 ) // parseInt64 expects decimal positive numbers. We // return -1 to signal error func parseInt64(d []byte) (n int64) { if len(d) == 0 { return -1 } for _, dec := range d { if dec < ascii_0 || dec > ascii_9 { return -1 } n = n*10 + (int64(dec) - ascii_0) } return n } nats.go-1.41.0/scripts/000077500000000000000000000000001477351342400146475ustar00rootroot00000000000000nats.go-1.41.0/scripts/cov.sh000077500000000000000000000026141477351342400160000ustar00rootroot00000000000000#!/bin/bash -e # Run from directory above via ./scripts/cov.sh rm -rf ./cov mkdir cov go test -modfile=go_test.mod --failfast -vet=off -v -covermode=atomic -coverprofile=./cov/nats.out . 
-tags=skip_no_race_tests go test -modfile=go_test.mod --failfast -vet=off -v -covermode=atomic -coverprofile=./cov/test.out -coverpkg=github.com/nats-io/nats.go ./test -tags=skip_no_race_tests,internal_testing go test -modfile=go_test.mod --failfast -vet=off -v -covermode=atomic -coverprofile=./cov/jetstream.out -coverpkg=github.com/nats-io/nats.go/jetstream ./jetstream/... go test -modfile=go_test.mod --failfast -vet=off -v -covermode=atomic -coverprofile=./cov/service.out -coverpkg=github.com/nats-io/nats.go/micro ./micro/... go test -modfile=go_test.mod --failfast -vet=off -v -covermode=atomic -coverprofile=./cov/builtin.out -coverpkg=github.com/nats-io/nats.go/encoders/builtin ./test -run EncBuiltin -tags=skip_no_race_tests go test -modfile=go_test.mod --failfast -vet=off -v -covermode=atomic -coverprofile=./cov/protobuf.out -coverpkg=github.com/nats-io/nats.go/encoders/protobuf ./test -run EncProto -tags=skip_no_race_tests gocovmerge ./cov/*.out > acc.out rm -rf ./cov # Without argument, launch browser results. We are going to push to coveralls only # from ci.yml and after success of the build (and result of pushing will not affect # build result). if [[ $1 == "" ]]; then go tool cover -html=acc.out fi nats.go-1.41.0/test/000077500000000000000000000000001477351342400141375ustar00rootroot00000000000000nats.go-1.41.0/test/auth_test.go000066400000000000000000000305551477351342400164760ustar00rootroot00000000000000// Copyright 2012-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package test import ( "errors" "fmt" "io/fs" "net" "os" "strings" "sync/atomic" "testing" "time" "github.com/nats-io/nats-server/v2/server" "github.com/nats-io/nats-server/v2/test" "github.com/nats-io/nats.go" ) func TestAuth(t *testing.T) { opts := test.DefaultTestOptions opts.Port = 8232 opts.Username = "derek" opts.Password = "foo" s := RunServerWithOptions(&opts) defer s.Shutdown() _, err := nats.Connect("nats://127.0.0.1:8232") if err == nil { t.Fatal("Should have received an error while trying to connect") } // This test may be a bit too strict for the future, but for now makes // sure that we correctly process the -ERR content on connect. if strings.ToLower(err.Error()) != nats.ErrAuthorization.Error() { t.Fatalf("Expected error '%v', got '%v'", nats.ErrAuthorization, err) } if !errors.Is(err, nats.ErrAuthorization) { t.Fatalf("Expected error '%v', got '%v'", nats.ErrAuthorization, err) } nc, err := nats.Connect("nats://derek:foo@127.0.0.1:8232") if err != nil { t.Fatal("Should have connected successfully with a token") } nc.Close() // Use Options nc, err = nats.Connect("nats://127.0.0.1:8232", nats.UserInfo("derek", "foo")) if err != nil { t.Fatalf("Should have connected successfully with a token: %v", err) } nc.Close() // Verify that credentials in URL take precedence. 
nc, err = nats.Connect("nats://derek:foo@127.0.0.1:8232", nats.UserInfo("foo", "bar")) if err != nil { t.Fatalf("Should have connected successfully with a token: %v", err) } nc.Close() } func TestAuthFailNoDisconnectErrCB(t *testing.T) { opts := test.DefaultTestOptions opts.Port = 8232 opts.Username = "derek" opts.Password = "foo" s := RunServerWithOptions(&opts) defer s.Shutdown() copts := nats.GetDefaultOptions() copts.Url = "nats://127.0.0.1:8232" receivedDisconnectErrCB := int32(0) copts.DisconnectedErrCB = func(nc *nats.Conn, _ error) { atomic.AddInt32(&receivedDisconnectErrCB, 1) } _, err := copts.Connect() if err == nil { t.Fatal("Should have received an error while trying to connect") } if atomic.LoadInt32(&receivedDisconnectErrCB) > 0 { t.Fatal("Should not have received a disconnect callback on auth failure") } } func TestAuthFailAllowReconnect(t *testing.T) { ts := RunServerOnPort(23232) defer ts.Shutdown() var servers = []string{ "nats://127.0.0.1:23232", "nats://127.0.0.1:23233", "nats://127.0.0.1:23234", } opts2 := test.DefaultTestOptions opts2.Port = 23233 opts2.Username = "ivan" opts2.Password = "foo" ts2 := RunServerWithOptions(&opts2) defer ts2.Shutdown() ts3 := RunServerOnPort(23234) defer ts3.Shutdown() reconnectch := make(chan bool) defer close(reconnectch) copts := nats.GetDefaultOptions() copts.Servers = servers copts.AllowReconnect = true copts.NoRandomize = true copts.MaxReconnect = 10 copts.ReconnectWait = 100 * time.Millisecond nats.ReconnectJitter(0, 0)(&copts) copts.ReconnectedCB = func(_ *nats.Conn) { reconnectch <- true } // Connect nc, err := copts.Connect() if err != nil { t.Fatalf("Should have connected ok: %v", err) } defer nc.Close() // Override default handler for test. nc.SetErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, _ error) {}) // Stop the server ts.Shutdown() // The client will try to connect to the second server, and that // should fail. It should then try to connect to the third and succeed. 
// Wait for the reconnect CB. if e := Wait(reconnectch); e != nil { t.Fatal("Reconnect callback should have been triggered") } if nc.IsClosed() { t.Fatal("Should have reconnected") } if nc.ConnectedUrl() != servers[2] { t.Fatalf("Should have reconnected to %s, reconnected to %s instead", servers[2], nc.ConnectedUrl()) } } func TestTokenHandlerReconnect(t *testing.T) { var servers = []string{ "nats://127.0.0.1:8232", "nats://127.0.0.1:8233", } ts := RunServerOnPort(8232) defer ts.Shutdown() opts2 := test.DefaultTestOptions opts2.Port = 8233 secret := "S3Cr3T0k3n!" opts2.Authorization = secret ts2 := RunServerWithOptions(&opts2) defer ts2.Shutdown() reconnectch := make(chan bool) defer close(reconnectch) copts := nats.GetDefaultOptions() copts.Servers = servers copts.AllowReconnect = true copts.NoRandomize = true copts.MaxReconnect = 10 copts.ReconnectWait = 100 * time.Millisecond nats.ReconnectJitter(0, 0)(&copts) copts.TokenHandler = func() string { return secret } copts.ReconnectedCB = func(_ *nats.Conn) { reconnectch <- true } // Connect nc, err := copts.Connect() if err != nil { t.Fatalf("Should have connected ok: %v", err) } defer nc.Close() // Stop the server ts.Shutdown() // The client will try to connect to the second server and succeed. // Wait for the reconnect CB. if e := Wait(reconnectch); e != nil { t.Fatal("Reconnect callback should have been triggered") } if nc.IsClosed() { t.Fatal("Should have reconnected") } if nc.ConnectedUrl() != servers[1] { t.Fatalf("Should have reconnected to %s, reconnected to %s instead", servers[1], nc.ConnectedUrl()) } } func TestTokenAuth(t *testing.T) { opts := test.DefaultTestOptions opts.Port = 8232 secret := "S3Cr3T0k3n!" 
opts.Authorization = secret s := RunServerWithOptions(&opts) defer s.Shutdown() _, err := nats.Connect("nats://127.0.0.1:8232") if err == nil { t.Fatal("Should have received an error while trying to connect") } tokenURL := fmt.Sprintf("nats://%s@127.0.0.1:8232", secret) nc, err := nats.Connect(tokenURL) if err != nil { t.Fatal("Should have connected successfully") } nc.Close() // Use Options nc, err = nats.Connect("nats://127.0.0.1:8232", nats.Token(secret)) if err != nil { t.Fatalf("Should have connected successfully: %v", err) } nc.Close() // Verify that token cannot be set when token handler is provided. _, err = nats.Connect("nats://127.0.0.1:8232", nats.TokenHandler(func() string { return secret }), nats.Token(secret)) if err == nil { t.Fatal("Should have received an error while trying to connect") } // Verify that token handler cannot be provided when token is set. _, err = nats.Connect("nats://127.0.0.1:8232", nats.Token(secret), nats.TokenHandler(func() string { return secret })) if err == nil { t.Fatal("Should have received an error while trying to connect") } // Verify that token in the URL takes precedence. nc, err = nats.Connect(tokenURL, nats.Token("badtoken")) if err != nil { t.Fatalf("Should have connected successfully: %v", err) } nc.Close() } func TestTokenHandlerAuth(t *testing.T) { opts := test.DefaultTestOptions opts.Port = 8232 secret := "S3Cr3T0k3n!" 
opts.Authorization = secret s := RunServerWithOptions(&opts) defer s.Shutdown() _, err := nats.Connect("nats://127.0.0.1:8232") if err == nil { t.Fatal("Should have received an error while trying to connect") } tokenURL := fmt.Sprintf("nats://%s@127.0.0.1:8232", secret) nc, err := nats.Connect(tokenURL) if err != nil { t.Fatal("Should have connected successfully") } nc.Close() // Use Options nc, err = nats.Connect("nats://127.0.0.1:8232", nats.TokenHandler(func() string { return secret })) if err != nil { t.Fatalf("Should have connected successfully: %v", err) } nc.Close() // Verify that token cannot be set when token handler is provided. _, err = nats.Connect("nats://127.0.0.1:8232", nats.TokenHandler(func() string { return secret }), nats.Token(secret)) if err == nil { t.Fatal("Should have received an error while trying to connect") } // Verify that token handler cannot be provided when token is set. _, err = nats.Connect("nats://127.0.0.1:8232", nats.Token(secret), nats.TokenHandler(func() string { return secret })) if err == nil { t.Fatal("Should have received an error while trying to connect") } // Verify that token handler cannot be provided when token is in URL. 
_, err = nats.Connect(tokenURL, nats.TokenHandler(func() string { return secret })) if err == nil { t.Fatal("Should have received an error while trying to connect") } } func TestPermViolation(t *testing.T) { opts := test.DefaultTestOptions opts.Port = 8232 opts.Users = []*server.User{ { Username: "ivan", Password: "pwd", Permissions: &server.Permissions{ Publish: &server.SubjectPermission{Allow: []string{"Foo"}}, Subscribe: &server.SubjectPermission{Allow: []string{"Bar"}}, }, }, } s := RunServerWithOptions(&opts) defer s.Shutdown() errCh := make(chan error, 2) errCB := func(_ *nats.Conn, _ *nats.Subscription, err error) { errCh <- err } nc, err := nats.Connect( fmt.Sprintf("nats://ivan:pwd@127.0.0.1:%d", opts.Port), nats.ErrorHandler(errCB)) if err != nil { t.Fatalf("Error on connect: %v", err) } defer nc.Close() // Cause a publish error nc.Publish("Bar", []byte("fail")) // Cause a subscribe error nc.Subscribe("Foo", func(_ *nats.Msg) {}) expectedErrorTypes := []string{"publish", "subscription"} for _, expectedErr := range expectedErrorTypes { select { case e := <-errCh: if !strings.Contains(strings.ToLower(e.Error()), nats.PERMISSIONS_ERR) { t.Fatalf("Did not receive error about permissions") } if !strings.Contains(strings.ToLower(e.Error()), expectedErr) { t.Fatalf("Did not receive error about %q, got %v", expectedErr, e.Error()) } // Make sure subject is not converted to lower case if expectedErr == "publish" && !strings.Contains(e.Error(), "Bar") { t.Fatalf("Subject Bar not found in error: %v", e) } else if expectedErr == "subscribe" && !strings.Contains(e.Error(), "Foo") { t.Fatalf("Subject Foo not found in error: %v", e) } case <-time.After(2 * time.Second): t.Fatalf("Did not get the permission error") } } // Make sure connection has not been closed if nc.IsClosed() { t.Fatal("Connection should be not be closed") } } func TestConnectMissingCreds(t *testing.T) { s := RunServerOnPort(-1) defer s.Shutdown() // Using TEST-NET sample address from RFC 5737. 
testnetaddr := "nats://192.0.2.2:4222" _, err := nats.Connect(fmt.Sprintf("%s,%s", s.ClientURL(), testnetaddr), nats.UserCredentials("missing"), nats.DontRandomize()) if !errors.Is(err, fs.ErrNotExist) { t.Fatalf("Expected not exists error, got: %v", err) } } func TestUserInfoHandler(t *testing.T) { conf := createConfFile(t, []byte(` listen: 127.0.0.1:-1 accounts: { A { users: [{ user: "pp", password: "foo" }] } } `)) defer os.Remove(conf) s, _ := RunServerWithConfig(conf) defer s.Shutdown() user, pass := "pp", "foo" userInfoCB := func() (string, string) { return user, pass } // check that we cannot set the user info twice _, err := nats.Connect(s.ClientURL(), nats.UserInfo("pp", "foo"), nats.UserInfoHandler(userInfoCB)) if !errors.Is(err, nats.ErrUserInfoAlreadySet) { t.Fatalf("Expected ErrUserInfoAlreadySet, got: %v", err) } addr, ok := s.Addr().(*net.TCPAddr) if !ok { t.Fatalf("Expected a TCP address, got %T", addr) } // check that user/pass from url takes precedence _, err = nats.Connect(fmt.Sprintf("nats://bad:bad@localhost:%d", addr.Port), nats.UserInfoHandler(userInfoCB)) if !errors.Is(err, nats.ErrAuthorization) { t.Fatalf("Expected ErrAuthorization, got: %v", err) } // connect using the handler nc, err := nats.Connect(s.ClientURL(), nats.ReconnectWait(100*time.Millisecond), nats.UserInfoHandler(userInfoCB)) if err != nil { t.Fatalf("Error on connect: %v", err) } defer nc.Close() // now change the password and reload the server newConfig := []byte(` listen: 127.0.0.1:-1 accounts: { A { users: [{ user: "dd", password: "bar" }] } } `) if err := os.WriteFile(conf, newConfig, 0666); err != nil { t.Fatalf("Error writing conf file: %v", err) } // update the user info used by the callback user, pass = "dd", "bar" status := nc.StatusChanged(nats.CONNECTED) if err := s.Reload(); err != nil { t.Fatalf("Error on reload: %v", err) } // we should get a reconnected event meaning the new credentials were used WaitOnChannel(t, status, nats.CONNECTED) } 
nats.go-1.41.0/test/basic_test.go000066400000000000000000000650471477351342400166220ustar00rootroot00000000000000// Copyright 2012-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package test import ( "bytes" "context" "fmt" "math" "regexp" "runtime" "strings" "sync" "sync/atomic" "testing" "time" "github.com/nats-io/nats.go" ) // This function returns the number of go routines ensuring // that runtime.NumGoroutine() returns the same value // several times in a row with little delay between captures. // This will check for at most 2s for value to be stable. func getStableNumGoroutine(t *testing.T) int { t.Helper() timeout := time.Now().Add(2 * time.Second) var base, old, same int for time.Now().Before(timeout) { base = runtime.NumGoroutine() if old == base { same++ if same == 5 { return base } } else { same = 0 } old = base time.Sleep(50 * time.Millisecond) } t.Fatalf("Unable to get stable number of go routines") return 0 } func checkNoGoroutineLeak(t *testing.T, base int, action string) { t.Helper() waitFor(t, 2*time.Second, 100*time.Millisecond, func() error { delta := (runtime.NumGoroutine() - base) if delta > 0 { return fmt.Errorf("%d Go routines still exist after %s", delta, action) } return nil }) } // Check the error channel for an error and if one is present, // calls t.Fatal(e.Error()). Note that this supports tests that // send nil to the error channel and so report error only if // e is != nil. 
func checkErrChannel(t *testing.T, errCh chan error) { t.Helper() select { case e := <-errCh: if e != nil { t.Fatal(e.Error()) } default: } } func TestCloseLeakingGoRoutines(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() base := getStableNumGoroutine(t) nc := NewDefaultConnection(t) nc.Flush() nc.Close() checkNoGoroutineLeak(t, base, "Close()") // Make sure we can call Close() multiple times nc.Close() } func TestLeakingGoRoutinesOnFailedConnect(t *testing.T) { base := getStableNumGoroutine(t) nc, err := nats.Connect(nats.DefaultURL) if err == nil { nc.Close() t.Fatalf("Expected failure to connect") } checkNoGoroutineLeak(t, base, "failed connect") } func TestTLSConnectionStateNonTLS(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() _, err := nc.TLSConnectionState() if err != nats.ErrConnectionNotTLS { t.Fatalf("Expected a not tls error, got: %v", err) } } func TestConnectedServer(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() u := nc.ConnectedUrl() if u == "" || u != nats.DefaultURL { t.Fatalf("Unexpected connected URL of %s\n", u) } id := nc.ConnectedServerId() if id == "" { t.Fatalf("Expected a connected server id, got %s", id) } name := nc.ConnectedServerName() if name == "" { t.Fatalf("Expected a connected server name, got %s", name) } cname := nc.ConnectedClusterName() if cname == "" { t.Fatalf("Expected a connected server cluster name, got %s", cname) } nc.Close() u = nc.ConnectedUrl() if u != "" { t.Fatalf("Expected a nil connected URL, got %s\n", u) } id = nc.ConnectedServerId() if id != "" { t.Fatalf("Expected a nil connect server, got %s", id) } name = nc.ConnectedServerName() if name != "" { t.Fatalf("Expected a nil connect server name, got %s", name) } cname = nc.ConnectedClusterName() if cname != "" { t.Fatalf("Expected a nil connect server cluster, got %s", cname) } } func TestMultipleClose(t *testing.T) { s := 
RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) var wg sync.WaitGroup for i := 0; i < 10; i++ { wg.Add(1) go func() { nc.Close() wg.Done() }() } wg.Wait() } func TestBadOptionTimeoutConnect(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() opts := nats.GetDefaultOptions() opts.Timeout = -1 opts.Url = "nats://127.0.0.1:4222" _, err := opts.Connect() if err == nil { t.Fatal("Expected an error") } if !strings.Contains(err.Error(), "invalid") { t.Fatalf("Expected a ErrNoServers error: Got %v\n", err) } } func TestSimplePublish(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() if err := nc.Publish("foo", []byte("Hello World")); err != nil { t.Fatal("Failed to publish string message: ", err) } } func TestSimplePublishNoData(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() if err := nc.Publish("foo", nil); err != nil { t.Fatal("Failed to publish empty message: ", err) } } func TestPublishDoesNotFailOnSlowConsumer(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() // Override default handler for test. nc.SetErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, _ error) {}) sub, err := nc.SubscribeSync("foo") if err != nil { t.Fatalf("Unable to create subscription: %v", err) } if err := sub.SetPendingLimits(1, 1000); err != nil { t.Fatalf("Unable to set pending limits: %v", err) } var pubErr error msg := []byte("Hello") for i := 0; i < 10; i++ { pubErr = nc.Publish("foo", msg) if pubErr != nil { break } nc.Flush() } if pubErr != nil { t.Fatalf("Publish() should not fail because of slow consumer. 
Got '%v'", pubErr) } } func TestAsyncSubscribe(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() omsg := []byte("Hello World") ch := make(chan bool) // Callback is mandatory if _, err := nc.Subscribe("foo", nil); err == nil { t.Fatal("Creating subscription without callback should have failed") } _, err := nc.Subscribe("foo", func(m *nats.Msg) { if !bytes.Equal(m.Data, omsg) { t.Fatal("Message received does not match") } if m.Sub == nil { t.Fatal("Callback does not have a valid Subscription") } ch <- true }) if err != nil { t.Fatal("Failed to subscribe: ", err) } nc.Publish("foo", omsg) if e := Wait(ch); e != nil { t.Fatal("Message not received for subscription") } } func TestAsyncSubscribeRoutineLeakOnUnsubscribe(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() ch := make(chan bool) // Take the base once the connection is established, but before // the subscriber is created. base := getStableNumGoroutine(t) sub, err := nc.Subscribe("foo", func(m *nats.Msg) { ch <- true }) if err != nil { t.Fatal("Failed to subscribe: ", err) } // Send to ourself nc.Publish("foo", []byte("hello")) // This ensures that the async delivery routine is up and running. if err := Wait(ch); err != nil { t.Fatal("Failed to receive message") } // Make sure to give it time to go back into wait time.Sleep(200 * time.Millisecond) // Explicit unsubscribe sub.Unsubscribe() checkNoGoroutineLeak(t, base, "Unsubscribe()") } func TestAsyncSubscribeRoutineLeakOnClose(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() ch := make(chan bool) // Take the base before creating the connection, since we are going // to close it before taking the delta. 
base := getStableNumGoroutine(t) nc := NewDefaultConnection(t) defer nc.Close() _, err := nc.Subscribe("foo", func(m *nats.Msg) { ch <- true }) if err != nil { t.Fatal("Failed to subscribe: ", err) } // Send to ourself nc.Publish("foo", []byte("hello")) // This ensures that the async delivery routine is up and running. if err := Wait(ch); err != nil { t.Fatal("Failed to receive message") } // Make sure to give it time to go back into wait time.Sleep(200 * time.Millisecond) // Close connection without explicit unsubscribe nc.Close() checkNoGoroutineLeak(t, base, "Close()") } func TestSyncSubscribe(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() sub, err := nc.SubscribeSync("foo") if err != nil { t.Fatal("Failed to subscribe: ", err) } omsg := []byte("Hello World") nc.Publish("foo", omsg) msg, err := sub.NextMsg(1 * time.Second) if err != nil || !bytes.Equal(msg.Data, omsg) { t.Fatal("Message received does not match") } } func TestPubSubWithReply(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() sub, err := nc.SubscribeSync("foo") if err != nil { t.Fatal("Failed to subscribe: ", err) } omsg := []byte("Hello World") nc.PublishMsg(&nats.Msg{Subject: "foo", Reply: "bar", Data: omsg}) msg, err := sub.NextMsg(10 * time.Second) if err != nil || !bytes.Equal(msg.Data, omsg) { t.Fatal("Message received does not match") } } func TestMsgRespond(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() m := &nats.Msg{} if err := m.Respond(nil); err != nats.ErrMsgNotBound { t.Fatal("Expected ErrMsgNotBound error") } sub, err := nc.Subscribe("req", func(msg *nats.Msg) { msg.Respond([]byte("42")) }) if err != nil { t.Fatal("Failed to subscribe: ", err) } // Fake the bound notion by assigning Sub directly to test no reply. 
m.Sub = sub if err := m.Respond(nil); err != nats.ErrMsgNoReply { t.Fatal("Expected ErrMsgNoReply error") } response, err := nc.Request("req", []byte("help"), 50*time.Millisecond) if err != nil { t.Fatal("Request Failed: ", err) } if string(response.Data) != "42" { t.Fatalf("Expected '42', got %q", response.Data) } } func TestFlush(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() omsg := []byte("Hello World") for i := 0; i < 10000; i++ { nc.Publish("flush", omsg) } if err := nc.FlushTimeout(0); err == nil { t.Fatal("Calling FlushTimeout() with invalid timeout should fail") } if err := nc.Flush(); err != nil { t.Fatalf("Received error from flush: %s\n", err) } if nb, _ := nc.Buffered(); nb > 0 { t.Fatalf("Outbound buffer not empty: %d bytes\n", nb) } nc.Close() if _, err := nc.Buffered(); err == nil { t.Fatal("Calling Buffered() on closed connection should fail") } } func TestQueueSubscriber(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() s1, _ := nc.QueueSubscribeSync("foo", "bar") s2, _ := nc.QueueSubscribeSync("foo", "bar") omsg := []byte("Hello World") nc.Publish("foo", omsg) nc.Flush() r1, _, _ := s1.Pending() r2, _, _ := s2.Pending() if (r1 + r2) != 1 { t.Fatal("Received too many messages for multiple queue subscribers") } // Drain messages s1.NextMsg(time.Second) s2.NextMsg(time.Second) total := 1000 for i := 0; i < total; i++ { nc.Publish("foo", omsg) } nc.Flush() v := uint(float32(total) * 0.15) r1, _, _ = s1.Pending() r2, _, _ = s2.Pending() if r1+r2 != total { t.Fatalf("Incorrect number of messages: %d vs %d", (r1 + r2), total) } expected := total / 2 d1 := uint(math.Abs(float64(expected - r1))) d2 := uint(math.Abs(float64(expected - r2))) if d1 > v || d2 > v { t.Fatalf("Too much variance in totals: %d, %d > %d", d1, d2, v) } } func TestReplyArg(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer 
nc.Close() ch := make(chan bool) replyExpected := "bar" nc.Subscribe("foo", func(m *nats.Msg) { if m.Reply != replyExpected { t.Fatalf("Did not receive correct reply arg in callback: "+ "('%s' vs '%s')", m.Reply, replyExpected) } ch <- true }) nc.PublishMsg(&nats.Msg{Subject: "foo", Reply: replyExpected, Data: []byte("Hello")}) if e := Wait(ch); e != nil { t.Fatal("Did not receive callback") } } func TestSyncReplyArg(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() replyExpected := "bar" sub, _ := nc.SubscribeSync("foo") nc.PublishMsg(&nats.Msg{Subject: "foo", Reply: replyExpected, Data: []byte("Hello")}) msg, err := sub.NextMsg(1 * time.Second) if err != nil { t.Fatal("Received an err on NextMsg()") } if msg.Reply != replyExpected { t.Fatalf("Did not receive correct reply arg in callback: "+ "('%s' vs '%s')", msg.Reply, replyExpected) } } func TestUnsubscribe(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() received := int32(0) max := int32(10) ch := make(chan bool) nc.Subscribe("foo", func(m *nats.Msg) { atomic.AddInt32(&received, 1) if received == max { err := m.Sub.Unsubscribe() if err != nil { t.Fatal("Unsubscribe failed with err:", err) } ch <- true } }) send := 20 for i := 0; i < send; i++ { nc.Publish("foo", []byte("hello")) } nc.Flush() <-ch r := atomic.LoadInt32(&received) if r != max { t.Fatalf("Received wrong # of messages after unsubscribe: %d vs %d", r, max) } } func TestDoubleUnsubscribe(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() sub, err := nc.SubscribeSync("foo") if err != nil { t.Fatal("Failed to subscribe: ", err) } if err = sub.Unsubscribe(); err != nil { t.Fatal("Unsubscribe failed with err:", err) } if err = sub.Unsubscribe(); err == nil { t.Fatal("Unsubscribe should have reported an error") } } func TestRequestTimeout(t *testing.T) { s := RunDefaultServer() defer 
s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() // We now need a responder by default otherwise we will get a no responders error. nc.SubscribeSync("foo") if _, err := nc.Request("foo", []byte("help"), 10*time.Millisecond); err != nats.ErrTimeout { t.Fatalf("Expected to receive a timeout error") } } func TestBasicNoRespondersSupport(t *testing.T) { s := RunServerOnPort(-1) defer s.Shutdown() nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Error connecting to server: %v", err) } defer nc.Close() // Normal new style if m, err := nc.Request("foo", nil, time.Second); err != nats.ErrNoResponders { t.Fatalf("Expected a no responders error and nil msg, got m:%+v and err: %v", m, err) } // New style with context ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() if m, err := nc.RequestWithContext(ctx, "foo", nil); err != nats.ErrNoResponders { t.Fatalf("Expected a no responders error and nil msg, got m:%+v and err: %v", m, err) } // Now do old request style as well. 
nc, err = nats.Connect(s.ClientURL(), nats.UseOldRequestStyle()) if err != nil { t.Fatalf("Error connecting to server: %v", err) } defer nc.Close() // Normal old request style if m, err := nc.Request("foo", nil, time.Second); err != nats.ErrNoResponders { t.Fatalf("Expected a no responders error and nil msg, got m:%+v and err: %v", m, err) } // Old request style with context ctx, cancel = context.WithTimeout(context.Background(), time.Second) defer cancel() if m, err := nc.RequestWithContext(ctx, "foo", nil); err != nats.ErrNoResponders { t.Fatalf("Expected a no responders error and nil msg, got m:%+v and err: %v", m, err) } // SubscribeSync inbox := nats.NewInbox() sub, err := nc.SubscribeSync(inbox) if err != nil { t.Fatal(err) } err = nc.PublishRequest("foo", inbox, nil) if err != nil { t.Fatal(err) } if m, err := sub.NextMsg(2 * time.Second); err != nats.ErrNoResponders { t.Fatalf("Expected a no responders error and nil msg, got m:%+v and err: %v", m, err) } } func TestOldRequest(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc, err := nats.Connect(nats.DefaultURL, nats.UseOldRequestStyle()) if err != nil { t.Fatalf("Failed to connect: %v", err) } defer nc.Close() response := []byte("I will help you") nc.Subscribe("foo", func(m *nats.Msg) { m.Respond(response) }) msg, err := nc.Request("foo", []byte("help"), 500*time.Millisecond) if err != nil { t.Fatalf("Received an error on Request test: %s", err) } if !bytes.Equal(msg.Data, response) { t.Fatalf("Received invalid response") } // Check that Close() kicks out a Request() errCh := make(chan error, 1) start := time.Now() go func() { sub, _ := nc.SubscribeSync("checkClose") defer sub.Unsubscribe() _, err := nc.Request("checkClose", []byte("should be kicked out on close"), time.Second) errCh <- err }() time.Sleep(100 * time.Millisecond) nc.Close() if e := <-errCh; e != nats.ErrConnectionClosed { t.Fatalf("Unexpected error: %v", e) } if dur := time.Since(start); dur >= time.Second { t.Fatalf("Request 
took too long to bail out: %v", dur) } } func TestRequest(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() response := []byte("I will help you") nc.Subscribe("foo", func(m *nats.Msg) { nc.Publish(m.Reply, response) }) msg, err := nc.Request("foo", []byte("help"), 500*time.Millisecond) if err != nil { t.Fatalf("Received an error on Request test: %s", err) } if !bytes.Equal(msg.Data, response) { t.Fatalf("Received invalid response") } } func TestRequestNoBody(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() response := []byte("I will help you") nc.Subscribe("foo", func(m *nats.Msg) { nc.Publish(m.Reply, response) }) msg, err := nc.Request("foo", nil, 500*time.Millisecond) if err != nil { t.Fatalf("Received an error on Request test: %s", err) } if !bytes.Equal(msg.Data, response) { t.Fatalf("Received invalid response") } } func TestSimultaneousRequests(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() response := []byte("I will help you") nc.Subscribe("foo", func(m *nats.Msg) { nc.Publish(m.Reply, response) }) wg := sync.WaitGroup{} wg.Add(50) errCh := make(chan error, 50) for i := 0; i < 50; i++ { go func() { defer wg.Done() if _, err := nc.Request("foo", nil, 2*time.Second); err != nil { errCh <- fmt.Errorf("Error on request: %v", err) } }() } wg.Wait() checkErrChannel(t, errCh) } func TestRequestClose(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() wg := &sync.WaitGroup{} wg.Add(1) go func() { defer wg.Done() time.Sleep(100 * time.Millisecond) nc.Close() }() nc.SubscribeSync("foo") if _, err := nc.Request("foo", []byte("help"), 2*time.Second); err != nats.ErrInvalidConnection && err != nats.ErrConnectionClosed { t.Fatalf("Expected connection error: got %v", err) } wg.Wait() } func TestRequestCloseTimeout(t *testing.T) { // Make sure we 
return a timeout when we close // the connection even if response is queued. s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() response := []byte("I will help you") nc.Subscribe("foo", func(m *nats.Msg) { nc.Publish(m.Reply, response) nc.Close() }) if _, err := nc.Request("foo", nil, 1*time.Second); err == nil { t.Fatalf("Expected to receive a timeout error") } } func TestFlushInCB(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() ch := make(chan bool) nc.Subscribe("foo", func(_ *nats.Msg) { nc.Flush() ch <- true }) nc.Publish("foo", []byte("Hello")) if e := Wait(ch); e != nil { t.Fatal("Flush did not return properly in callback") } } func TestReleaseFlush(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) for i := 0; i < 1000; i++ { nc.Publish("foo", []byte("Hello")) } go nc.Close() nc.Flush() } func TestInbox(t *testing.T) { inbox := nats.NewInbox() if matched, _ := regexp.Match(`_INBOX.\S`, []byte(inbox)); !matched { t.Fatal("Bad INBOX format") } } func TestStats(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() data := []byte("The quick brown fox jumped over the lazy dog") iter := 10 for i := 0; i < iter; i++ { nc.Publish("foo", data) } if nc.OutMsgs != uint64(iter) { t.Fatalf("Not properly tracking OutMsgs: received %d, wanted %d\n", nc.OutMsgs, iter) } obb := uint64(iter * len(data)) if nc.OutBytes != obb { t.Fatalf("Not properly tracking OutBytes: received %d, wanted %d\n", nc.OutBytes, obb) } // Clear outbound nc.OutMsgs, nc.OutBytes = 0, 0 // Test both sync and async versions of subscribe. 
nc.Subscribe("foo", func(_ *nats.Msg) {}) nc.SubscribeSync("foo") for i := 0; i < iter; i++ { nc.Publish("foo", data) } nc.Flush() if nc.InMsgs != uint64(2*iter) { t.Fatalf("Not properly tracking InMsgs: received %d, wanted %d\n", nc.InMsgs, 2*iter) } ibb := 2 * obb if nc.InBytes != ibb { t.Fatalf("Not properly tracking InBytes: received %d, wanted %d\n", nc.InBytes, ibb) } } func TestRaceSafeStats(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() go nc.Publish("foo", []byte("Hello World")) time.Sleep(200 * time.Millisecond) stats := nc.Stats() if stats.OutMsgs != uint64(1) { t.Fatalf("Not properly tracking OutMsgs: received %d, wanted %d\n", nc.OutMsgs, 1) } } func TestBadSubject(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() err := nc.Publish("", []byte("Hello World")) if err == nil { t.Fatalf("Expected an error on bad subject to publish") } if err != nats.ErrBadSubject { t.Fatalf("Expected a ErrBadSubject error: Got %v\n", err) } } func TestOptions(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc, err := nats.Connect(nats.DefaultURL, nats.Name("myName"), nats.MaxReconnects(2), nats.ReconnectWait(50*time.Millisecond), nats.ReconnectJitter(0, 0), nats.PingInterval(20*time.Millisecond)) if err != nil { t.Fatalf("Failed to connect: %v", err) } defer nc.Close() rch := make(chan bool) cch := make(chan bool) nc.SetReconnectHandler(func(_ *nats.Conn) { rch <- true }) nc.SetClosedHandler(func(_ *nats.Conn) { cch <- true }) s.Shutdown() s = RunDefaultServer() defer s.Shutdown() if err := Wait(rch); err != nil { t.Fatal("Failed getting reconnected cb") } nc.Close() if err := Wait(cch); err != nil { t.Fatal("Failed getting closed cb") } nc, err = nats.Connect(nats.DefaultURL, nats.NoReconnect()) if err != nil { t.Fatalf("Failed to connect: %v", err) } defer nc.Close() nc.SetReconnectHandler(func(_ *nats.Conn) { rch <- true }) 
nc.SetClosedHandler(func(_ *nats.Conn) { cch <- true }) s.Shutdown() // We should not get a reconnect cb this time if err := WaitTime(rch, time.Second); err == nil { t.Fatal("Unexpected reconnect cb") } nc.Close() if err := Wait(cch); err != nil { t.Fatal("Failed getting closed cb") } } func TestNilConnection(t *testing.T) { var nc *nats.Conn data := []byte("ok") // Publish if err := nc.Publish("foo", data); err == nil || err != nats.ErrInvalidConnection { t.Fatalf("Expected ErrInvalidConnection error, got %v\n", err) } if err := nc.PublishMsg(nil); err == nil || err != nats.ErrInvalidMsg { t.Fatalf("Expected ErrInvalidMsg error, got %v\n", err) } if err := nc.PublishMsg(&nats.Msg{}); err == nil || err != nats.ErrInvalidConnection { t.Fatalf("Expected ErrInvalidConnection error, got %v\n", err) } if err := nc.PublishRequest("foo", "reply", data); err == nil || err != nats.ErrInvalidConnection { t.Fatalf("Expected ErrInvalidConnection error, got %v\n", err) } // Subscribe if _, err := nc.Subscribe("foo", nil); err == nil || err != nats.ErrInvalidConnection { t.Fatalf("Expected ErrInvalidConnection error, got %v\n", err) } if _, err := nc.SubscribeSync("foo"); err == nil || err != nats.ErrInvalidConnection { t.Fatalf("Expected ErrInvalidConnection error, got %v\n", err) } if _, err := nc.QueueSubscribe("foo", "bar", nil); err == nil || err != nats.ErrInvalidConnection { t.Fatalf("Expected ErrInvalidConnection error, got %v\n", err) } ch := make(chan *nats.Msg) if _, err := nc.ChanSubscribe("foo", ch); err == nil || err != nats.ErrInvalidConnection { t.Fatalf("Expected ErrInvalidConnection error, got %v\n", err) } if _, err := nc.ChanQueueSubscribe("foo", "bar", ch); err == nil || err != nats.ErrInvalidConnection { t.Fatalf("Expected ErrInvalidConnection error, got %v\n", err) } if _, err := nc.QueueSubscribeSyncWithChan("foo", "bar", ch); err == nil || err != nats.ErrInvalidConnection { t.Fatalf("Expected ErrInvalidConnection error, got %v\n", err) } // Flush if err 
:= nc.Flush(); err == nil || err != nats.ErrInvalidConnection { t.Fatalf("Expected ErrInvalidConnection error, got %v\n", err) } if err := nc.FlushTimeout(time.Millisecond); err == nil || err != nats.ErrInvalidConnection { t.Fatalf("Expected ErrInvalidConnection error, got %v\n", err) } // Nil Subscribers var sub *nats.Subscription if sub.Type() != nats.NilSubscription { t.Fatalf("Got wrong type for nil subscription, %v\n", sub.Type()) } if sub.IsValid() { t.Fatalf("Expected IsValid() to return false") } if err := sub.Unsubscribe(); err == nil || err != nats.ErrBadSubscription { t.Fatalf("Expected Unsubscribe to return proper error, got %v\n", err) } if err := sub.AutoUnsubscribe(1); err == nil || err != nats.ErrBadSubscription { t.Fatalf("Expected ErrBadSubscription error, got %v\n", err) } if _, err := sub.NextMsg(time.Millisecond); err == nil || err != nats.ErrBadSubscription { t.Fatalf("Expected ErrBadSubscription error, got %v\n", err) } if _, _, err := sub.Pending(); err == nil || err != nats.ErrBadSubscription { t.Fatalf("Expected ErrBadSubscription error, got %v\n", err) } if _, _, err := sub.Pending(); err == nil || err != nats.ErrBadSubscription { t.Fatalf("Expected ErrBadSubscription error, got %v\n", err) } if _, _, err := sub.MaxPending(); err == nil || err != nats.ErrBadSubscription { t.Fatalf("Expected ErrBadSubscription error, got %v\n", err) } if err := sub.ClearMaxPending(); err == nil || err != nats.ErrBadSubscription { t.Fatalf("Expected ErrBadSubscription error, got %v\n", err) } if _, _, err := sub.PendingLimits(); err == nil || err != nats.ErrBadSubscription { t.Fatalf("Expected ErrBadSubscription error, got %v\n", err) } if err := sub.SetPendingLimits(1, 1); err == nil || err != nats.ErrBadSubscription { t.Fatalf("Expected ErrBadSubscription error, got %v\n", err) } if _, err := sub.Delivered(); err == nil || err != nats.ErrBadSubscription { t.Fatalf("Expected ErrBadSubscription error, got %v\n", err) } if _, err := sub.Dropped(); err == nil 
|| err != nats.ErrBadSubscription { t.Fatalf("Expected ErrBadSubscription error, got %v\n", err) } } nats.go-1.41.0/test/bench_test.go000066400000000000000000000073271477351342400166150ustar00rootroot00000000000000// Copyright 2012-2019 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package test import ( "sync/atomic" "testing" "time" "github.com/nats-io/nats.go" ) func BenchmarkPublishSpeed(b *testing.B) { b.StopTimer() s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(b) defer nc.Close() b.StartTimer() msg := []byte("Hello World") for i := 0; i < b.N; i++ { if err := nc.Publish("foo", msg); err != nil { b.Fatalf("Error in benchmark during Publish: %v\n", err) } } // Make sure they are all processed. nc.Flush() b.StopTimer() } func BenchmarkPubSubSpeed(b *testing.B) { b.StopTimer() s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(b) defer nc.Close() ch := make(chan bool) nc.SetErrorHandler(func(nc *nats.Conn, s *nats.Subscription, err error) { b.Fatalf("Error : %v\n", err) }) received := int32(0) nc.Subscribe("foo", func(m *nats.Msg) { if nr := atomic.AddInt32(&received, 1); nr >= int32(b.N) { ch <- true } }) msg := []byte("Hello World") b.StartTimer() for i := 0; i < b.N; i++ { if err := nc.Publish("foo", msg); err != nil { b.Fatalf("Error in benchmark during Publish: %v\n", err) } } // Make sure they are all processed. 
err := WaitTime(ch, 10*time.Second) if err != nil { b.Fatal("Timed out waiting for messages") } else if atomic.LoadInt32(&received) != int32(b.N) { b.Fatalf("Received: %d, err:%v", received, nc.LastError()) } b.StopTimer() } func BenchmarkAsyncSubscriptionCreationSpeed(b *testing.B) { b.StopTimer() s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(b) defer nc.Close() b.StartTimer() b.ReportAllocs() for i := 0; i < b.N; i++ { nc.Subscribe("foo", func(m *nats.Msg) {}) } } func BenchmarkSyncSubscriptionCreationSpeed(b *testing.B) { b.StopTimer() s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(b) defer nc.Close() b.StartTimer() b.ReportAllocs() for i := 0; i < b.N; i++ { nc.SubscribeSync("foo") } } func BenchmarkInboxCreation(b *testing.B) { for i := 0; i < b.N; i++ { nats.NewInbox() } } func BenchmarkNewInboxCreation(b *testing.B) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(b) defer nc.Close() b.ResetTimer() for i := 0; i < b.N; i++ { nc.NewRespInbox() } } func BenchmarkRequest(b *testing.B) { b.StopTimer() s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(b) defer nc.Close() ok := []byte("ok") nc.Subscribe("req", func(m *nats.Msg) { nc.Publish(m.Reply, ok) }) b.StartTimer() b.ReportAllocs() q := []byte("q") for i := 0; i < b.N; i++ { _, err := nc.Request("req", q, 1*time.Second) if err != nil { b.Fatalf("Err %v\n", err) } } } func BenchmarkOldRequest(b *testing.B) { b.StopTimer() s := RunDefaultServer() defer s.Shutdown() nc, err := nats.Connect(nats.DefaultURL, nats.UseOldRequestStyle()) if err != nil { b.Fatalf("Failed to connect: %v", err) } defer nc.Close() ok := []byte("ok") nc.Subscribe("req", func(m *nats.Msg) { nc.Publish(m.Reply, ok) }) b.StartTimer() b.ReportAllocs() q := []byte("q") for i := 0; i < b.N; i++ { _, err := nc.Request("req", q, 1*time.Second) if err != nil { b.Fatalf("Err %v\n", err) } } } 
nats.go-1.41.0/test/cluster_test.go000066400000000000000000000554251477351342400172210ustar00rootroot00000000000000// Copyright 2012-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package test import ( "errors" "fmt" "math" "net" "runtime" "strings" "sync" "testing" "time" "github.com/nats-io/nats-server/v2/server" "github.com/nats-io/nats-server/v2/test" "github.com/nats-io/nats.go" ) var testServers = []string{ "nats://127.0.0.1:1222", "nats://127.0.0.1:1223", "nats://127.0.0.1:1224", "nats://127.0.0.1:1225", "nats://127.0.0.1:1226", "nats://127.0.0.1:1227", "nats://127.0.0.1:1228", } var servers = strings.Join(testServers, ",") func serverVersionAtLeast(major, minor, update int) error { var ( ma, mi, up int ) fmt.Sscanf(server.VERSION, "%d.%d.%d", &ma, &mi, &up) if ma > major || (ma == major && mi > minor) || (ma == major && mi == minor && up >= update) { return nil } return fmt.Errorf("Server version is %v, requires %d.%d.%d+", server.VERSION, major, minor, update) } func TestServersOption(t *testing.T) { opts := nats.GetDefaultOptions() opts.NoRandomize = true // Need to lower this for Windows tests, otherwise would take too long. opts.Timeout = 100 * time.Millisecond // When getting "connection refused", we transform to ErrNoServers. // However, on Windows, the connect() will get a i/o timeout, but // we can't really suppress that one since we don't know if it is // a real timeout or a failure to connect. So check differencly. 
_, err := opts.Connect() if runtime.GOOS == "windows" { if err == nil || !strings.Contains(err.Error(), "timeout") { t.Fatalf("Expected timeout, got %v", err) } } else if err != nats.ErrNoServers { t.Fatalf("Wrong error: '%v'", err) } opts.Servers = testServers _, err = opts.Connect() if runtime.GOOS == "windows" { if err == nil || !strings.Contains(err.Error(), "timeout") { t.Fatalf("Expected timeout, got %v", err) } } else if err == nil || err != nats.ErrNoServers { t.Fatalf("Did not receive proper error: %v", err) } // Make sure we can connect to first server if running s1 := RunServerOnPort(1222) // Do this in case some failure occurs before explicit shutdown defer s1.Shutdown() nc, err := opts.Connect() if err != nil { t.Fatalf("Could not connect: %v\n", err) } if nc.ConnectedUrl() != "nats://127.0.0.1:1222" { nc.Close() t.Fatalf("Does not report correct connection: %s\n", nc.ConnectedUrl()) } nc.Close() s1.Shutdown() // Make sure we can connect to a non first server if running s2 := RunServerOnPort(1223) // Do this in case some failure occurs before explicit shutdown defer s2.Shutdown() nc, err = opts.Connect() if err != nil { t.Fatalf("Could not connect: %v\n", err) } defer nc.Close() if nc.ConnectedUrl() != "nats://127.0.0.1:1223" { t.Fatalf("Does not report correct connection: %s\n", nc.ConnectedUrl()) } } func TestNewStyleServersOption(t *testing.T) { _, err := nats.Connect(nats.DefaultURL, nats.DontRandomize(), nats.Timeout(100*time.Millisecond)) if runtime.GOOS == "windows" { if err == nil || !strings.Contains(err.Error(), "timeout") { t.Fatalf("Expected timeout, got %v", err) } } else if err != nats.ErrNoServers { t.Fatalf("Wrong error: '%v'\n", err) } servers := strings.Join(testServers, ",") _, err = nats.Connect(servers, nats.DontRandomize(), nats.Timeout(100*time.Millisecond)) if runtime.GOOS == "windows" { if err == nil || !strings.Contains(err.Error(), "timeout") { t.Fatalf("Expected timeout, got %v", err) } } else if err == nil || err != 
nats.ErrNoServers { t.Fatalf("Did not receive proper error: %v\n", err) } // Make sure we can connect to first server if running s1 := RunServerOnPort(1222) // Do this in case some failure occurs before explicit shutdown defer s1.Shutdown() nc, err := nats.Connect(servers, nats.DontRandomize(), nats.Timeout(100*time.Millisecond)) if err != nil { t.Fatalf("Could not connect: %v\n", err) } if nc.ConnectedUrl() != "nats://127.0.0.1:1222" { nc.Close() t.Fatalf("Does not report correct connection: %s\n", nc.ConnectedUrl()) } nc.Close() s1.Shutdown() // Make sure we can connect to a non-first server if running s2 := RunServerOnPort(1223) // Do this in case some failure occurs before explicit shutdown defer s2.Shutdown() nc, err = nats.Connect(servers, nats.DontRandomize(), nats.Timeout(100*time.Millisecond)) if err != nil { t.Fatalf("Could not connect: %v\n", err) } defer nc.Close() if nc.ConnectedUrl() != "nats://127.0.0.1:1223" { t.Fatalf("Does not report correct connection: %s\n", nc.ConnectedUrl()) } } func TestAuthServers(t *testing.T) { var plainServers = []string{ "nats://127.0.0.1:1222", "nats://127.0.0.1:1224", } opts := test.DefaultTestOptions opts.Username = "derek" opts.Password = "foo" opts.Port = 1222 as1 := RunServerWithOptions(&opts) defer as1.Shutdown() opts.Port = 1224 as2 := RunServerWithOptions(&opts) defer as2.Shutdown() pservers := strings.Join(plainServers, ",") nc, err := nats.Connect(pservers, nats.DontRandomize(), nats.Timeout(5*time.Second)) if err == nil { nc.Close() t.Fatalf("Expect Auth failure, got no error\n") } if !strings.Contains(err.Error(), "Authorization") { t.Fatalf("Wrong error, wanted Auth failure, got '%s'\n", err) } if !errors.Is(err, nats.ErrAuthorization) { t.Fatalf("Expected error '%v', got '%v'", nats.ErrAuthorization, err) } // Test that we can connect to a subsequent correct server. 
var authServers = []string{ "nats://127.0.0.1:1222", "nats://derek:foo@127.0.0.1:1224", } aservers := strings.Join(authServers, ",") nc, err = nats.Connect(aservers, nats.DontRandomize(), nats.Timeout(5*time.Second)) if err != nil { t.Fatalf("Expected to connect properly: %v\n", err) } defer nc.Close() if nc.ConnectedUrl() != authServers[1] { t.Fatalf("Does not report correct connection: %s\n", nc.ConnectedUrl()) } } func TestBasicClusterReconnect(t *testing.T) { s1 := RunServerOnPort(1222) defer s1.Shutdown() s2 := RunServerOnPort(1224) defer s2.Shutdown() dch := make(chan bool) rch := make(chan bool) dcbCalled := false opts := []nats.Option{nats.DontRandomize(), nats.Timeout(100 * time.Millisecond), nats.DisconnectErrHandler(func(nc *nats.Conn, _ error) { // Suppress any additional callbacks if dcbCalled { return } dcbCalled = true dch <- true }), nats.ReconnectHandler(func(_ *nats.Conn) { rch <- true }), } nc, err := nats.Connect(servers, opts...) if err != nil { t.Fatalf("Expected to connect, got err: %v\n", err) } defer nc.Close() s1.Shutdown() // wait for disconnect if e := WaitTime(dch, 2*time.Second); e != nil { t.Fatal("Did not receive a disconnect callback message") } reconnectTimeStart := time.Now() // wait for reconnect if e := WaitTime(rch, 2*time.Second); e != nil { t.Fatal("Did not receive a reconnect callback message") } if nc.ConnectedUrl() != testServers[2] { t.Fatalf("Does not report correct connection: %s\n", nc.ConnectedUrl()) } // Make sure we did not wait on reconnect for default time. // Reconnect should be fast since it will be a switch to the // second server and not be dependent on server restart time. // On Windows, a failed connect takes more than a second, so // account for that. 
maxDuration := 100 * time.Millisecond if runtime.GOOS == "windows" { maxDuration = 1100 * time.Millisecond } reconnectTime := time.Since(reconnectTimeStart) if reconnectTime > maxDuration { t.Fatalf("Took longer than expected to reconnect: %v\n", reconnectTime) } } func TestHotSpotReconnect(t *testing.T) { s1 := RunServerOnPort(1222) defer s1.Shutdown() numClients := 32 clients := []*nats.Conn{} wg := &sync.WaitGroup{} wg.Add(numClients) opts := []nats.Option{ nats.ReconnectWait(50 * time.Millisecond), nats.ReconnectJitter(0, 0), nats.ReconnectHandler(func(_ *nats.Conn) { wg.Done() }), } var srvrs string if runtime.GOOS == "windows" { srvrs = strings.Join(testServers[:5], ",") opts = append(opts, nats.Timeout(100*time.Millisecond)) } else { srvrs = servers } for i := 0; i < numClients; i++ { nc, err := nats.Connect(srvrs, opts...) if err != nil { t.Fatalf("Expected to connect, got err: %v\n", err) } defer nc.Close() if nc.ConnectedUrl() != testServers[0] { t.Fatalf("Connected to incorrect server: %v\n", nc.ConnectedUrl()) } clients = append(clients, nc) } s2 := RunServerOnPort(1224) defer s2.Shutdown() s3 := RunServerOnPort(1226) defer s3.Shutdown() s1.Shutdown() numServers := 2 // Wait on all reconnects wg.Wait() // Walk the clients and calculate how many of each.. 
cs := make(map[string]int) for _, nc := range clients { cs[nc.ConnectedUrl()]++ nc.Close() } if len(cs) != numServers { t.Fatalf("Wrong number of reported servers: %d vs %d\n", len(cs), numServers) } expected := numClients / numServers v := uint(float32(expected) * 0.40) // Check that each item is within acceptable range for s, total := range cs { delta := uint(math.Abs(float64(expected - total))) if delta > v { t.Fatalf("Connected clients to server: %s out of range: %d\n", s, total) } } } func TestProperReconnectDelay(t *testing.T) { s1 := RunServerOnPort(1222) defer s1.Shutdown() var srvs string opts := nats.GetDefaultOptions() if runtime.GOOS == "windows" { srvs = strings.Join(testServers[:2], ",") } else { srvs = strings.Join(testServers, ",") } opts.NoRandomize = true dcbCalled := false closedCbCalled := false dch := make(chan bool) dcb := func(nc *nats.Conn) { // Suppress any additional calls if dcbCalled { return } dcbCalled = true dch <- true } ccb := func(_ *nats.Conn) { closedCbCalled = true } nc, err := nats.Connect(srvs, nats.DontRandomize(), nats.DisconnectHandler(dcb), nats.ClosedHandler(ccb)) if err != nil { t.Fatalf("Expected to connect, got err: %v\n", err) } defer nc.Close() s1.Shutdown() // wait for disconnect if e := WaitTime(dch, 2*time.Second); e != nil { t.Fatal("Did not receive a disconnect callback message") } // Wait, want to make sure we don't spin on reconnect to non-existent servers. time.Sleep(1 * time.Second) // Make sure we are still reconnecting.. 
if closedCbCalled { t.Fatal("Closed CB was triggered, should not have been.") } if status := nc.Status(); status != nats.RECONNECTING { t.Fatalf("Wrong status: %d\n", status) } } func TestProperFalloutAfterMaxAttempts(t *testing.T) { s1 := RunServerOnPort(1222) defer s1.Shutdown() opts := nats.GetDefaultOptions() // Reduce the list of servers for Windows tests if runtime.GOOS == "windows" { opts.Servers = testServers[:2] opts.MaxReconnect = 2 opts.Timeout = 100 * time.Millisecond } else { opts.Servers = testServers opts.MaxReconnect = 5 } opts.NoRandomize = true opts.ReconnectWait = (25 * time.Millisecond) nats.ReconnectJitter(0, 0)(&opts) dch := make(chan bool) opts.DisconnectedErrCB = func(_ *nats.Conn, _ error) { dch <- true } closedCbCalled := false cch := make(chan bool) opts.ClosedCB = func(_ *nats.Conn) { closedCbCalled = true cch <- true } nc, err := opts.Connect() if err != nil { t.Fatalf("Expected to connect, got err: %v\n", err) } defer nc.Close() s1.Shutdown() // On Windows, creating a TCP connection to a server not running takes more than // a second. So be generous with the WaitTime. // wait for disconnect if e := WaitTime(dch, 5*time.Second); e != nil { t.Fatal("Did not receive a disconnect callback message") } // Wait for ClosedCB if e := WaitTime(cch, 5*time.Second); e != nil { t.Fatal("Did not receive a closed callback message") } // Make sure we are not still reconnecting.. if !closedCbCalled { t.Logf("%+v\n", nc) t.Fatal("Closed CB was not triggered, should have been.") } // Expect connection to be closed... 
if !nc.IsClosed() { t.Fatalf("Wrong status: %d\n", nc.Status()) } } func TestProperFalloutAfterMaxAttemptsWithAuthMismatch(t *testing.T) { var myServers = []string{ "nats://127.0.0.1:1222", "nats://127.0.0.1:4443", } s1 := RunServerOnPort(1222) defer s1.Shutdown() s2, _ := RunServerWithConfig("./configs/tlsverify.conf") defer s2.Shutdown() opts := nats.GetDefaultOptions() opts.Servers = myServers opts.NoRandomize = true if runtime.GOOS == "windows" { opts.MaxReconnect = 2 opts.Timeout = 100 * time.Millisecond } else { opts.MaxReconnect = 5 } opts.ReconnectWait = (25 * time.Millisecond) nats.ReconnectJitter(0, 0)(&opts) dch := make(chan bool) opts.DisconnectedErrCB = func(_ *nats.Conn, _ error) { dch <- true } closedCbCalled := false cch := make(chan bool) opts.ClosedCB = func(_ *nats.Conn) { closedCbCalled = true cch <- true } nc, err := opts.Connect() if err != nil { t.Fatalf("Expected to connect, got err: %v\n", err) } defer nc.Close() s1.Shutdown() // On Windows, creating a TCP connection to a server not running takes more than // a second. So be generous with the WaitTime. // wait for disconnect if e := WaitTime(dch, 5*time.Second); e != nil { t.Fatal("Did not receive a disconnect callback message") } // Wait for ClosedCB if e := WaitTime(cch, 5*time.Second); e != nil { reconnects := nc.Stats().Reconnects t.Fatalf("Did not receive a closed callback message, #reconnects: %v", reconnects) } // Make sure we have not exceeded MaxReconnect reconnects := nc.Stats().Reconnects if reconnects != uint64(opts.MaxReconnect) { t.Fatalf("Num reconnects was %v, expected %v", reconnects, opts.MaxReconnect) } // Make sure we are not still reconnecting.. if !closedCbCalled { t.Logf("%+v\n", nc) t.Fatal("Closed CB was not triggered, should have been.") } // Expect connection to be closed... 
if !nc.IsClosed() { t.Fatalf("Wrong status: %d\n", nc.Status()) } } func TestTimeoutOnNoServers(t *testing.T) { s1 := RunServerOnPort(1222) defer s1.Shutdown() opts := nats.GetDefaultOptions() if runtime.GOOS == "windows" { opts.Servers = testServers[:2] opts.MaxReconnect = 2 opts.Timeout = 100 * time.Millisecond opts.ReconnectWait = (100 * time.Millisecond) nats.ReconnectJitter(0, 0)(&opts) } else { opts.Servers = testServers // 1 second total time wait opts.MaxReconnect = 10 opts.ReconnectWait = (100 * time.Millisecond) nats.ReconnectJitter(0, 0)(&opts) } opts.NoRandomize = true dch := make(chan bool) opts.DisconnectedErrCB = func(nc *nats.Conn, _ error) { // Suppress any additional calls nc.SetDisconnectErrHandler(nil) dch <- true } cch := make(chan bool) opts.ClosedCB = func(_ *nats.Conn) { cch <- true } nc, err := opts.Connect() if err != nil { t.Fatalf("Expected to connect, got err: %v\n", err) } defer nc.Close() s1.Shutdown() // On Windows, creating a connection to a non-running server takes // more than a second. 
So be generous with WaitTime // wait for disconnect if e := WaitTime(dch, 5*time.Second); e != nil { t.Fatal("Did not receive a disconnect callback message") } startWait := time.Now() // Wait for ClosedCB if e := WaitTime(cch, 5*time.Second); e != nil { t.Fatal("Did not receive a closed callback message") } if runtime.GOOS != "windows" { timeWait := time.Since(startWait) // Use 500ms as variable time delta variable := (500 * time.Millisecond) expected := (time.Duration(opts.MaxReconnect) * opts.ReconnectWait) if timeWait > (expected + variable) { t.Fatalf("Waited too long for Closed state: %d\n", timeWait/time.Millisecond) } } } func TestPingReconnect(t *testing.T) { RECONNECTS := 4 s1 := RunServerOnPort(1222) defer s1.Shutdown() opts := nats.GetDefaultOptions() opts.Servers = testServers opts.NoRandomize = true opts.Timeout = 100 * time.Millisecond opts.ReconnectWait = 200 * time.Millisecond nats.ReconnectJitter(0, 0)(&opts) opts.PingInterval = 50 * time.Millisecond opts.MaxPingsOut = -1 var wg sync.WaitGroup wg.Add(1) rch := make(chan time.Time, RECONNECTS) dch := make(chan time.Time, RECONNECTS) opts.DisconnectedErrCB = func(_ *nats.Conn, _ error) { d := dch select { case d <- time.Now(): default: d = nil } } opts.ReconnectedCB = func(c *nats.Conn) { r := rch select { case r <- time.Now(): default: r = nil wg.Done() } } nc, err := opts.Connect() if err != nil { t.Fatalf("Expected to connect, got err: %v\n", err) } defer nc.Close() wg.Wait() s1.Shutdown() <-dch for i := 0; i < RECONNECTS-1; i++ { disconnectedAt := <-dch reconnectAt := <-rch pingCycle := disconnectedAt.Sub(reconnectAt) if pingCycle > 2*opts.PingInterval { t.Fatalf("Reconnect due to ping took %s", pingCycle.String()) } } } type checkPoolUpdatedDialer struct { conn net.Conn first, final bool ra int } func (d *checkPoolUpdatedDialer) Dial(network, address string) (net.Conn, error) { doReal := false if d.first { d.first = false doReal = true } else if d.final { d.ra++ return nil, errors.New("On 
purpose") } else { d.ra++ if d.ra == 15 { d.ra = 0 doReal = true } } if doReal { c, err := net.Dial(network, address) if err != nil { return nil, err } d.conn = c return c, nil } return nil, errors.New("On purpose") } func TestServerPoolUpdatedWhenRouteGoesAway(t *testing.T) { if err := serverVersionAtLeast(1, 0, 7); err != nil { t.Skip(err.Error()) } s1Opts := test.DefaultTestOptions s1Opts.Host = "127.0.0.1" s1Opts.Port = 4222 s1Opts.Cluster.Host = "127.0.0.1" s1Opts.Cluster.Port = 6222 s1Opts.Routes = server.RoutesFromStr("nats://127.0.0.1:6223,nats://127.0.0.1:6224") s1 := test.RunServer(&s1Opts) defer s1.Shutdown() s1Url := "nats://127.0.0.1:4222" s2Url := "nats://127.0.0.1:4223" s3Url := "nats://127.0.0.1:4224" ch := make(chan bool, 1) chch := make(chan bool, 1) connHandler := func(_ *nats.Conn) { chch <- true } nc, err := nats.Connect(s1Url, nats.ReconnectHandler(connHandler), nats.DiscoveredServersHandler(func(_ *nats.Conn) { ch <- true })) if err != nil { t.Fatalf("Error on connect") } s2Opts := test.DefaultTestOptions s2Opts.Host = "127.0.0.1" s2Opts.Port = s1Opts.Port + 1 s2Opts.Cluster.Host = "127.0.0.1" s2Opts.Cluster.Port = 6223 s2Opts.Routes = server.RoutesFromStr("nats://127.0.0.1:6222,nats://127.0.0.1:6224") s2 := test.RunServer(&s2Opts) defer s2.Shutdown() // Wait to be notified if err := Wait(ch); err != nil { t.Fatal("New server callback was not invoked") } checkPool := func(expected []string) { // Don't use discovered here, but Servers to have the full list. // Also, there may be cases where the mesh is not formed yet, // so try again on failure. 
var ( ds []string timeout = time.Now().Add(5 * time.Second) ) for time.Now().Before(timeout) { ds = nc.Servers() if len(ds) == len(expected) { m := make(map[string]struct{}, len(ds)) for _, url := range ds { m[url] = struct{}{} } ok := true for _, url := range expected { if _, present := m[url]; !present { ok = false break } } if ok { return } } time.Sleep(50 * time.Millisecond) } stackFatalf(t, "Expected %v, got %v", expected, ds) } // Verify that we now know about s2 checkPool([]string{s1Url, s2Url}) s3Opts := test.DefaultTestOptions s3Opts.Host = "127.0.0.1" s3Opts.Port = s2Opts.Port + 1 s3Opts.Cluster.Host = "127.0.0.1" s3Opts.Cluster.Port = 6224 s3Opts.Routes = server.RoutesFromStr("nats://127.0.0.1:6222,nats://127.0.0.1:6223") s3 := test.RunServer(&s3Opts) defer s3.Shutdown() // Wait to be notified if err := Wait(ch); err != nil { t.Fatal("New server callback was not invoked") } // Verify that we now know about s3 checkPool([]string{s1Url, s2Url, s3Url}) // Stop s1. Since this was passed to the Connect() call, this one should // still be present. s1.Shutdown() // Wait for reconnect if err := Wait(chch); err != nil { t.Fatal("Reconnect handler not invoked") } checkPool([]string{s1Url, s2Url, s3Url}) // Check the server we reconnected to. 
reConnectedTo := nc.ConnectedUrl() expected := []string{s1Url} restartS2 := false if reConnectedTo == s2Url { restartS2 = true s2.Shutdown() expected = append(expected, s3Url) } else if reConnectedTo == s3Url { s3.Shutdown() expected = append(expected, s2Url) } else { t.Fatalf("Unexpected server client has reconnected to: %v", reConnectedTo) } // Wait for reconnect if err := Wait(chch); err != nil { t.Fatal("Reconnect handler not invoked") } // The implicit server that we just shutdown should have been removed from the pool checkPool(expected) // Restart the one that was shutdown and check that it is now back in the pool if restartS2 { s2 = test.RunServer(&s2Opts) defer s2.Shutdown() expected = append(expected, s2Url) } else { s3 = test.RunServer(&s3Opts) defer s3.Shutdown() expected = append(expected, s3Url) } // Since this is not a "new" server, the DiscoveredServersCB won't be invoked. checkPool(expected) nc.Close() // Restart s1 s1 = test.RunServer(&s1Opts) defer s1.Shutdown() // We should have all 3 servers running now... // Create a client connection with special dialer. d := &checkPoolUpdatedDialer{first: true} nc, err = nats.Connect(s1Url, nats.MaxReconnects(10), nats.ReconnectWait(15*time.Millisecond), nats.ReconnectJitter(0, 0), nats.SetCustomDialer(d), nats.ReconnectHandler(connHandler), nats.ClosedHandler(connHandler)) if err != nil { t.Fatalf("Error on connect") } defer nc.Close() // Make sure that we have all 3 servers in the pool (this will wait if required) checkPool(expected) // Cause disconnection between client and server. We are going to reconnect // and we want to check that when we get the INFO again with the list of // servers, we don't lose the knowledge of how many times we tried to // reconnect. d.conn.Close() // Wait for client to reconnect to a server if err := Wait(chch); err != nil { t.Fatal("Reconnect handler not invoked") } // At this point, we should have tried to reconnect 5 times to each server. 
// For the one we reconnected to, its max reconnect attempts should have been // cleared, not for the other ones. // Cause a disconnect again and ensure we won't reconnect. d.final = true d.conn.Close() // Wait for Close callback to be invoked. if err := Wait(chch); err != nil { t.Fatal("Close handler not invoked") } // Since MaxReconnect is 10, after trying 5 more times on 2 of the servers, // these should have been removed. We have still 5 more tries for the server // we did previously reconnect to. // So total of reconnect attempt should be: 2*5+1*10=20 if d.ra != 20 { t.Fatalf("Should have tried to reconnect 20 more times, got %v", d.ra) } nc.Close() } nats.go-1.41.0/test/compat_test.go000066400000000000000000000411351477351342400170140ustar00rootroot00000000000000// Copyright 2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build compat package test import ( "bytes" "context" "crypto/sha256" "encoding/json" "io" "net/http" "os" "strings" "testing" "time" "github.com/nats-io/nats.go" "github.com/nats-io/nats.go/jetstream" "github.com/nats-io/nats.go/micro" ) type objectStepConfig[T any] struct { Suite string `json:"suite"` Test string `json:"test"` Command string `json:"command"` URL string `json:"url"` Bucket string `json:"bucket"` Object string `json:"object"` Config T `json:"config"` } func TestCompatibilityObjectStoreDefaultBucket(t *testing.T) { t.Parallel() nc := connect(t) js, err := jetstream.New(nc) if err != nil { t.Fatalf("Error connecting to NATS: %v", err) } defer nc.Close() // setup subscription on which tester will be sending requests sub, err := nc.SubscribeSync("tests.object-store.default-bucket.>") if err != nil { t.Fatalf("Error subscribing to test subject: %v", err) } defer sub.Unsubscribe() // 1. Create default bucket msg, err := sub.NextMsg(1 * time.Hour) if err != nil { t.Fatalf("Error getting message: %v", err) } ctx := context.Background() _, err = js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{ Bucket: "test", }) if err != nil { t.Fatalf("Error creating object store: %v", err) } // send empty response to indicate client is done if err := msg.Respond(nil); err != nil { t.Fatalf("Error responding to message: %v", err) } validateTestResult(t, sub) } func TestCompatibilityObjectStoreCustomBucket(t *testing.T) { t.Parallel() nc := connect(t) js, err := jetstream.New(nc) if err != nil { t.Fatalf("Error connecting to NATS: %v", err) } ctx := context.Background() // setup subscription on which tester will be sending requests sub, err := nc.SubscribeSync("tests.object-store.custom-bucket.>") if err != nil { t.Fatalf("Error subscribing to test subject: %v", err) } defer sub.Unsubscribe() // 1. 
Create custom bucket msg, err := sub.NextMsg(1 * time.Hour) if err != nil { t.Fatalf("Error getting message: %v", err) } var cfg objectStepConfig[jetstream.ObjectStoreConfig] if err := json.Unmarshal(msg.Data, &cfg); err != nil { t.Fatalf("Error unmarshalling message: %v", err) } _, err = js.CreateObjectStore(ctx, cfg.Config) if err != nil { t.Fatalf("Error creating object store: %v", err) } // send empty response to indicate client is done if err := msg.Respond(nil); err != nil { t.Fatalf("Error responding to message: %v", err) } validateTestResult(t, sub) } func TestCompatibilityObjectStoreGetObject(t *testing.T) { t.Parallel() type config struct { Bucket string `json:"bucket"` Object string `json:"object"` } nc := connect(t) js, err := jetstream.New(nc) if err != nil { t.Fatalf("Error connecting to NATS: %v", err) } defer nc.Close() ctx := context.Background() // setup subscription on which tester will be sending requests sub, err := nc.SubscribeSync("tests.object-store.get-object.>") if err != nil { t.Fatalf("Error subscribing to test subject: %v", err) } defer sub.Unsubscribe() msg, err := sub.NextMsg(1 * time.Hour) if err != nil { t.Fatalf("Error getting message: %v", err) } var cfg config if err := json.Unmarshal(msg.Data, &cfg); err != nil { t.Fatalf("Error unmarshalling message: %v", err) } // Get object os, err := js.ObjectStore(ctx, cfg.Bucket) if err != nil { t.Fatalf("Error getting object store: %v", err) } obj, err := os.Get(ctx, cfg.Object) if err != nil { t.Fatalf("Error creating object store: %v", err) } data, err := io.ReadAll(obj) if err != nil { t.Fatalf("Error reading object: %v", err) } // calculate sha256 of the object h := sha256.New() h.Write(data) sha := h.Sum(nil) // send response to indicate client is done if err := msg.Respond(sha); err != nil { t.Fatalf("Error responding to message: %v", err) } validateTestResult(t, sub) } func TestCompatibilityObjectStorePutObject(t *testing.T) { t.Parallel() nc := connect(t) js, err := 
jetstream.New(nc) if err != nil { t.Fatalf("Error connecting to NATS: %v", err) } defer nc.Close() ctx := context.Background() // setup subscription on which tester will be sending requests sub, err := nc.SubscribeSync("tests.object-store.put-object.>") if err != nil { t.Fatalf("Error subscribing to test subject: %v", err) } defer sub.Unsubscribe() msg, err := sub.NextMsg(1 * time.Hour) if err != nil { t.Fatalf("Error getting message: %v", err) } // Put object var putObjectCfg objectStepConfig[jetstream.ObjectMeta] if err := json.Unmarshal(msg.Data, &putObjectCfg); err != nil { t.Fatalf("Error unmarshalling message: %v", err) } os, err := js.ObjectStore(ctx, putObjectCfg.Bucket) if err != nil { t.Fatalf("Error getting object store: %v", err) } client := http.Client{Timeout: 10 * time.Second, Transport: &http.Transport{DisableKeepAlives: true}} resp, err := client.Get(putObjectCfg.URL) if err != nil { t.Fatalf("Error getting content: %v", err) } data, err := io.ReadAll(resp.Body) if err != nil { t.Fatalf("Error reading content: %v", err) } defer resp.Body.Close() if _, err := os.Put(ctx, putObjectCfg.Config, bytes.NewBuffer(data)); err != nil { t.Fatalf("Error putting object: %v", err) } if err := msg.Respond(nil); err != nil { t.Fatalf("Error responding to message: %v", err) } validateTestResult(t, sub) } func TestCompatibilityObjectStoreUpdateMetadata(t *testing.T) { t.Parallel() nc := connect(t) js, err := jetstream.New(nc) if err != nil { t.Fatalf("Error connecting to NATS: %v", err) } defer nc.Close() ctx := context.Background() // setup subscription on which tester will be sending requests sub, err := nc.SubscribeSync("tests.object-store.update-metadata.>") if err != nil { t.Fatalf("Error subscribing to test subject: %v", err) } defer sub.Unsubscribe() msg, err := sub.NextMsg(1 * time.Hour) if err != nil { t.Fatalf("Error getting message: %v", err) } // Update object metadata var putObjectCfg objectStepConfig[jetstream.ObjectMeta] if err := 
json.Unmarshal(msg.Data, &putObjectCfg); err != nil { t.Fatalf("Error unmarshalling message: %v", err) } os, err := js.ObjectStore(ctx, putObjectCfg.Bucket) if err != nil { t.Fatalf("Error getting object store: %v", err) } if err := os.UpdateMeta(ctx, putObjectCfg.Object, putObjectCfg.Config); err != nil { t.Fatalf("Error putting object: %v", err) } if err := msg.Respond(nil); err != nil { t.Fatalf("Error responding to message: %v", err) } validateTestResult(t, sub) } func TestCompatibilityObjectStoreWatch(t *testing.T) { t.Parallel() type config struct { Bucket string `json:"bucket"` Object string `json:"object"` } nc := connect(t) js, err := jetstream.New(nc) if err != nil { t.Fatalf("Error connecting to NATS: %v", err) } defer nc.Close() ctx := context.Background() // setup subscription on which tester will be sending requests sub, err := nc.SubscribeSync("tests.object-store.watch.>") if err != nil { t.Fatalf("Error subscribing to test subject: %v", err) } defer sub.Unsubscribe() msg, err := sub.NextMsg(1 * time.Hour) if err != nil { t.Fatalf("Error getting message: %v", err) } // Watch object var cfg config if err := json.Unmarshal(msg.Data, &cfg); err != nil { t.Fatalf("Error unmarshalling message: %v", err) } os, err := js.ObjectStore(ctx, cfg.Bucket) if err != nil { t.Fatalf("Error getting object store: %v", err) } watcher, err := os.Watch(ctx) if err != nil { t.Fatalf("Error getting watcher: %v", err) } var digests []string var info *jetstream.ObjectInfo // get the initial value select { case info = <-watcher.Updates(): digests = append(digests, info.Digest) case <-time.After(30 * time.Second): t.Fatalf("Timeout waiting for object update") } // init done, should receive nil select { case info = <-watcher.Updates(): if info != nil { t.Fatalf("Expected nil, got: %v", info) } case <-time.After(30 * time.Second): t.Fatalf("Timeout waiting for object update") } // get the updated value select { case info = <-watcher.Updates(): digests = append(digests, 
info.Digest) case <-time.After(30 * time.Second): t.Fatalf("Timeout waiting for object update") } if err := msg.Respond([]byte(strings.Join(digests, ","))); err != nil { t.Fatalf("Error responding to message: %v", err) } validateTestResult(t, sub) } func TestCompatibilityObjectStoreWatchUpdates(t *testing.T) { t.Parallel() type config struct { Bucket string `json:"bucket"` Object string `json:"object"` } nc := connect(t) js, err := jetstream.New(nc) if err != nil { t.Fatalf("Error connecting to NATS: %v", err) } defer nc.Close() ctx := context.Background() // setup subscription on which tester will be sending requests sub, err := nc.SubscribeSync("tests.object-store.watch-updates.>") if err != nil { t.Fatalf("Error subscribing to test subject: %v", err) } defer sub.Unsubscribe() msg, err := sub.NextMsg(1 * time.Hour) if err != nil { t.Fatalf("Error getting message: %v", err) } // Watch object var cfg config if err := json.Unmarshal(msg.Data, &cfg); err != nil { t.Fatalf("Error unmarshalling message: %v", err) } os, err := js.ObjectStore(ctx, cfg.Bucket) if err != nil { t.Fatalf("Error getting object store: %v", err) } watcher, err := os.Watch(ctx, jetstream.UpdatesOnly()) if err != nil { t.Fatalf("Error getting watcher: %v", err) } var info *jetstream.ObjectInfo select { case info = <-watcher.Updates(): case <-time.After(30 * time.Second): t.Fatalf("Timeout waiting for object update") } if err := msg.Respond([]byte(info.Digest)); err != nil { t.Fatalf("Error responding to message: %v", err) } validateTestResult(t, sub) } func TestCompatibilityObjectStoreGetLink(t *testing.T) { t.Parallel() type config struct { Bucket string `json:"bucket"` Object string `json:"object"` } nc := connect(t) js, err := jetstream.New(nc) if err != nil { t.Fatalf("Error connecting to NATS: %v", err) } defer nc.Close() ctx := context.Background() // setup subscription on which tester will be sending requests sub, err := nc.SubscribeSync("tests.object-store.get-link.>") if err != nil { 
t.Fatalf("Error subscribing to test subject: %v", err) } defer sub.Unsubscribe() msg, err := sub.NextMsg(1 * time.Hour) if err != nil { t.Fatalf("Error getting message: %v", err) } // Watch object var cfg config if err := json.Unmarshal(msg.Data, &cfg); err != nil { t.Fatalf("Error unmarshalling message: %v", err) } os, err := js.ObjectStore(ctx, cfg.Bucket) if err != nil { t.Fatalf("Error getting object store: %v", err) } obj, err := os.Get(ctx, cfg.Object) if err != nil { t.Fatalf("Error getting object: %v", err) } data, err := io.ReadAll(obj) if err != nil { t.Fatalf("Error reading object: %v", err) } // calculate sha256 of the object h := sha256.New() h.Write(data) sha := h.Sum(nil) if err := msg.Respond(sha); err != nil { t.Fatalf("Error responding to message: %v", err) } validateTestResult(t, sub) } func TestCompatibilityObjectStorePutLink(t *testing.T) { t.Parallel() type config struct { Bucket string `json:"bucket"` Object string `json:"object"` LinkName string `json:"link_name"` } nc := connect(t) js, err := jetstream.New(nc) if err != nil { t.Fatalf("Error connecting to NATS: %v", err) } defer nc.Close() ctx := context.Background() // setup subscription on which tester will be sending requests sub, err := nc.SubscribeSync("tests.object-store.put-link.>") if err != nil { t.Fatalf("Error subscribing to test subject: %v", err) } defer sub.Unsubscribe() msg, err := sub.NextMsg(1 * time.Hour) if err != nil { t.Fatalf("Error getting message: %v", err) } // Watch object var cfg config if err := json.Unmarshal(msg.Data, &cfg); err != nil { t.Fatalf("Error unmarshalling message: %v", err) } os, err := js.ObjectStore(ctx, cfg.Bucket) if err != nil { t.Fatalf("Error getting object store: %v", err) } sourceObj, err := os.GetInfo(ctx, cfg.Object) if err != nil { t.Fatalf("Error getting object: %v", err) } _, err = os.AddLink(ctx, cfg.LinkName, sourceObj) if err != nil { t.Fatalf("Error adding link: %v", err) } if err := msg.Respond(nil); err != nil { t.Fatalf("Error 
responding to message: %v", err) } validateTestResult(t, sub) } func validateTestResult(t *testing.T, sub *nats.Subscription) { t.Helper() stepEnd, err := sub.NextMsg(5 * time.Second) if err != nil { t.Fatalf("Error getting message: %v", err) } if strings.Contains(string(stepEnd.Subject), "fail") { t.Fatalf("Test step failed: %v", string(stepEnd.Subject)) } } func connect(t *testing.T) *nats.Conn { t.Helper() natsURL := os.Getenv("NATS_URL") if natsURL == "" { natsURL = nats.DefaultURL } nc, err := nats.Connect(natsURL, nats.Timeout(1*time.Hour)) if err != nil { t.Fatalf("Error connecting to NATS: %v", err) } return nc } func connectJS(t *testing.T) (*nats.Conn, nats.JetStreamContext) { nc := connect(t) js, err := nc.JetStream() if err != nil { t.Fatalf("Error getting JetStream context: %v", err) } return nc, js } type serviceStepConfig[T any] struct { Suite string `json:"suite"` Test string `json:"test"` Command string `json:"command"` Config T `json:"config"` } func TestTestCompatibilityService(t *testing.T) { t.Parallel() nc := connect(t) defer nc.Close() type groupConfig struct { Name string `json:"name"` QueueGroup string `json:"queue_group"` } type endpointConfig struct { micro.EndpointConfig Name string `json:"name"` Group string `json:"group"` } type config struct { micro.Config Groups []groupConfig `json:"groups"` Endpoints []endpointConfig `json:"endpoints"` } echoHandler := micro.HandlerFunc(func(req micro.Request) { req.Respond(req.Data()) }) errHandler := micro.HandlerFunc(func(req micro.Request) { req.Error("500", "handler error", nil) }) // setup subscription on which tester will be sending requests sub, err := nc.SubscribeSync("tests.service.core.>") if err != nil { t.Fatalf("Error subscribing to test subject: %v", err) } defer sub.Unsubscribe() // 1. 
Get service and endpoint configs msg, err := sub.NextMsg(1 * time.Hour) if err != nil { t.Fatalf("Error getting message: %v", err) } var cfg serviceStepConfig[*config] if err := json.Unmarshal(msg.Data, &cfg); err != nil { t.Fatalf("Error unmarshalling message: %v", err) } var services []micro.Service svcCfg := cfg.Config svcCfg.StatsHandler = func(e *micro.Endpoint) any { return map[string]string{"endpoint": e.Name} } svc, err := micro.AddService(nc, svcCfg.Config) if err != nil { t.Fatalf("Error adding service: %v", err) } groups := make(map[string]micro.Group) for _, groupCfg := range svcCfg.Groups { opts := []micro.GroupOpt{} if groupCfg.QueueGroup != "" { opts = append(opts, micro.WithGroupQueueGroup(groupCfg.QueueGroup)) } groups[groupCfg.Name] = svc.AddGroup(groupCfg.Name, opts...) } for _, endpointCfg := range svcCfg.Endpoints { opts := []micro.EndpointOpt{ micro.WithEndpointSubject(endpointCfg.Subject), } if endpointCfg.QueueGroup != "" { opts = append(opts, micro.WithEndpointQueueGroup(endpointCfg.QueueGroup)) } if endpointCfg.Metadata != nil { opts = append(opts, micro.WithEndpointMetadata(endpointCfg.Metadata)) } handler := echoHandler if endpointCfg.Name == "faulty" { handler = errHandler } if endpointCfg.Group != "" { g := groups[endpointCfg.Group] if g == nil { t.Fatalf("Group %q not found", endpointCfg.Group) } if err := g.AddEndpoint(endpointCfg.Name, handler, opts...); err != nil { t.Fatalf("Error adding endpoint: %v", err) } } else { if err := svc.AddEndpoint(endpointCfg.Name, handler, opts...); err != nil { t.Fatalf("Error adding endpoint: %v", err) } } } services = append(services, svc) if err := msg.Respond(nil); err != nil { t.Fatalf("Error responding to message: %v", err) } // 2. 
Stop services msg, err = sub.NextMsg(1 * time.Hour) if err != nil { t.Fatalf("Error getting message: %v", err) } for _, svc := range services { svc.Stop() } if err := msg.Respond(nil); err != nil { t.Fatalf("Error responding to message: %v", err) } validateTestResult(t, sub) } nats.go-1.41.0/test/configs/000077500000000000000000000000001477351342400155675ustar00rootroot00000000000000nats.go-1.41.0/test/configs/certs/000077500000000000000000000000001477351342400167075ustar00rootroot00000000000000nats.go-1.41.0/test/configs/certs/ca.pem000066400000000000000000000031531477351342400177770ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIEkDCCA3igAwIBAgIUSZwW7btc9EUbrMWtjHpbM0C2bSEwDQYJKoZIhvcNAQEL BQAwcTELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExEDAOBgNVBAoM B1N5bmFkaWExEDAOBgNVBAsMB25hdHMuaW8xKTAnBgNVBAMMIENlcnRpZmljYXRl IEF1dGhvcml0eSAyMDIyLTA4LTI3MB4XDTIyMDgyNzIwMjMwMloXDTMyMDgyNDIw MjMwMlowcTELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExEDAOBgNV BAoMB1N5bmFkaWExEDAOBgNVBAsMB25hdHMuaW8xKTAnBgNVBAMMIENlcnRpZmlj YXRlIEF1dGhvcml0eSAyMDIyLTA4LTI3MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A MIIBCgKCAQEAqilVqyY8rmCpTwAsLF7DEtWEq37KbljBWVjmlp2Wo6TgMd3b537t 6iO8+SbI8KH75i63RcxV3Uzt1/L9Yb6enDXF52A/U5ugmDhaa+Vsoo2HBTbCczmp qndp7znllQqn7wNLv6aGSvaeIUeYS5Dmlh3kt7Vqbn4YRANkOUTDYGSpMv7jYKSu 1ee05Rco3H674zdwToYto8L8V7nVMrky42qZnGrJTaze+Cm9tmaIyHCwUq362CxS dkmaEuWx11MOIFZvL80n7ci6pveDxe5MIfwMC3/oGn7mbsSqidPMcTtjw6ey5NEu Z0UrC/2lL1FtF4gnVMKUSaEhU2oKjj0ZAQIDAQABo4IBHjCCARowHQYDVR0OBBYE FP7Pfz4u7sSt6ltviEVsx4hIFIs6MIGuBgNVHSMEgaYwgaOAFP7Pfz4u7sSt6ltv iEVsx4hIFIs6oXWkczBxMQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5p YTEQMA4GA1UECgwHU3luYWRpYTEQMA4GA1UECwwHbmF0cy5pbzEpMCcGA1UEAwwg Q2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMjItMDgtMjeCFEmcFu27XPRFG6zFrYx6 WzNAtm0hMAwGA1UdEwQFMAMBAf8wOgYJYIZIAYb4QgENBC0WK25hdHMuaW8gbmF0 cy1zZXJ2ZXIgdGVzdC1zdWl0ZSB0cmFuc2llbnQgQ0EwDQYJKoZIhvcNAQELBQAD ggEBAHDCHLQklYZlnzHDaSwxgGSiPUrCf2zhk2DNIYSDyBgdzrIapmaVYQRrCBtA 
j/4jVFesgw5WDoe4TKsyha0QeVwJDIN8qg2pvpbmD8nOtLApfl0P966vcucxDwqO dQWrIgNsaUdHdwdo0OfvAlTfG0v/y2X0kbL7h/el5W9kWpxM/rfbX4IHseZL2sLq FH69SN3FhMbdIm1ldrcLBQVz8vJAGI+6B9hSSFQWljssE0JfAX+8VW/foJgMSx7A vBTq58rLkAko56Jlzqh/4QT+ckayg9I73v1Q5/44jP1mHw35s5ZrzpDQt2sVv4l5 lwRPJFXMwe64flUs9sM+/vqJaIY= -----END CERTIFICATE----- nats.go-1.41.0/test/configs/certs/client-cert-invalid.pem000066400000000000000000000032601477351342400232500ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIExDCCAyygAwIBAgIQEdLeZgsrEsLe37gR/voylTANBgkqhkiG9w0BAQsFADCB szEeMBwGA1UEChMVbWtjZXJ0IGRldmVsb3BtZW50IENBMUQwQgYDVQQLDDtwaW90 cnBpb3Ryb3dza2lAUGlvdHJzLU1hY0Jvb2stUHJvLmxvY2FsIChQaW90ciBQaW90 cm93c2tpKTFLMEkGA1UEAwxCbWtjZXJ0IHBpb3RycGlvdHJvd3NraUBQaW90cnMt TWFjQm9vay1Qcm8ubG9jYWwgKFBpb3RyIFBpb3Ryb3dza2kpMB4XDTIzMDUxMjEw MTYyOFoXDTI1MDgxMjEwMTYyOFowbzEnMCUGA1UEChMebWtjZXJ0IGRldmVsb3Bt ZW50IGNlcnRpZmljYXRlMUQwQgYDVQQLDDtwaW90cnBpb3Ryb3dza2lAUGlvdHJz LU1hY0Jvb2stUHJvLmxvY2FsIChQaW90ciBQaW90cm93c2tpKTCCASIwDQYJKoZI hvcNAQEBBQADggEPADCCAQoCggEBAJqlzdmIcPNu8ad7TlrJA2SdAtaxYJJK8lRs oAdq+PO7JeWE8NyEPSEFpXclWsEvG49gS6AueLPRuVT4WqbIDVqm5Rvcx/a1K39i 6Ik3qpLerGY8vPngIhXoU4CUjNrEyJy22bhCPidJtPRnfkVv/eI6LSA2jWsikGNH Iqnbu6KtUIKbnLuuH0NR8ycYaqeiOdaCMV6STSXmM5S96qH7h7NexGC08b+aersw CLelFR04J0RV/cax7U1pgsWKKv8icnjiB4tq5IbYuEZE0g/uJZ3BsB5DXKq+WjNI uRDJJUWyzrWhBPNIW/1I2zesEXSKCvDcUVAuUceYxiEokR4+pv0CAwEAAaOBljCB kzAOBgNVHQ8BAf8EBAMCBaAwJwYDVR0lBCAwHgYIKwYBBQUHAwIGCCsGAQUFBwMB BggrBgEFBQcDBDAfBgNVHSMEGDAWgBQbD6YymnmaX19FroClM52B8doDIDA3BgNV HREEMDAugglsb2NhbGhvc3SBD2VtYWlsQGxvY2FsaG9zdIcQAAAAAAAAAAAAAAAA AAAAATANBgkqhkiG9w0BAQsFAAOCAYEAJuFrQ0KdmwEc7UyaoTygW59f1JSJGbZa Ii5EuMtpSon5DX5NaI5aRE350UtimNrQIu8LAPx1UGwSRuPkzvuNAA/l0HAJrqh3 gEorH6fbsRkqkDUvmNiqTfs+So6R0s2+6yVG6t8+NT1OBH616eQ9efvthwRO0AAL L8LGJJdYMveEJv+GB/+Zs75MQUxniJ+ip/YxF8bcaRjVS/tb3J52yZ1Eb2UU18kN uAlFOxiKnwvb2csFcZ6zc4Fpm0LfCrpzPCwGF5y6bsjzpqVej87ea6roG9BJ7vbX xjbwGfchJZmDsG/g9MeoQoIifYqupQmtaQtlKUUD5MRjDhpOVUEJ4tsXDoZEz9DB 
kviE+VlIGU2QJ5l9KU2rIdxfh95rrIaqCt5xsT6wUjNtv0wAfbhMannUhjLv+h+G tIbMIEo0GFA/uY1eXLO4PTgF+EojqFfpUUM17Z3kubsOSvepxkwyipA5eI2fkThu Yu5Oyyq9X9Y3vnDMvHKJfkzA56Sp19Oy -----END CERTIFICATE----- nats.go-1.41.0/test/configs/certs/client-cert.pem000066400000000000000000000126411477351342400216270ustar00rootroot00000000000000Certificate: Data: Version: 3 (0x2) Serial Number: 38:4c:16:24:9b:04:1c:b3:db:e0:4c:3c:ed:b7:40:7d:68:b5:fa:1f Signature Algorithm: sha256WithRSAEncryption Issuer: C=US, ST=California, O=Synadia, OU=nats.io, CN=Certificate Authority 2022-08-27 Validity Not Before: Aug 27 20:23:02 2022 GMT Not After : Aug 24 20:23:02 2032 GMT Subject: C=US, ST=California, O=Synadia, OU=nats.io, CN=localhost Subject Public Key Info: Public Key Algorithm: rsaEncryption RSA Public-Key: (2048 bit) Modulus: 00:ac:9c:3e:9d:3b:7a:12:56:85:78:ca:df:9c:fc: 0c:7e:5e:f2:4f:22:33:46:81:38:53:d7:a7:25:8f: d7:ee:16:13:e2:67:49:88:f6:94:99:f0:a9:a6:db: fe:7a:17:c9:e3:df:31:73:71:38:70:3a:96:1e:99: 7b:5d:07:e3:63:e4:e8:bf:99:f7:3d:5c:27:f5:b7: 37:29:da:ee:82:80:00:d4:c8:d3:1b:36:0d:8b:d3: 8a:9b:8e:12:a1:4d:0c:c5:22:f8:56:3b:6a:1a:fb: e9:3d:08:1e:13:7f:55:6e:2e:65:93:9a:90:54:03: 6d:0d:e6:44:d6:f7:c0:d7:d8:e1:c7:1e:c2:9b:a3: 6e:88:f1:7c:58:08:a2:9f:13:cc:5b:b9:11:2c:1d: 23:6f:3a:ae:47:9a:0f:6a:ce:e5:80:34:09:e6:e3: fd:76:4a:cf:5a:18:bb:9c:c5:c1:74:49:67:77:1b: ba:28:86:31:a6:fc:12:af:4a:85:1b:73:5b:f4:d6: 42:ff:0c:1c:49:e7:31:f2:5a:2a:1e:cd:87:cb:22: ff:70:1c:48:ed:ba:e0:be:f0:bc:9e:e0:dc:59:db: a5:74:25:58:b3:61:04:f6:33:28:6b:07:25:60:0f: 72:93:16:6c:9f:b0:ad:4a:18:f7:9e:29:1e:b7:61: 34:17 Exponent: 65537 (0x10001) X509v3 extensions: X509v3 Basic Constraints: CA:FALSE Netscape Comment: nats.io nats-server test-suite certificate X509v3 Subject Key Identifier: 1F:14:EF:2B:53:AB:28:4A:93:42:98:AE:85:06:0F:B4:7D:DC:36:AE X509v3 Authority Key Identifier: keyid:FE:CF:7F:3E:2E:EE:C4:AD:EA:5B:6F:88:45:6C:C7:88:48:14:8B:3A DirName:/C=US/ST=California/O=Synadia/OU=nats.io/CN=Certificate 
Authority 2022-08-27 serial:49:9C:16:ED:BB:5C:F4:45:1B:AC:C5:AD:8C:7A:5B:33:40:B6:6D:21 X509v3 Subject Alternative Name: DNS:localhost, IP Address:127.0.0.1, IP Address:0:0:0:0:0:0:0:1, email:derek@nats.io Netscape Cert Type: SSL Client X509v3 Key Usage: Digital Signature, Key Encipherment X509v3 Extended Key Usage: TLS Web Client Authentication Signature Algorithm: sha256WithRSAEncryption 60:43:0b:c6:11:0b:96:ae:03:dc:77:26:9a:4a:bd:6a:d7:03: ec:43:16:2d:ba:8c:e5:50:fa:57:a9:1f:2f:a4:15:c3:a8:13: b9:d3:59:2a:97:7c:ae:ce:a9:f8:44:e4:97:ee:7d:09:dc:74: 38:80:94:cf:47:e0:84:52:2a:91:44:8a:85:55:da:42:6a:f1: 91:1a:6e:5a:63:e6:0b:61:3c:0d:b0:aa:17:b8:77:94:32:20: 4d:20:8f:84:56:64:ae:ef:d8:8d:42:b5:52:4d:b0:1c:46:97: bc:4c:77:8c:3f:a3:73:43:87:27:71:62:e7:fe:02:de:a1:27: 77:be:86:29:8f:62:a1:d9:e7:ea:61:33:73:f4:1f:0a:12:14: 68:eb:7d:8c:71:5b:42:e7:48:10:c9:df:30:3b:5b:eb:69:29: b6:95:bc:09:fc:01:b0:be:fc:9f:ee:c4:f3:df:a0:01:c5:68: 20:f5:2f:f8:e7:1c:a5:4c:a8:a8:a2:20:a1:d2:0f:f6:f6:c4: 0d:f5:26:fd:ea:8b:b5:06:a9:9e:17:35:47:f7:fd:6e:78:3d: 5f:7a:87:ed:21:b2:4e:e9:6a:d1:d9:ed:0e:cf:43:61:83:7c: fe:0d:b1:ad:ff:fa:2d:2b:36:9d:99:9c:20:48:21:0d:36:c8: dd:b6:0a:d8 -----BEGIN CERTIFICATE----- MIIE5zCCA8+gAwIBAgIUOEwWJJsEHLPb4Ew87bdAfWi1+h8wDQYJKoZIhvcNAQEL BQAwcTELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExEDAOBgNVBAoM B1N5bmFkaWExEDAOBgNVBAsMB25hdHMuaW8xKTAnBgNVBAMMIENlcnRpZmljYXRl IEF1dGhvcml0eSAyMDIyLTA4LTI3MB4XDTIyMDgyNzIwMjMwMloXDTMyMDgyNDIw MjMwMlowWjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExEDAOBgNV BAoMB1N5bmFkaWExEDAOBgNVBAsMB25hdHMuaW8xEjAQBgNVBAMMCWxvY2FsaG9z dDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKycPp07ehJWhXjK35z8 DH5e8k8iM0aBOFPXpyWP1+4WE+JnSYj2lJnwqabb/noXyePfMXNxOHA6lh6Ze10H 42Pk6L+Z9z1cJ/W3Nyna7oKAANTI0xs2DYvTipuOEqFNDMUi+FY7ahr76T0IHhN/ VW4uZZOakFQDbQ3mRNb3wNfY4ccewpujbojxfFgIop8TzFu5ESwdI286rkeaD2rO 5YA0Cebj/XZKz1oYu5zFwXRJZ3cbuiiGMab8Eq9KhRtzW/TWQv8MHEnnMfJaKh7N h8si/3AcSO264L7wvJ7g3FnbpXQlWLNhBPYzKGsHJWAPcpMWbJ+wrUoY954pHrdh 
NBcCAwEAAaOCAYwwggGIMAkGA1UdEwQCMAAwOQYJYIZIAYb4QgENBCwWKm5hdHMu aW8gbmF0cy1zZXJ2ZXIgdGVzdC1zdWl0ZSBjZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQU HxTvK1OrKEqTQpiuhQYPtH3cNq4wga4GA1UdIwSBpjCBo4AU/s9/Pi7uxK3qW2+I RWzHiEgUizqhdaRzMHExCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlh MRAwDgYDVQQKDAdTeW5hZGlhMRAwDgYDVQQLDAduYXRzLmlvMSkwJwYDVQQDDCBD ZXJ0aWZpY2F0ZSBBdXRob3JpdHkgMjAyMi0wOC0yN4IUSZwW7btc9EUbrMWtjHpb M0C2bSEwOwYDVR0RBDQwMoIJbG9jYWxob3N0hwR/AAABhxAAAAAAAAAAAAAAAAAA AAABgQ1kZXJla0BuYXRzLmlvMBEGCWCGSAGG+EIBAQQEAwIHgDALBgNVHQ8EBAMC BaAwEwYDVR0lBAwwCgYIKwYBBQUHAwIwDQYJKoZIhvcNAQELBQADggEBAGBDC8YR C5auA9x3JppKvWrXA+xDFi26jOVQ+lepHy+kFcOoE7nTWSqXfK7OqfhE5JfufQnc dDiAlM9H4IRSKpFEioVV2kJq8ZEablpj5gthPA2wqhe4d5QyIE0gj4RWZK7v2I1C tVJNsBxGl7xMd4w/o3NDhydxYuf+At6hJ3e+himPYqHZ5+phM3P0HwoSFGjrfYxx W0LnSBDJ3zA7W+tpKbaVvAn8AbC+/J/uxPPfoAHFaCD1L/jnHKVMqKiiIKHSD/b2 xA31Jv3qi7UGqZ4XNUf3/W54PV96h+0hsk7patHZ7Q7PQ2GDfP4Nsa3/+i0rNp2Z nCBIIQ02yN22Ctg= -----END CERTIFICATE----- nats.go-1.41.0/test/configs/certs/client-key-invalid.pem000066400000000000000000000032501477351342400231020ustar00rootroot00000000000000-----BEGIN PRIVATE KEY----- MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCapc3ZiHDzbvGn e05ayQNknQLWsWCSSvJUbKAHavjzuyXlhPDchD0hBaV3JVrBLxuPYEugLniz0blU +FqmyA1apuUb3Mf2tSt/YuiJN6qS3qxmPLz54CIV6FOAlIzaxMicttm4Qj4nSbT0 Z35Fb/3iOi0gNo1rIpBjRyKp27uirVCCm5y7rh9DUfMnGGqnojnWgjFekk0l5jOU veqh+4ezXsRgtPG/mnq7MAi3pRUdOCdEVf3Gse1NaYLFiir/InJ44geLauSG2LhG RNIP7iWdwbAeQ1yqvlozSLkQySVFss61oQTzSFv9SNs3rBF0igrw3FFQLlHHmMYh KJEePqb9AgMBAAECggEAMypWT/mPfUsgksv+IZVOFRTJoqSvEdfQE1SZIbsnwOQT Zru0QRFTdEB8/U2TmETwtmAixU16y+vAiLderr2ThYGgXbaPRjWsvYnI69VKDyuz GGRSFc4tGNh0AB+l9p+SzB7HK+pmy/Lb9tzi7zBdbGLZGUZTRbX61Y3sjwxPKUPw mrTindOvSH6FbVevAC6UCl92R9vk4ugS/oDZWKPntHeJX8NzXM6MfZ68+oPKeGXE DTQ98nZDfZMRDvyyClaAVfstsPV1pxYNYIWaB1w0saL2NZz08zZeGnkqFt621Q4V gsE9t9Gjg1o5paq0MEm5vBoJo7VyCoR7w/sfEzQaAQKBgQDAk/yOY2yxGcjZjE8q ozXq/EtbC/ldKcgngm1KYtA7ZyzRt7gGuAuv6sbmri4wpHL1D2UeKS8QPsywkXvM 
Oto3NdJraXbC26ObCP+njwWHWD1Zh3BD2O2mYOFUsTWzsxaZJbsy6Sh4cLg+0gVc UYzqOnUY5EJh6hCnGSZnK4v4fQKBgQDNk/QbK4RrRH33qaAly/Ihw5ZI76s3hc4Y RcsGi05iAV/jiE9HVF8vWytp1EdoLsO0BPrA9RPP8SYZdHCWh0KFYJtFzU+o8+1W ThtCIPdmOmAtQnoj52TMmwc+x/WbNIBvBrKQHIbTX9JHUiGFM9NqSuVjNhVDOzvM /o2D38swgQKBgBRzou68QF7OjjYMYJv2mVNLV/VjYCg0t7z6bQDpXZPxcSEUkcak 5RjZpiX5eY5Q6KR97g818HmZMcPOr4cQ+PvEC4S8vpATI1zjp8LzvXKSPHG1oIaU EykIQOXtq/ZZnpzFFQxjFpkz311MkKUtQ/ncG3N5SlN7uCkG0r1CMqtBAoGBAL/z myVXb9Bc5qW+a7t+/7oJDyVRK/Su6m39lQGqR2j5UZh5qVS38hycqx+ox3f+2lsX ny9WZsZtq55u+8WBzFoPh0wY1X2zLXO9gHQxpe99KFp6TOODZropMw2q1aiy0A1b GpW3HSj2urg/du8SIiCIiEEnuZjKER9qu6Zb6zSBAoGBALxqe9jb7WLArV/eMEtx zg7V/FZfFyqEGEbLMM9njM6uiSq0u17H5bsvmgi+dAot16BbDPKWdOw01zQDhphe GbchPMuNOPNyBm3MIJ5zXi4pQcc5W+Z5z54X9BCBJwIHEp+Tt9VJ6J9/RkSoTXp9 iq9elhb5bfMSA/KliX3cBTge -----END PRIVATE KEY----- nats.go-1.41.0/test/configs/certs/client-key.pem000066400000000000000000000032541477351342400214620ustar00rootroot00000000000000-----BEGIN PRIVATE KEY----- MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQCsnD6dO3oSVoV4 yt+c/Ax+XvJPIjNGgThT16clj9fuFhPiZ0mI9pSZ8Kmm2/56F8nj3zFzcThwOpYe mXtdB+Nj5Oi/mfc9XCf1tzcp2u6CgADUyNMbNg2L04qbjhKhTQzFIvhWO2oa++k9 CB4Tf1VuLmWTmpBUA20N5kTW98DX2OHHHsKbo26I8XxYCKKfE8xbuREsHSNvOq5H mg9qzuWANAnm4/12Ss9aGLucxcF0SWd3G7oohjGm/BKvSoUbc1v01kL/DBxJ5zHy WioezYfLIv9wHEjtuuC+8Lye4NxZ26V0JVizYQT2MyhrByVgD3KTFmyfsK1KGPee KR63YTQXAgMBAAECggEBAKc6FHt2NPTxOAxn2C6aDmycBftesfiblnu8EWaVrmgu oYMV+CsmYZ+mhmZu+mNFCsam5JzoUvp/+BKbNeZSjx2nl0qRmvOqhdhLcbkuLybl ZmjAS64wNv2Bq+a6xRfaswWGtLuugkS0TCph4+mV0qmVb7mJ5ExQqWXu8kCl9QHn uKacp1wVFok9rmEI+byL1+Z01feKrkf/hcF6dk62U7zHNPajViJFTDww7hiHyfUH 6qsxIe1UWSNKtE61haEHkzqbDIDAy79jX4t3JobLToeVNCbJ7BSPf2IQSPJxELVL sidIJhndEjsbDR2CLpIF/EjsiSIaP7jh2zC9fxFpgSkCgYEA1qH0PH1JD5FqRV/p n9COYa6EifvSymGo4u/2FHgtX7wNSIQvqAVXenrQs41mz9E65womeqFXT/AZglaM 1PEjjwcFlDuLvUEYYJNgdXrIC515ZXS6TdvJ0JpQJLx28GzZ7h31tZXfwn68C3/i UGEHp+nN1BfBBQnsqvmGFFvHZFUCgYEAzeDlZHHijBlgHU+kGzKm7atJfAGsrv6/ 
tw7CIMEsL+z/y7pl3nwDLdZF+mLIvGuKlwIRajEzbYcEuVymCyG2/SmPMQEUf6j+ C1OmorX9CW8OwHmVCajkIgKn0ICFsF9iFv6aYZmm1kG48AIuYiQ7HOvY/MlilqFs 1p8sw6ZpQrsCgYEAj7Z9fQs+omfxymYAXnwc+hcKtAGkENL3bIzULryRVSrrkgTA jDaXbnFR0Qf7MWedkxnezfm+Js5TpkwhnGuiLaC8AZclaCFwGypTShZeYDifEmno XT2vkjfhNdfjo/Ser6vr3BxwaSDG9MQ6Wyu9HpeUtFD7c05D4++T8YnKpskCgYEA pCkcoIAStcWSFy0m3K0B3+dBvAiVyh/FfNDeyEFf24Mt4CPsEIBwBH+j4ugbyeoy YwC6JCPBLyeHA8q1d5DVmX4m+Fs1HioBD8UOzRUyA/CzIZSQ21f5OIlHiIDCmQUl cNJpBUQAfT2AmpgSphzfqcsBhWeLHjLvVx8rEYLC0fsCgYAiHdPZ3C0f7rWZP93N gY4DuldiO4d+KVsWAdBxeNgPznisUI7/ZZ/9NvCxGvA5NynyZr0qlpiKzVvtFJG8 1ZPUuFFRMAaWn9h5C+CwMPgk65tFC6lw/el0hpmcocSXVdiJEbkV0rnv9iGh0CYX HMACGrYlyZdDYM0CH/JAM+K/QQ== -----END PRIVATE KEY----- nats.go-1.41.0/test/configs/certs/key.pem000066400000000000000000000032441477351342400202050ustar00rootroot00000000000000-----BEGIN PRIVATE KEY----- MIIEuwIBADANBgkqhkiG9w0BAQEFAASCBKUwggShAgEAAoIBAQDm+0dlzcmiLa+L zdVqeVQ8B1/rWnErK+VvvjH7FmVodg5Z5+RXyojpd9ZBrVd6QrLSVMQPfFvBvGGX 4yI6Ph5KXUefa31vNOOMhp2FGSmaEVhETKGQ0xRh4VfaAerOP5Cunl0TbSyJyjkV a7aeMtcqTEiFL7Ae2EtiMhTrMrYpBDQ8rzm2i1IyTb9DX5v7DUOmrSynQSlVyXCz tRVGNL/kHlItpEku1SHt/AD3ogu8EgqQZFB8xRRw9fubYgh4Q0kx80e4k9QtTKnc F3B2NGb/ZcE5Z+mmHIBq8J2zKMijOrdd3m5TbQmzDbETEOjs4L1eoZRLcL/cvYu5 gmXdr4F7AgMBAAECggEBAK4sr3MiEbjcsHJAvXyzjwRRH1Bu+8VtLW7swe2vvrpd w4aiKXrV/BXpSsRtvPgxkXyvdMSkpuBZeFI7cVTwAJFc86RQPt77x9bwr5ltFwTZ rXCbRH3b3ZPNhByds3zhS+2Q92itu5cPyanQdn2mor9/lHPyOOGZgobCcynELL6R wRElkeDyf5ODuWEd7ADC5IFyZuwb3azNVexIK+0yqnMmv+QzEW3hsycFmFGAeB7v MIMjb2BhLrRr6Y5Nh+k58yM5DCf9h/OJhDpeXwLkxyK4BFg+aZffEbUX0wHDMR7f /nMv1g6cKvDWiLU8xLzez4t2qNIBNdxw5ZSLyQRRolECgYEA+ySTKrBAqI0Uwn8H sUFH95WhWUXryeRyGyQsnWAjZGF1+d67sSY2un2W6gfZrxRgiNLWEFq9AaUs0MuH 6syF4Xwx/aZgU/gvsGtkgzuKw1bgvekT9pS/+opmHRCZyQAFEHj0IEpzyB6rW1u/ LdlR3ShEENnmXilFv/uF/uXP5tMCgYEA63LiT0w46aGPA/E+aLRWU10c1eZ7KdhR c3En6zfgIxgFs8J38oLdkOR0CF6T53DSuvGR/OprVKdlnUhhDxBgT1oQjK2GlhPx JV5uMvarJDJxAwsF+7T4H2QtZ00BtEfpyp790+TlypSG1jo/BnSMmX2uEbV722lY 
hzINLY49obkCgYBEpN2YyG4T4+PtuXznxRkfogVk+kiVeVx68KtFJLbnw//UGT4i EHjbBmLOevDT+vTb0QzzkWmh3nzeYRM4aUiatjCPzP79VJPsW54whIDMHZ32KpPr TQMgPt3kSdpO5zN7KiRIAzGcXE2n/e7GYGUQ1uWr2XMu/4byD5SzdCscQwJ/Ymii LoKtRvk/zWYHr7uwWSeR5dVvpQ3E/XtONAImrIRd3cRqXfJUqTrTRKxDJXkCmyBc 5FkWg0t0LUkTSDiQCJqcUDA3EINFR1kwthxja72pfpwc5Be/nV9BmuuUysVD8myB qw8A/KsXsHKn5QrRuVXOa5hvLEXbuqYw29mX6QKBgDGDzIzpR9uPtBCqzWJmc+IJ z4m/1NFlEz0N0QNwZ/TlhyT60ytJNcmW8qkgOSTHG7RDueEIzjQ8LKJYH7kXjfcF 6AJczUG5PQo9cdJKo9JP3e1037P/58JpLcLe8xxQ4ce03zZpzhsxR2G/tz8DstJs b8jpnLyqfGrcV2feUtIZ -----END PRIVATE KEY----- nats.go-1.41.0/test/configs/certs/key_noip.pem000066400000000000000000000032501477351342400212270ustar00rootroot00000000000000-----BEGIN PRIVATE KEY----- MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCt8Ic/MmaHejGb ylQKrqYayiXVxfxJayEL3qcVyJw8zUEdMiV3aHuD6F0Uei4L6kGRpCDsIBcPy41M G4ig0ndGZX7RoOZMS8aMOaGzWzRXyKEQDBNUOnSQezu62kFigfXctXNsgzj0oVKr vcKVPnn/r6Su39YR2SkguLQV4zKTXDbOVrQBAqFFMaOhHuq4xAEEVxFE9FXq4q5o CHCFwFv/ur/ei7yhxgOiL4rrnrd5OmdqsHDT6AinEiTVu1eIcjfI5i7bh+AqcRos kJyIKQx1KITWf3UtUAg2K8/zujNyHnoH2yDamDs5hpZM4kpCYRqbC2dNbRPRn0Df EseNnVBpAgMBAAECggEAcmiqXRwmqmfqZ4Ge4+Pap/ZdCo6OkjAf7XHHTyHD+o47 jRul3zPfQnU9fDGdRgMQm95sNUQqRx5pUy0tIjMtdyVdVD9UG80fzK4/uPx9olv5 7Nc0g4trjnkwYYgbx9KZyFGlmTN67BWMjiBj88zDbDW4ybm7UcQYNEipU1g8tQW1 tUwcZ1oahXfzO75vcMqDVlS2IE0s0AD9sh+AaJIwxV9kSLNjlSwkpsH6PBKKB/3r WvG2p6Og1whdQ54PGADUVSx1yWFyXQDeygqLmryEWaHJQz1jt7bvaaAMy2PTdwVf A5LVG3VHkoQOBv8imtpCbU2J7zAk9ypDuRUlpa8h/QKBgQDdCCCbV02BhrqDYchm ojB95Vx8KtvQdXhvsxShxyuIktuB7W+NnheBmLY0TNcYSQyzithCUBhtmyaC5S4f dHmT52e7HS0xaL9r9BhAQrtWReMcplKB1IIXtdYXEY3qOjZMxX3seJo0iBWS3hMH EG6tC6tlr5ZXOKJOrBMGuMgplwKBgQDJdSYkC3AX2p+4BNf3hgQyzotuSVSbx/zu 0ZHhi8Wp7yF49c8+9+ahO9AMrVM0ZSh2buznfF46FNC/C55M7a9Rn60sFQQ16b5L rJTzlPoUGTnPLt8C3TdMIFg/5cAW6ZgZWNlU3aVU0W34NVh/H2m/M72tGrk250zs YhZ8/RGV/wKBgQCKlMfs3YXoyhIywaImR1Zj+ORNrYl4X86NKhirffbbgEhEZBvn DNHsHVVP4UWTImnmQA1rNlC6l+ZDd3G9owd/Jj0xYg+txOEPzFFQKQbQBq1ojxd3 
80dFmmqKuCTkUG8vHzvegZcdjJ0KIlaHvVPHB2QFM1vtf8Kz1MtxEXXeLQKBgDn0 Bm3WEH/8N3gzhIFDP0/yVO/8DmfmByAYj5PHpqw1C3cFl4HwxJrbXwVWkxn+g75W OLZ684xX0pky2W4d7hJYEfQdc6GixUh1tD/COpKvkw7D2Am146N1po1zJWgx+LxJ 7/NW86nLuYvupK+lNMF5O/ZhOqjNrzZNHVUFZBq3AoGAPwixh7/ZMX6mmm8foImh qibytx72gl1jhHWSaX3rwrSOO9dxO2rlI7LOZQrarU632Y9KMkP3HNbBHPRkA4MI 6I9wqawRzGjcpeXIMlPzOHDHYLyrTpEzo8nrSNk/cM8P4RxE12FqySzQIkiN06J7 AxJ7hVqtX6wZIoqoOa9aK1E= -----END PRIVATE KEY----- nats.go-1.41.0/test/configs/certs/server.pem000066400000000000000000000130141477351342400207170ustar00rootroot00000000000000Certificate: Data: Version: 3 (0x2) Serial Number: 1d:d9:1f:06:dd:fd:90:26:4e:27:ea:2e:01:4b:31:e6:d2:49:31:1f Signature Algorithm: sha256WithRSAEncryption Issuer: C=US, ST=California, O=Synadia, OU=nats.io, CN=Certificate Authority 2022-08-27 Validity Not Before: Aug 27 20:23:02 2022 GMT Not After : Aug 24 20:23:02 2032 GMT Subject: C=US, ST=California, O=Synadia, OU=nats.io, CN=localhost Subject Public Key Info: Public Key Algorithm: rsaEncryption RSA Public-Key: (2048 bit) Modulus: 00:e6:fb:47:65:cd:c9:a2:2d:af:8b:cd:d5:6a:79: 54:3c:07:5f:eb:5a:71:2b:2b:e5:6f:be:31:fb:16: 65:68:76:0e:59:e7:e4:57:ca:88:e9:77:d6:41:ad: 57:7a:42:b2:d2:54:c4:0f:7c:5b:c1:bc:61:97:e3: 22:3a:3e:1e:4a:5d:47:9f:6b:7d:6f:34:e3:8c:86: 9d:85:19:29:9a:11:58:44:4c:a1:90:d3:14:61:e1: 57:da:01:ea:ce:3f:90:ae:9e:5d:13:6d:2c:89:ca: 39:15:6b:b6:9e:32:d7:2a:4c:48:85:2f:b0:1e:d8: 4b:62:32:14:eb:32:b6:29:04:34:3c:af:39:b6:8b: 52:32:4d:bf:43:5f:9b:fb:0d:43:a6:ad:2c:a7:41: 29:55:c9:70:b3:b5:15:46:34:bf:e4:1e:52:2d:a4: 49:2e:d5:21:ed:fc:00:f7:a2:0b:bc:12:0a:90:64: 50:7c:c5:14:70:f5:fb:9b:62:08:78:43:49:31:f3: 47:b8:93:d4:2d:4c:a9:dc:17:70:76:34:66:ff:65: c1:39:67:e9:a6:1c:80:6a:f0:9d:b3:28:c8:a3:3a: b7:5d:de:6e:53:6d:09:b3:0d:b1:13:10:e8:ec:e0: bd:5e:a1:94:4b:70:bf:dc:bd:8b:b9:82:65:dd:af: 81:7b Exponent: 65537 (0x10001) X509v3 extensions: X509v3 Basic Constraints: CA:FALSE Netscape Comment: nats.io nats-server test-suite certificate X509v3 Subject 
Key Identifier: 2B:8C:A3:8B:DB:DB:5C:CE:18:DB:F6:A8:31:4E:C2:3E:EE:D3:40:7E X509v3 Authority Key Identifier: keyid:FE:CF:7F:3E:2E:EE:C4:AD:EA:5B:6F:88:45:6C:C7:88:48:14:8B:3A DirName:/C=US/ST=California/O=Synadia/OU=nats.io/CN=Certificate Authority 2022-08-27 serial:49:9C:16:ED:BB:5C:F4:45:1B:AC:C5:AD:8C:7A:5B:33:40:B6:6D:21 X509v3 Subject Alternative Name: DNS:localhost, IP Address:127.0.0.1, IP Address:0:0:0:0:0:0:0:1 Netscape Cert Type: SSL Client, SSL Server X509v3 Key Usage: Digital Signature, Key Encipherment X509v3 Extended Key Usage: TLS Web Server Authentication, Netscape Server Gated Crypto, Microsoft Server Gated Crypto, TLS Web Client Authentication Signature Algorithm: sha256WithRSAEncryption 54:49:34:2b:38:d1:aa:3b:43:60:4c:3f:6a:f8:74:ca:49:53: a1:af:12:d3:a8:17:90:7b:9d:a3:69:13:6e:da:2c:b7:61:31: ac:eb:00:93:92:fc:0c:10:d4:18:a0:16:61:94:4b:42:cb:eb: 7a:f6:80:c6:45:c0:9c:09:aa:a9:48:e8:36:e3:c5:be:36:e0: e9:78:2a:bb:ab:64:9b:20:eb:e6:0f:63:2b:59:c3:58:0b:3a: 84:15:04:c1:7e:12:03:1b:09:25:8d:4c:03:e8:18:26:c0:6c: b7:90:b1:fd:bc:f1:cf:d0:d5:4a:03:15:71:0c:7d:c1:76:87: 92:f1:3e:bc:75:51:5a:c4:36:a4:ff:91:98:df:33:5d:a7:38: de:50:29:fd:0f:c8:55:e6:8f:24:c2:2e:98:ab:d9:5d:65:2f: 50:cc:25:f6:84:f2:21:2e:5e:76:d0:86:1e:69:8b:cb:8a:3a: 2d:79:21:5e:e7:f7:2d:06:18:a1:13:cb:01:c3:46:91:2a:de: b4:82:d7:c3:62:6f:08:a1:d5:90:19:30:9d:64:8e:e4:f8:ba: 4f:2f:ba:13:b4:a3:9f:d1:d5:77:64:8a:3e:eb:53:c5:47:ac: ab:3e:0e:7a:9b:a6:f4:48:25:66:eb:c7:4c:f9:50:24:eb:71: e0:75:ae:e6 -----BEGIN CERTIFICATE----- MIIE+TCCA+GgAwIBAgIUHdkfBt39kCZOJ+ouAUsx5tJJMR8wDQYJKoZIhvcNAQEL BQAwcTELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExEDAOBgNVBAoM B1N5bmFkaWExEDAOBgNVBAsMB25hdHMuaW8xKTAnBgNVBAMMIENlcnRpZmljYXRl IEF1dGhvcml0eSAyMDIyLTA4LTI3MB4XDTIyMDgyNzIwMjMwMloXDTMyMDgyNDIw MjMwMlowWjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExEDAOBgNV BAoMB1N5bmFkaWExEDAOBgNVBAsMB25hdHMuaW8xEjAQBgNVBAMMCWxvY2FsaG9z dDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOb7R2XNyaItr4vN1Wp5 
VDwHX+tacSsr5W++MfsWZWh2Dlnn5FfKiOl31kGtV3pCstJUxA98W8G8YZfjIjo+ HkpdR59rfW8044yGnYUZKZoRWERMoZDTFGHhV9oB6s4/kK6eXRNtLInKORVrtp4y 1ypMSIUvsB7YS2IyFOsytikENDyvObaLUjJNv0Nfm/sNQ6atLKdBKVXJcLO1FUY0 v+QeUi2kSS7VIe38APeiC7wSCpBkUHzFFHD1+5tiCHhDSTHzR7iT1C1MqdwXcHY0 Zv9lwTln6aYcgGrwnbMoyKM6t13eblNtCbMNsRMQ6OzgvV6hlEtwv9y9i7mCZd2v gXsCAwEAAaOCAZ4wggGaMAkGA1UdEwQCMAAwOQYJYIZIAYb4QgENBCwWKm5hdHMu aW8gbmF0cy1zZXJ2ZXIgdGVzdC1zdWl0ZSBjZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQU K4yji9vbXM4Y2/aoMU7CPu7TQH4wga4GA1UdIwSBpjCBo4AU/s9/Pi7uxK3qW2+I RWzHiEgUizqhdaRzMHExCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlh MRAwDgYDVQQKDAdTeW5hZGlhMRAwDgYDVQQLDAduYXRzLmlvMSkwJwYDVQQDDCBD ZXJ0aWZpY2F0ZSBBdXRob3JpdHkgMjAyMi0wOC0yN4IUSZwW7btc9EUbrMWtjHpb M0C2bSEwLAYDVR0RBCUwI4IJbG9jYWxob3N0hwR/AAABhxAAAAAAAAAAAAAAAAAA AAABMBEGCWCGSAGG+EIBAQQEAwIGwDALBgNVHQ8EBAMCBaAwNAYDVR0lBC0wKwYI KwYBBQUHAwEGCWCGSAGG+EIEAQYKKwYBBAGCNwoDAwYIKwYBBQUHAwIwDQYJKoZI hvcNAQELBQADggEBAFRJNCs40ao7Q2BMP2r4dMpJU6GvEtOoF5B7naNpE27aLLdh MazrAJOS/AwQ1BigFmGUS0LL63r2gMZFwJwJqqlI6Dbjxb424Ol4KrurZJsg6+YP YytZw1gLOoQVBMF+EgMbCSWNTAPoGCbAbLeQsf288c/Q1UoDFXEMfcF2h5LxPrx1 UVrENqT/kZjfM12nON5QKf0PyFXmjyTCLpir2V1lL1DMJfaE8iEuXnbQhh5pi8uK Oi15IV7n9y0GGKETywHDRpEq3rSC18Nibwih1ZAZMJ1kjuT4uk8vuhO0o5/R1Xdk ij7rU8VHrKs+DnqbpvRIJWbrx0z5UCTrceB1ruY= -----END CERTIFICATE----- nats.go-1.41.0/test/configs/certs/server_noip.pem000066400000000000000000000126721477351342400217550ustar00rootroot00000000000000Certificate: Data: Version: 3 (0x2) Serial Number: 1d:5c:7c:59:0c:cd:27:83:dd:97:64:53:b0:44:3c:b4:5b:d4:fc:d1 Signature Algorithm: sha256WithRSAEncryption Issuer: C=US, ST=California, O=Synadia, OU=nats.io, CN=Certificate Authority 2022-08-27 Validity Not Before: Aug 27 20:23:02 2022 GMT Not After : Aug 24 20:23:02 2032 GMT Subject: C=US, ST=California, O=Synadia, OU=nats.io, CN=localhost Subject Public Key Info: Public Key Algorithm: rsaEncryption RSA Public-Key: (2048 bit) Modulus: 00:ad:f0:87:3f:32:66:87:7a:31:9b:ca:54:0a:ae: 
a6:1a:ca:25:d5:c5:fc:49:6b:21:0b:de:a7:15:c8: 9c:3c:cd:41:1d:32:25:77:68:7b:83:e8:5d:14:7a: 2e:0b:ea:41:91:a4:20:ec:20:17:0f:cb:8d:4c:1b: 88:a0:d2:77:46:65:7e:d1:a0:e6:4c:4b:c6:8c:39: a1:b3:5b:34:57:c8:a1:10:0c:13:54:3a:74:90:7b: 3b:ba:da:41:62:81:f5:dc:b5:73:6c:83:38:f4:a1: 52:ab:bd:c2:95:3e:79:ff:af:a4:ae:df:d6:11:d9: 29:20:b8:b4:15:e3:32:93:5c:36:ce:56:b4:01:02: a1:45:31:a3:a1:1e:ea:b8:c4:01:04:57:11:44:f4: 55:ea:e2:ae:68:08:70:85:c0:5b:ff:ba:bf:de:8b: bc:a1:c6:03:a2:2f:8a:eb:9e:b7:79:3a:67:6a:b0: 70:d3:e8:08:a7:12:24:d5:bb:57:88:72:37:c8:e6: 2e:db:87:e0:2a:71:1a:2c:90:9c:88:29:0c:75:28: 84:d6:7f:75:2d:50:08:36:2b:cf:f3:ba:33:72:1e: 7a:07:db:20:da:98:3b:39:86:96:4c:e2:4a:42:61: 1a:9b:0b:67:4d:6d:13:d1:9f:40:df:12:c7:8d:9d: 50:69 Exponent: 65537 (0x10001) X509v3 extensions: X509v3 Basic Constraints: CA:FALSE Netscape Comment: nats.io nats-server test-suite certificate X509v3 Subject Key Identifier: C9:AA:3C:08:39:7E:C1:42:C0:3D:B7:2F:84:21:E7:8A:30:E7:C7:B1 X509v3 Authority Key Identifier: keyid:FE:CF:7F:3E:2E:EE:C4:AD:EA:5B:6F:88:45:6C:C7:88:48:14:8B:3A DirName:/C=US/ST=California/O=Synadia/OU=nats.io/CN=Certificate Authority 2022-08-27 serial:49:9C:16:ED:BB:5C:F4:45:1B:AC:C5:AD:8C:7A:5B:33:40:B6:6D:21 X509v3 Subject Alternative Name: DNS:localhost Netscape Cert Type: SSL Client, SSL Server X509v3 Key Usage: Digital Signature, Key Encipherment X509v3 Extended Key Usage: TLS Web Server Authentication, Netscape Server Gated Crypto, Microsoft Server Gated Crypto, TLS Web Client Authentication Signature Algorithm: sha256WithRSAEncryption 9b:63:ae:ec:56:ec:0c:7a:d5:88:d1:0a:0a:81:29:37:4f:a6: 08:b8:78:78:23:af:5b:b7:65:61:d7:64:2a:c9:e7:a6:d2:b1: cb:36:bf:23:2e:2d:48:85:7f:16:0f:64:af:03:db:5d:0e:a7: 14:c5:f6:04:b2:6b:92:27:ba:cb:d2:13:25:a2:15:b0:8e:4a: 2d:eb:41:18:09:b1:68:d5:0f:6b:56:da:86:ed:4a:7a:29:30: 09:77:63:a4:64:3d:e3:2e:d7:6f:1a:8c:96:c9:cb:81:fe:a3: 6d:35:e3:09:ea:9b:2e:da:8c:8e:c8:c9:69:b1:83:e7:6f:2d: 
5f:a1:ac:32:ae:29:57:a9:5c:9b:7d:f0:fd:47:3c:f3:6a:d0: eb:77:8d:70:06:a2:74:3d:d6:37:1e:7b:e7:d9:e4:33:c9:9d: ad:fa:24:c6:4d:e2:2c:c9:25:cb:75:be:8d:e9:83:7e:ad:db: 53:9e:97:be:d5:7f:83:90:fc:75:1d:02:29:b7:99:18:a3:39: 25:a2:54:b7:21:7d:be:0b:4c:ea:ff:80:b9:4b:5e:21:ed:25: ad:d4:62:52:59:79:83:32:df:30:a1:64:68:05:cc:35:ad:8b: d3:66:6b:b1:31:b7:b3:b2:d8:0f:5b:96:40:ef:57:1d:7f:b0: b0:f4:e9:db -----BEGIN CERTIFICATE----- MIIE4TCCA8mgAwIBAgIUHVx8WQzNJ4Pdl2RTsEQ8tFvU/NEwDQYJKoZIhvcNAQEL BQAwcTELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExEDAOBgNVBAoM B1N5bmFkaWExEDAOBgNVBAsMB25hdHMuaW8xKTAnBgNVBAMMIENlcnRpZmljYXRl IEF1dGhvcml0eSAyMDIyLTA4LTI3MB4XDTIyMDgyNzIwMjMwMloXDTMyMDgyNDIw MjMwMlowWjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExEDAOBgNV BAoMB1N5bmFkaWExEDAOBgNVBAsMB25hdHMuaW8xEjAQBgNVBAMMCWxvY2FsaG9z dDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK3whz8yZod6MZvKVAqu phrKJdXF/ElrIQvepxXInDzNQR0yJXdoe4PoXRR6LgvqQZGkIOwgFw/LjUwbiKDS d0ZlftGg5kxLxow5obNbNFfIoRAME1Q6dJB7O7raQWKB9dy1c2yDOPShUqu9wpU+ ef+vpK7f1hHZKSC4tBXjMpNcNs5WtAECoUUxo6Ee6rjEAQRXEUT0VerirmgIcIXA W/+6v96LvKHGA6Iviuuet3k6Z2qwcNPoCKcSJNW7V4hyN8jmLtuH4CpxGiyQnIgp DHUohNZ/dS1QCDYrz/O6M3IeegfbINqYOzmGlkziSkJhGpsLZ01tE9GfQN8Sx42d UGkCAwEAAaOCAYYwggGCMAkGA1UdEwQCMAAwOQYJYIZIAYb4QgENBCwWKm5hdHMu aW8gbmF0cy1zZXJ2ZXIgdGVzdC1zdWl0ZSBjZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQU yao8CDl+wULAPbcvhCHnijDnx7Ewga4GA1UdIwSBpjCBo4AU/s9/Pi7uxK3qW2+I RWzHiEgUizqhdaRzMHExCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlh MRAwDgYDVQQKDAdTeW5hZGlhMRAwDgYDVQQLDAduYXRzLmlvMSkwJwYDVQQDDCBD ZXJ0aWZpY2F0ZSBBdXRob3JpdHkgMjAyMi0wOC0yN4IUSZwW7btc9EUbrMWtjHpb M0C2bSEwFAYDVR0RBA0wC4IJbG9jYWxob3N0MBEGCWCGSAGG+EIBAQQEAwIGwDAL BgNVHQ8EBAMCBaAwNAYDVR0lBC0wKwYIKwYBBQUHAwEGCWCGSAGG+EIEAQYKKwYB BAGCNwoDAwYIKwYBBQUHAwIwDQYJKoZIhvcNAQELBQADggEBAJtjruxW7Ax61YjR CgqBKTdPpgi4eHgjr1u3ZWHXZCrJ56bSscs2vyMuLUiFfxYPZK8D210OpxTF9gSy a5InusvSEyWiFbCOSi3rQRgJsWjVD2tW2obtSnopMAl3Y6RkPeMu128ajJbJy4H+ o2014wnqmy7ajI7IyWmxg+dvLV+hrDKuKVepXJt98P1HPPNq0Ot3jXAGonQ91jce 
e+fZ5DPJna36JMZN4izJJct1vo3pg36t21Oel77Vf4OQ/HUdAim3mRijOSWiVLch fb4LTOr/gLlLXiHtJa3UYlJZeYMy3zChZGgFzDWti9Nma7Ext7Oy2A9blkDvVx1/ sLD06ds= -----END CERTIFICATE----- nats.go-1.41.0/test/configs/docker/000077500000000000000000000000001477351342400170365ustar00rootroot00000000000000nats.go-1.41.0/test/configs/docker/Dockerfile000066400000000000000000000005201477351342400210250ustar00rootroot00000000000000FROM golang:1.22 WORKDIR /usr/src/nats.go COPY . /usr/src/nats.go RUN go mod tidy -modfile go_test.mod RUN go test -run TestNone -modfile go_test.mod -tags compat ./test/... ENV NATS_URL=localhost:4222 ENTRYPOINT ["go", "test", "-v", "-modfile", "go_test.mod", "-tags", "compat", "./test/...", "-count", "1", "-parallel", "10", "-run"] nats.go-1.41.0/test/configs/tls.conf000066400000000000000000000003701477351342400172400ustar00rootroot00000000000000# Simple TLS config file port: 4443 net: localhost # net interface tls { cert_file: "./configs/certs/server.pem" key_file: "./configs/certs/key.pem" timeout: 2 } authorization { user: derek password: porkchop timeout: 1 } nats.go-1.41.0/test/configs/tls_noip_a.conf000066400000000000000000000005111477351342400205620ustar00rootroot00000000000000# TLS config file # Cert has no IPs listen: localhost:5222 tls { cert_file: "./configs/certs/server_noip.pem" key_file: "./configs/certs/key_noip.pem" timeout: 2 } authorization { user: derek password: porkchop timeout: 1 } cluster { listen: 127.0.0.1:5244 routes = [nats-route://127.0.0.1:5246] } nats.go-1.41.0/test/configs/tls_noip_b.conf000066400000000000000000000006061477351342400205700ustar00rootroot00000000000000# TLS config file # Cert has no IPs, so we use an IP here to simulate a single hostname cluster. 
listen: 127.0.0.1:5224 tls { cert_file: "./configs/certs/server_noip.pem" key_file: "./configs/certs/key_noip.pem" timeout: 2 } authorization { user: derek password: porkchop timeout: 1 } cluster { listen: 127.0.0.1:5246 routes = [nats-route://127.0.0.1:5244] } nats.go-1.41.0/test/configs/tlsverify.conf000066400000000000000000000004521477351342400204660ustar00rootroot00000000000000# Simple TLS config file port: 4443 net: localhost tls { cert_file: "./configs/certs/server.pem" key_file: "./configs/certs/key.pem" timeout: 2 # Optional certificate authority for clients ca_file: "./configs/certs/ca.pem" # Require a client certificate verify: true } nats.go-1.41.0/test/conn_test.go000066400000000000000000002433141477351342400164710ustar00rootroot00000000000000// Copyright 2012-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package test import ( "bufio" "bytes" "crypto/tls" "crypto/x509" "errors" "fmt" "net" "os" "path/filepath" "runtime" "strconv" "strings" "sync" "sync/atomic" "testing" "time" "github.com/nats-io/nats-server/v2/server" "github.com/nats-io/nats-server/v2/test" "github.com/nats-io/nats.go" ) func TestDefaultConnection(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) nc.Close() } func TestConnectionStatus(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() if nc.Status() != nats.CONNECTED || nc.Status().String() != "CONNECTED" { t.Fatal("Should have status set to CONNECTED") } if !nc.IsConnected() { t.Fatal("Should have status set to CONNECTED") } nc.Close() if nc.Status() != nats.CLOSED || nc.Status().String() != "CLOSED" { t.Fatal("Should have status set to CLOSED") } if !nc.IsClosed() { t.Fatal("Should have status set to CLOSED") } } func TestConnClosedCB(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() ch := make(chan bool) o := nats.GetDefaultOptions() o.Url = nats.DefaultURL o.ClosedCB = func(_ *nats.Conn) { ch <- true } nc, err := o.Connect() if err != nil { t.Fatalf("Should have connected ok: %v", err) } nc.Close() if e := Wait(ch); e != nil { t.Fatalf("Closed callback not triggered\n") } } func TestCloseDisconnectedErrCB(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() ch := make(chan bool) o := nats.GetDefaultOptions() o.Url = nats.DefaultURL o.AllowReconnect = false o.DisconnectedErrCB = func(_ *nats.Conn, _ error) { ch <- true } nc, err := o.Connect() if err != nil { t.Fatalf("Should have connected ok: %v", err) } nc.Close() if e := Wait(ch); e != nil { t.Fatal("Disconnected callback not triggered") } } func TestServerStopDisconnectedErrCB(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() ch := make(chan bool) o := nats.GetDefaultOptions() o.Url = nats.DefaultURL o.AllowReconnect = false o.DisconnectedErrCB = func(nc *nats.Conn, _ 
error) { ch <- true } nc, err := o.Connect() if err != nil { t.Fatalf("Should have connected ok: %v", err) } defer nc.Close() s.Shutdown() if e := Wait(ch); e != nil { t.Fatalf("Disconnected callback not triggered\n") } } func TestServerSecureConnections(t *testing.T) { s, opts := RunServerWithConfig("./configs/tls.conf") defer s.Shutdown() endpoint := fmt.Sprintf("%s:%d", opts.Host, opts.Port) secureURL := fmt.Sprintf("nats://%s:%s@%s/", opts.Username, opts.Password, endpoint) // Make sure this succeeds nc, err := nats.Connect(secureURL, nats.Secure(), nats.RootCAs("./configs/certs/ca.pem")) if err != nil { t.Fatalf("Failed to create secure (TLS) connection: %v", err) } defer nc.Close() omsg := []byte("Hello World") checkRecv := make(chan bool) received := 0 nc.Subscribe("foo", func(m *nats.Msg) { received++ if !bytes.Equal(m.Data, omsg) { t.Fatal("Message received does not match") } checkRecv <- true }) err = nc.Publish("foo", omsg) if err != nil { t.Fatalf("Failed to publish on secure (TLS) connection: %v", err) } nc.Flush() state, err := nc.TLSConnectionState() if err != nil { t.Fatalf("Expected connection state: %v", err) } if !state.HandshakeComplete { t.Fatalf("Expected valid connection state") } if err := Wait(checkRecv); err != nil { t.Fatal("Failed receiving message") } nc.Close() // Server required, but not specified in Connect(), should switch automatically nc, err = nats.Connect(secureURL, nats.RootCAs("./configs/certs/ca.pem")) if err != nil { t.Fatalf("Failed to create secure (TLS) connection: %v", err) } nc.Close() // Test flag mismatch // Wanted but not available.. ds := RunDefaultServer() defer ds.Shutdown() nc, err = nats.Connect(nats.DefaultURL, nats.Secure(), nats.RootCAs("./configs/certs/ca.pem")) if err == nil || nc != nil || err != nats.ErrSecureConnWanted { if nc != nil { nc.Close() } t.Fatalf("Should have failed to create connection: %v", err) } // Let's be more TLS correct and verify servername, endpoint etc. 
// Now do more advanced checking, verifying servername and using rootCA. // Setup our own TLSConfig using RootCA from our self signed cert. rootPEM, err := os.ReadFile("./configs/certs/ca.pem") if err != nil || rootPEM == nil { t.Fatalf("failed to read root certificate") } pool := x509.NewCertPool() ok := pool.AppendCertsFromPEM([]byte(rootPEM)) if !ok { t.Fatal("failed to parse root certificate") } tls1 := &tls.Config{ ServerName: opts.Host, RootCAs: pool, MinVersion: tls.VersionTLS12, } nc, err = nats.Connect(secureURL, nats.Secure(tls1), nats.RootCAs("./configs/certs/ca.pem")) if err != nil { t.Fatalf("Got an error on Connect with Secure Options: %+v\n", err) } defer nc.Close() tls2 := &tls.Config{ ServerName: "OtherHostName", RootCAs: pool, MinVersion: tls.VersionTLS12, } nc2, err := nats.Connect(secureURL, nats.Secure(tls1, tls2)) if err == nil { nc2.Close() t.Fatal("Was expecting an error!") } } func TestClientTLSConfig(t *testing.T) { s, opts := RunServerWithConfig("./configs/tlsverify.conf") defer s.Shutdown() endpoint := fmt.Sprintf("%s:%d", opts.Host, opts.Port) secureURL := fmt.Sprintf("nats://%s", endpoint) // Make sure this fails nc, err := nats.Connect(secureURL, nats.Secure()) if err == nil { nc.Close() t.Fatal("Should have failed (TLS) connection without client certificate") } cert, err := os.ReadFile("./configs/certs/client-cert.pem") if err != nil { t.Fatal("Failed to read client certificate") } key, err := os.ReadFile("./configs/certs/client-key.pem") if err != nil { t.Fatal("Failed to read client key") } rootCAs, err := os.ReadFile("./configs/certs/ca.pem") if err != nil { t.Fatal("Failed to read root CAs") } certCB := func() (tls.Certificate, error) { cert, err := tls.X509KeyPair(cert, key) if err != nil { return tls.Certificate{}, fmt.Errorf("nats: error loading client certificate: %w", err) } cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0]) if err != nil { return tls.Certificate{}, fmt.Errorf("nats: error parsing client 
certificate: %w", err) } return cert, nil } caCB := func() (*x509.CertPool, error) { pool := x509.NewCertPool() ok := pool.AppendCertsFromPEM(rootCAs) if !ok { return nil, errors.New("nats: failed to parse root certificate from") } return pool, nil } // Check parameters validity _, err = nats.Connect(secureURL, nats.ClientTLSConfig(nil, nil)) if !errors.Is(err, nats.ErrClientCertOrRootCAsRequired) { t.Fatalf("Expected error %q, got %q", nats.ErrClientCertOrRootCAsRequired, err) } certErr := &tls.CertificateVerificationError{} // Should fail because of missing CA _, err = nats.Connect(secureURL, nats.ClientCert("./configs/certs/client-cert.pem", "./configs/certs/client-key.pem")) if ok := errors.As(err, &certErr); !ok { t.Fatalf("Expected error %q, got %q", nats.ErrClientCertOrRootCAsRequired, err) } // Should fail because of missing certificate _, err = nats.Connect(secureURL, nats.ClientTLSConfig(nil, caCB)) if !strings.Contains(err.Error(), "bad certificate") && !strings.Contains(err.Error(), "certificate required") { t.Fatalf("Expected missing certificate error; got: %s", err) } nc, err = nats.Connect(secureURL, nats.ClientTLSConfig(certCB, caCB)) if err != nil { t.Fatalf("Failed to create (TLS) connection: %v", err) } defer nc.Close() omsg := []byte("Hello!") checkRecv := make(chan bool) received := 0 nc.Subscribe("foo", func(m *nats.Msg) { received++ if !bytes.Equal(m.Data, omsg) { t.Fatal("Message received does not match") } checkRecv <- true }) err = nc.Publish("foo", omsg) if err != nil { t.Fatalf("Failed to publish on secure (TLS) connection: %v", err) } nc.Flush() if err := Wait(checkRecv); err != nil { t.Fatal("Failed to receive message") } } func TestClientCertificate(t *testing.T) { s, opts := RunServerWithConfig("./configs/tlsverify.conf") defer s.Shutdown() endpoint := fmt.Sprintf("%s:%d", opts.Host, opts.Port) secureURL := fmt.Sprintf("nats://%s", endpoint) // Make sure this fails nc, err := nats.Connect(secureURL, nats.Secure()) if err == nil { 
nc.Close() t.Fatal("Should have failed (TLS) connection without client certificate") } // Check parameters validity nc, err = nats.Connect(secureURL, nats.ClientCert("", "")) if err == nil { nc.Close() t.Fatal("Should have failed due to invalid parameters") } // Should fail because wrong key nc, err = nats.Connect(secureURL, nats.ClientCert("./configs/certs/client-cert.pem", "./configs/certs/key.pem")) if err == nil { nc.Close() t.Fatal("Should have failed due to invalid key") } // Should fail because no CA nc, err = nats.Connect(secureURL, nats.ClientCert("./configs/certs/client-cert.pem", "./configs/certs/client-key.pem")) if err == nil { nc.Close() t.Fatal("Should have failed due to missing ca") } nc, err = nats.Connect(secureURL, nats.RootCAs("./configs/certs/ca.pem"), nats.ClientCert("./configs/certs/client-cert.pem", "./configs/certs/client-key.pem")) if err != nil { t.Fatalf("Failed to create (TLS) connection: %v", err) } defer nc.Close() omsg := []byte("Hello!") checkRecv := make(chan bool) received := 0 nc.Subscribe("foo", func(m *nats.Msg) { received++ if !bytes.Equal(m.Data, omsg) { t.Fatal("Message received does not match") } checkRecv <- true }) err = nc.Publish("foo", omsg) if err != nil { t.Fatalf("Failed to publish on secure (TLS) connection: %v", err) } nc.Flush() if err := Wait(checkRecv); err != nil { t.Fatal("Failed to receive message") } } func TestClientCertificateReloadOnServerRestart(t *testing.T) { copyFiles := func(t *testing.T, cpFiles map[string]string) { for from, to := range cpFiles { content, err := os.ReadFile(from) if err != nil { t.Fatalf("Error reading file: %s", err) } if err := os.WriteFile(to, content, 0640); err != nil { t.Fatalf("Error writing file: %s", err) } } } s, opts := RunServerWithConfig("./configs/tlsverify.conf") defer s.Shutdown() endpoint := fmt.Sprintf("%s:%d", opts.Host, opts.Port) secureURL := fmt.Sprintf("nats://%s", endpoint) tmpCertDir := t.TempDir() certFile := filepath.Join(tmpCertDir, "client-cert.pem") 
keyFile := filepath.Join(tmpCertDir, "client-key.pem") caFile := filepath.Join(tmpCertDir, "ca.pem") // copy valid cert files to tmp dir filesToCopy := map[string]string{ "./configs/certs/client-cert.pem": certFile, "./configs/certs/client-key.pem": keyFile, "./configs/certs/ca.pem": caFile, } copyFiles(t, filesToCopy) dcChan, rcChan, errChan := make(chan bool, 1), make(chan bool, 1), make(chan error, 1) nc, err := nats.Connect(secureURL, nats.RootCAs(caFile), nats.ClientCert(certFile, keyFile), nats.ReconnectWait(100*time.Millisecond), nats.ErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, err error) { errChan <- err }), nats.DisconnectErrHandler(func(_ *nats.Conn, _ error) { dcChan <- true }), nats.ReconnectHandler(func(_ *nats.Conn) { rcChan <- true }), ) if err != nil { t.Fatalf("Failed to create (TLS) connection: %v", err) } defer nc.Close() // overwrite client certificate files with invalid ones, those // should be loaded on server restart filesToCopy = map[string]string{ "./configs/certs/client-cert-invalid.pem": certFile, "./configs/certs/client-key-invalid.pem": keyFile, } copyFiles(t, filesToCopy) // restart server s.Shutdown() s, _ = RunServerWithConfig("./configs/tlsverify.conf") defer s.Shutdown() // wait for disconnected signal if err := Wait(dcChan); err != nil { t.Fatal("Failed to receive disconnect signal") } // wait for reconnection error (bad certificate) select { case err := <-errChan: if !strings.Contains(err.Error(), "bad certificate") && !strings.Contains(err.Error(), "certificate required") { t.Fatalf("Expected bad certificate error; got: %s", err) } case <-time.After(5 * time.Second): t.Fatalf("Timeout waiting for reconnect error") } // overwrite cert files with valid ones again, // so that subsequent reconnect attempt should succeed // when cert files are reloaded filesToCopy = map[string]string{ "./configs/certs/client-cert.pem": certFile, "./configs/certs/client-key.pem": keyFile, } copyFiles(t, filesToCopy) // wait for reconnect 
signal if err := Wait(rcChan); err != nil { t.Fatal("Failed to receive reconnect signal") } // pub-sub test message to make sure connection is OK omsg := []byte("Hello!") checkRecv := make(chan bool) received := 0 nc.Subscribe("foo", func(m *nats.Msg) { received++ if !bytes.Equal(m.Data, omsg) { t.Fatal("Message received does not match") } checkRecv <- true }) err = nc.Publish("foo", omsg) if err != nil { t.Fatalf("Failed to publish on secure (TLS) connection: %v", err) } nc.Flush() if err := Wait(checkRecv); err != nil { t.Fatal("Failed to receive message") } } func TestServerTLSHintConnections(t *testing.T) { s, opts := RunServerWithConfig("./configs/tls.conf") defer s.Shutdown() endpoint := fmt.Sprintf("%s:%d", opts.Host, opts.Port) secureURL := fmt.Sprintf("tls://%s:%s@%s/", opts.Username, opts.Password, endpoint) nc, err := nats.Connect(secureURL, nats.RootCAs("./configs/certs/badca.pem")) if err == nil { nc.Close() t.Fatal("Expected an error from bad RootCA file") } nc, err = nats.Connect(secureURL, nats.RootCAs("./configs/certs/ca.pem")) if err != nil { t.Fatalf("Failed to create secure (TLS) connection: %v", err) } defer nc.Close() } func TestClosedConnections(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() sub, _ := nc.SubscribeSync("foo") if sub == nil { t.Fatal("Failed to create valid subscription") } // Test all API endpoints do the right thing with a closed connection. 
nc.Close() if err := nc.Publish("foo", nil); err != nats.ErrConnectionClosed { t.Fatalf("Publish on closed conn did not fail properly: %v\n", err) } if err := nc.PublishMsg(&nats.Msg{Subject: "foo"}); err != nats.ErrConnectionClosed { t.Fatalf("PublishMsg on closed conn did not fail properly: %v\n", err) } if err := nc.Flush(); err != nats.ErrConnectionClosed { t.Fatalf("Flush on closed conn did not fail properly: %v\n", err) } _, err := nc.Subscribe("foo", nil) if err != nats.ErrConnectionClosed { t.Fatalf("Subscribe on closed conn did not fail properly: %v\n", err) } _, err = nc.SubscribeSync("foo") if err != nats.ErrConnectionClosed { t.Fatalf("SubscribeSync on closed conn did not fail properly: %v\n", err) } _, err = nc.QueueSubscribe("foo", "bar", nil) if err != nats.ErrConnectionClosed { t.Fatalf("QueueSubscribe on closed conn did not fail properly: %v\n", err) } _, err = nc.Request("foo", []byte("help"), 10*time.Millisecond) if err != nats.ErrConnectionClosed { t.Fatalf("Request on closed conn did not fail properly: %v\n", err) } if _, err = sub.NextMsg(10); err != nats.ErrConnectionClosed { t.Fatalf("NextMessage on closed conn did not fail properly: %v\n", err) } if err = sub.Unsubscribe(); err != nats.ErrConnectionClosed { t.Fatalf("Unsubscribe on closed conn did not fail properly: %v\n", err) } } func TestErrOnConnectAndDeadlock(t *testing.T) { // We will hand run a fake server that will timeout and not return a proper // INFO proto. This is to test that we do not deadlock. Issue #18 l, e := net.Listen("tcp", ":0") if e != nil { t.Fatal("Could not listen on an ephemeral port") } tl := l.(*net.TCPListener) defer tl.Close() addr := tl.Addr().(*net.TCPAddr) errCh := make(chan error, 1) go func() { conn, err := l.Accept() if err != nil { errCh <- fmt.Errorf("error accepting client connection: %v", err) return } errCh <- nil defer conn.Close() // Send back a mal-formed INFO. 
conn.Write([]byte("INFOZ \r\n")) }() go func() { natsURL := fmt.Sprintf("nats://127.0.0.1:%d/", addr.Port) nc, err := nats.Connect(natsURL) if err == nil { nc.Close() errCh <- errors.New("expected bad INFO err, got none") return } errCh <- nil }() // Setup a timer to watch for deadlock select { case e := <-errCh: if e != nil { t.Fatal(e.Error()) } case <-time.After(time.Second): t.Fatalf("Connect took too long, deadlock?") } } func TestMoreErrOnConnect(t *testing.T) { l, e := net.Listen("tcp", "127.0.0.1:0") if e != nil { t.Fatal("Could not listen on an ephemeral port") } tl := l.(*net.TCPListener) defer tl.Close() addr := tl.Addr().(*net.TCPAddr) done := make(chan bool) case1 := make(chan bool) case2 := make(chan bool) case3 := make(chan bool) case4 := make(chan bool) errCh := make(chan error, 5) go func() { for i := 0; i < 5; i++ { conn, err := l.Accept() if err != nil { errCh <- fmt.Errorf("error accepting client connection: %v", err) return } switch i { case 0: // Send back a partial INFO and close the connection. 
conn.Write([]byte("INFO")) case 1: // Send just INFO conn.Write([]byte("INFO\r\n")) // Stick around a bit <-case1 case 2: info := fmt.Sprintf("INFO {\"server_id\":\"foobar\",\"host\":\"%s\",\"port\":%d,\"auth_required\":false,\"tls_required\":false,\"max_payload\":1048576}\r\n", addr.IP, addr.Port) // Send complete INFO conn.Write([]byte(info)) // Read connect and ping commands sent from the client br := bufio.NewReaderSize(conn, 1024) if _, err := br.ReadString('\n'); err != nil { errCh <- fmt.Errorf("expected CONNECT from client, got: %s", err) return } if _, err := br.ReadString('\n'); err != nil { errCh <- fmt.Errorf("expected PING from client, got: %s", err) return } // Client expect +OK, send it but then something else than PONG conn.Write([]byte("+OK\r\n")) // Stick around a bit <-case2 case 3: info := fmt.Sprintf("INFO {\"server_id\":\"foobar\",\"host\":\"%s\",\"port\":%d,\"auth_required\":false,\"tls_required\":false,\"max_payload\":1048576}\r\n", addr.IP, addr.Port) // Send complete INFO conn.Write([]byte(info)) // Read connect and ping commands sent from the client br := bufio.NewReaderSize(conn, 1024) if _, err := br.ReadString('\n'); err != nil { errCh <- fmt.Errorf("expected CONNECT from client, got: %s", err) return } if _, err := br.ReadString('\n'); err != nil { errCh <- fmt.Errorf("expected PING from client, got: %s", err) return } // Client expect +OK, send it but then something else than PONG conn.Write([]byte("+OK\r\nXXX\r\n")) // Stick around a bit <-case3 case 4: info := "INFO {'x'}\r\n" // Send INFO with JSON marshall error conn.Write([]byte(info)) // Stick around a bit <-case4 } conn.Close() } // Hang around until asked to quit <-done }() natsURL := fmt.Sprintf("nats://127.0.0.1:%d", addr.Port) if nc, err := nats.Connect(natsURL, nats.Timeout(20*time.Millisecond)); err == nil { nc.Close() t.Fatal("Expected error, got none") } if nc, err := nats.Connect(natsURL, nats.Timeout(20*time.Millisecond)); err == nil { close(case1) nc.Close() 
t.Fatal("Expected error, got none") } close(case1) opts := nats.GetDefaultOptions() opts.Servers = []string{natsURL} opts.Timeout = 20 * time.Millisecond opts.Verbose = true if nc, err := opts.Connect(); err == nil { close(case2) nc.Close() t.Fatal("Expected error, got none") } close(case2) if nc, err := opts.Connect(); err == nil { close(case3) nc.Close() t.Fatal("Expected error, got none") } close(case3) if nc, err := opts.Connect(); err == nil { close(case4) nc.Close() t.Fatal("Expected error, got none") } close(case4) close(done) checkErrChannel(t, errCh) } func TestErrOnMaxPayloadLimit(t *testing.T) { expectedMaxPayload := int64(10) serverInfo := "INFO {\"server_id\":\"foobar\",\"host\":\"%s\",\"port\":%d,\"auth_required\":false,\"tls_required\":false,\"max_payload\":%d}\r\n" l, e := net.Listen("tcp", "127.0.0.1:0") if e != nil { t.Fatal("Could not listen on an ephemeral port") } tl := l.(*net.TCPListener) defer tl.Close() addr := tl.Addr().(*net.TCPAddr) // Send back an INFO message with custom max payload size on connect. var conn net.Conn var err error errCh := make(chan error, 1) go func() { conn, err = l.Accept() if err != nil { errCh <- fmt.Errorf("error accepting client connection: %v", err) return } defer conn.Close() info := fmt.Sprintf(serverInfo, addr.IP, addr.Port, expectedMaxPayload) conn.Write([]byte(info)) // Read connect and ping commands sent from the client line := make([]byte, 111) _, err := conn.Read(line) if err != nil { errCh <- fmt.Errorf("expected CONNECT and PING from client, got: %s", err) return } conn.Write([]byte("PONG\r\n")) // Hang around a bit to not err on EOF in client. 
time.Sleep(250 * time.Millisecond)
	}()

	// Wait for server mock to start
	time.Sleep(100 * time.Millisecond)

	natsURL := fmt.Sprintf("nats://%s:%d", addr.IP, addr.Port)
	opts := nats.GetDefaultOptions()
	opts.Servers = []string{natsURL}
	nc, err := opts.Connect()
	if err != nil {
		t.Fatalf("Expected INFO message with custom max payload, got: %s", err)
	}
	defer nc.Close()

	got := nc.MaxPayload()
	if got != expectedMaxPayload {
		t.Fatalf("Expected MaxPayload to be %d, got: %d", expectedMaxPayload, got)
	}
	err = nc.Publish("hello", []byte("hello world"))
	if err != nats.ErrMaxPayload {
		t.Fatalf("Expected to fail trying to send more than max payload, got: %s", err)
	}
	err = nc.Publish("hello", []byte("a"))
	if err != nil {
		t.Fatalf("Expected to succeed trying to send less than max payload, got: %s", err)
	}
	checkErrChannel(t, errCh)
}

// TestConnectVerbose checks that connecting with the Verbose option enabled
// succeeds against a default server.
func TestConnectVerbose(t *testing.T) {
	s := RunDefaultServer()
	defer s.Shutdown()

	o := nats.GetDefaultOptions()
	o.Verbose = true

	nc, err := o.Connect()
	if err != nil {
		t.Fatalf("Should have connected ok: %v", err)
	}
	nc.Close()
}

// getStacks returns the stack trace of the calling goroutine, or of all
// goroutines when all is true, doubling the buffer until the dump fits.
func getStacks(all bool) string {
	var (
		stacks     []byte
		stacksSize = 10000
		n          int
	)
	for {
		stacks = make([]byte, stacksSize)
		n = runtime.Stack(stacks, all)
		if n == stacksSize {
			stacksSize *= 2
			continue
		}
		break
	}
	return string(stacks[:n])
}

// isRunningInAsyncCBDispatcher returns nil when the current goroutine's
// stack shows it is executing from the library's asyncCBDispatcher, and a
// descriptive error (including the stack) otherwise.
func isRunningInAsyncCBDispatcher() error {
	strStacks := getStacks(false)
	if strings.Contains(strStacks, "asyncCBDispatcher") {
		return nil
	}
	return fmt.Errorf("callback not executed from dispatcher:\n %s", strStacks)
}

// isAsyncDispatcherRunning reports whether any goroutine in the process is
// currently running the async callback dispatcher.
func isAsyncDispatcherRunning() bool {
	strStacks := getStacks(true)
	return strings.Contains(strStacks, "asyncCBDispatcher")
}

// TestCallbacksOrder verifies that the connection callbacks (connect,
// disconnect, reconnect, async error, closed) all run from the async
// callback dispatcher and fire in the expected chronological order.
func TestCallbacksOrder(t *testing.T) {
	authS, authSOpts := RunServerWithConfig("./configs/tls.conf")
	defer authS.Shutdown()

	s := RunDefaultServer()
	defer s.Shutdown()

	firstDisconnect := true

	// Timestamps recorded by each callback, compared at the end.
	var connTime, dtime1, dtime2, rtime, atime1, atime2, ctime time.Time

	cbErrors := make(chan error, 20)

	connected := make(chan bool)
	reconnected := make(chan bool)
	closed := make(chan bool)
	asyncErr := make(chan bool, 2)
	recvCh := make(chan bool, 2)
	recvCh1 := make(chan bool)
	recvCh2 := make(chan bool)

	connCh := func(nc *nats.Conn) {
		if err := isRunningInAsyncCBDispatcher(); err != nil {
			cbErrors <- err
			connected <- true
			return
		}
		time.Sleep(50 * time.Millisecond)
		connTime = time.Now()
		connected <- true
	}

	dch := func(nc *nats.Conn) {
		if err := isRunningInAsyncCBDispatcher(); err != nil {
			cbErrors <- err
			return
		}
		time.Sleep(100 * time.Millisecond)
		if firstDisconnect {
			firstDisconnect = false
			dtime1 = time.Now()
		} else {
			dtime2 = time.Now()
		}
	}

	rch := func(nc *nats.Conn) {
		if err := isRunningInAsyncCBDispatcher(); err != nil {
			cbErrors <- err
			reconnected <- true
			return
		}
		time.Sleep(50 * time.Millisecond)
		rtime = time.Now()
		reconnected <- true
	}

	ech := func(nc *nats.Conn, sub *nats.Subscription, err error) {
		if err := isRunningInAsyncCBDispatcher(); err != nil {
			cbErrors <- err
			asyncErr <- true
			return
		}
		if sub.Subject == "foo" {
			time.Sleep(20 * time.Millisecond)
			atime1 = time.Now()
		} else {
			atime2 = time.Now()
		}
		asyncErr <- true
	}

	cch := func(nc *nats.Conn) {
		if err := isRunningInAsyncCBDispatcher(); err != nil {
			cbErrors <- err
			closed <- true
			return
		}
		ctime = time.Now()
		closed <- true
	}

	url := net.JoinHostPort(authSOpts.Host, strconv.Itoa(authSOpts.Port))
	url = "nats://" + url + "," + nats.DefaultURL

	nc, err := nats.Connect(url,
		nats.ConnectHandler(connCh),
		nats.DisconnectHandler(dch),
		nats.ReconnectHandler(rch),
		nats.ClosedHandler(cch),
		nats.ErrorHandler(ech),
		nats.ReconnectWait(50*time.Millisecond),
		nats.ReconnectJitter(0, 0),
		nats.DontRandomize())
	if err != nil {
		t.Fatalf("Unable to connect: %v\n", err)
	}
	defer nc.Close()

	// Wait for notification on connection established
	err = Wait(connected)
	if err != nil {
		t.Fatal("Did not get the connected callback")
	}

	ncp, err := nats.Connect(nats.DefaultURL,
		nats.ReconnectWait(50*time.Millisecond))
	if err != nil {
		t.Fatalf("Unable to connect: %v\n", err)
	}
	defer ncp.Close()

	// Wait to make sure that if we have closed (incorrectly) the
	// asyncCBDispatcher during the connect process, this is caught here.
	time.Sleep(time.Second)

	s.Shutdown()

	s = RunDefaultServer()
	defer s.Shutdown()

	if err := Wait(reconnected); err != nil {
		t.Fatal("Did not get the reconnected callback")
	}

	var sub1, sub2 *nats.Subscription

	recv := func(m *nats.Msg) {
		// Signal that one message is received
		recvCh <- true

		// We will now block
		if m.Subject == "foo" {
			<-recvCh1
		} else {
			<-recvCh2
		}
		m.Sub.Unsubscribe()
	}

	sub1, err = nc.Subscribe("foo", recv)
	if err != nil {
		t.Fatalf("Unable to create subscription: %v\n", err)
	}
	sub1.SetPendingLimits(1, 100000)

	sub2, err = nc.Subscribe("bar", recv)
	if err != nil {
		t.Fatalf("Unable to create subscription: %v\n", err)
	}
	sub2.SetPendingLimits(1, 100000)

	nc.Flush()

	ncp.Publish("foo", []byte("test"))
	ncp.Publish("bar", []byte("test"))
	ncp.Flush()

	// Wait notification that messages were received
	err = Wait(recvCh)
	if err == nil {
		err = Wait(recvCh)
	}
	if err != nil {
		t.Fatal("Did not receive message")
	}

	// With pending limits of 1, these extra messages overflow the
	// subscriptions and trigger the async error handler.
	for i := 0; i < 2; i++ {
		ncp.Publish("foo", []byte("test"))
		ncp.Publish("bar", []byte("test"))
	}
	ncp.Flush()

	if err := Wait(asyncErr); err != nil {
		t.Fatal("Did not get the async callback")
	}
	if err := Wait(asyncErr); err != nil {
		t.Fatal("Did not get the async callback")
	}

	close(recvCh1)
	close(recvCh2)

	nc.Close()

	if err := Wait(closed); err != nil {
		t.Fatal("Did not get the close callback")
	}

	if len(cbErrors) > 0 {
		t.Fatalf("%v", <-cbErrors)
	}

	if (connTime == time.Time{}) || (dtime1 == time.Time{}) || (dtime2 == time.Time{}) || (rtime == time.Time{}) || (atime1 == time.Time{}) || (atime2 == time.Time{}) || (ctime == time.Time{}) {
		t.Fatalf("Some callbacks did not fire:\n%v\n%v\n%v\n%v\n%v\n%v", dtime1, rtime, atime1, atime2, dtime2, ctime)
	}

	if dtime1.Before(connTime) || rtime.Before(dtime1) || dtime2.Before(rtime) || atime2.Before(atime1) || ctime.Before(atime2) {
		t.Fatalf("Wrong callback order:\n%v\n%v\n%v\n%v\n%v\n%v\n%v", connTime, dtime1,
rtime, atime1, atime2, dtime2, ctime)
	}

	// Close the other connection
	ncp.Close()

	// Check that the go routine is gone. Allow plenty of time
	// to avoid flappers.
	timeout := time.Now().Add(5 * time.Second)
	for time.Now().Before(timeout) {
		if !isAsyncDispatcherRunning() {
			// Good, we are done!
			return
		}
		time.Sleep(50 * time.Millisecond)
	}
	t.Fatalf("The async callback dispatcher(s) should have stopped")
}

// TestReconnectErrHandler checks that the reconnect error handler is invoked
// when the initial connection cannot be established and the client keeps
// retrying.
func TestReconnectErrHandler(t *testing.T) {
	handler := func(ch chan bool) func(*nats.Conn, error) {
		return func(*nats.Conn, error) {
			ch <- true
		}
	}
	t.Run("with RetryOnFailedConnect, MaxReconnects(-1), no connection", func(t *testing.T) {
		opts := test.DefaultTestOptions
		// Server should not be reachable to test this one
		opts.Port = 4223
		s := RunServerWithOptions(&opts)
		defer s.Shutdown()

		reconnectErr := make(chan bool)
		nc, err := nats.Connect(nats.DefaultURL, nats.ReconnectErrHandler(handler(reconnectErr)), nats.RetryOnFailedConnect(true), nats.MaxReconnects(-1))
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		defer nc.Close()
		if err = Wait(reconnectErr); err != nil {
			t.Fatal("Timeout waiting for reconnect error handler")
		}
	})
}

// TestConnectHandler exercises ConnectHandler/ReconnectHandler invocation
// semantics with and without RetryOnFailedConnect.
func TestConnectHandler(t *testing.T) {
	handler := func(ch chan bool) func(*nats.Conn) {
		return func(*nats.Conn) {
			ch <- true
		}
	}

	t.Run("with RetryOnFailedConnect, connection established", func(t *testing.T) {
		s := RunDefaultServer()
		defer s.Shutdown()

		connected := make(chan bool)
		reconnected := make(chan bool)

		nc, err := nats.Connect(nats.DefaultURL,
			nats.ConnectHandler(handler(connected)),
			nats.ReconnectHandler(handler(reconnected)),
			nats.RetryOnFailedConnect(true))
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		defer nc.Close()
		if err = Wait(connected); err != nil {
			t.Fatal("Timeout waiting for connect handler")
		}
		if err = WaitTime(reconnected, 100*time.Millisecond); err == nil {
			t.Fatal("Reconnect handler should not have been invoked")
		}
	})

	t.Run("with RetryOnFailedConnect, connection failed", func(t *testing.T) {
connected := make(chan bool)
		reconnected := make(chan bool)

		// No server is running here: with RetryOnFailedConnect the call
		// still succeeds, but neither handler should ever fire.
		nc, err := nats.Connect(nats.DefaultURL,
			nats.ConnectHandler(handler(connected)),
			nats.ReconnectHandler(handler(reconnected)),
			nats.RetryOnFailedConnect(true))
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		defer nc.Close()
		if err = WaitTime(connected, 100*time.Millisecond); err == nil {
			t.Fatal("Connected handler should not have been invoked")
		}
		if err = WaitTime(reconnected, 100*time.Millisecond); err == nil {
			t.Fatal("Reconnect handler should not have been invoked")
		}
	})

	t.Run("no RetryOnFailedConnect, connection established", func(t *testing.T) {
		s := RunDefaultServer()
		defer s.Shutdown()

		connected := make(chan bool)
		reconnected := make(chan bool)

		nc, err := nats.Connect(nats.DefaultURL,
			nats.ConnectHandler(handler(connected)),
			nats.ReconnectHandler(handler(reconnected)))
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		defer nc.Close()
		if err = Wait(connected); err != nil {
			t.Fatal("Timeout waiting for connect handler")
		}
		if err = WaitTime(reconnected, 100*time.Millisecond); err == nil {
			t.Fatal("Reconnect handler should not have been invoked")
		}
	})

	t.Run("no RetryOnFailedConnect, connection failed", func(t *testing.T) {
		connected := make(chan bool)
		reconnected := make(chan bool)

		_, err := nats.Connect(nats.DefaultURL,
			nats.ConnectHandler(handler(connected)),
			nats.ReconnectHandler(handler(reconnected)))
		if err == nil {
			t.Fatalf("Expected error on connect, got nil")
		}
		if err = WaitTime(connected, 100*time.Millisecond); err == nil {
			t.Fatal("Connected handler should not have been invoked")
		}
		if err = WaitTime(reconnected, 100*time.Millisecond); err == nil {
			t.Fatal("Reconnect handler should not have been invoked")
		}
	})

	t.Run("with RetryOnFailedConnect, initial connection failed, reconnect successful", func(t *testing.T) {
		connected := make(chan bool)
		reconnected := make(chan bool)

		nc, err := nats.Connect(nats.DefaultURL,
			nats.ConnectHandler(handler(connected)),
nats.ReconnectHandler(handler(reconnected)),
			nats.RetryOnFailedConnect(true),
			nats.ReconnectWait(100*time.Millisecond))
		// With RetryOnFailedConnect, Connect() succeeds even though no
		// server is running yet.
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		defer nc.Close()

		s := RunDefaultServer()
		defer s.Shutdown()

		// The initial (retried) connection fires the connect handler,
		// not the reconnect handler.
		if err = Wait(connected); err != nil {
			t.Fatal("Timeout waiting for connect handler")
		}
		if err = WaitTime(reconnected, 100*time.Millisecond); err == nil {
			t.Fatal("Reconnect handler should not have been invoked")
		}
	})

	t.Run("with RetryOnFailedConnect, initial connection successful, server restart", func(t *testing.T) {
		connected := make(chan bool)
		reconnected := make(chan bool)

		s := RunDefaultServer()
		defer s.Shutdown()

		nc, err := nats.Connect(nats.DefaultURL,
			nats.ConnectHandler(handler(connected)),
			nats.ReconnectHandler(handler(reconnected)),
			nats.RetryOnFailedConnect(true),
			nats.ReconnectWait(100*time.Millisecond))
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		defer nc.Close()

		if err = Wait(connected); err != nil {
			t.Fatal("Timeout waiting for connect handler")
		}
		if err = WaitTime(reconnected, 100*time.Millisecond); err == nil {
			t.Fatal("Reconnect handler should not have been invoked")
		}

		s.Shutdown()
		s = RunDefaultServer()
		defer s.Shutdown()

		// After a server restart only the reconnect handler should fire.
		if err = Wait(reconnected); err != nil {
			t.Fatal("Timeout waiting for reconnect handler")
		}
		if err = WaitTime(connected, 100*time.Millisecond); err == nil {
			t.Fatal("Connected handler should not have been invoked")
		}
	})
}

// TestFlushReleaseOnClose checks that a blocked Flush() is released with an
// error when the connection is closed while the flush is pending.
func TestFlushReleaseOnClose(t *testing.T) {
	serverInfo := "INFO {\"server_id\":\"foobar\",\"host\":\"%s\",\"port\":%d,\"auth_required\":false,\"tls_required\":false,\"max_payload\":1048576}\r\n"

	l, e := net.Listen("tcp", "127.0.0.1:0")
	if e != nil {
		t.Fatal("Could not listen on an ephemeral port")
	}
	tl := l.(*net.TCPListener)
	defer tl.Close()

	addr := tl.Addr().(*net.TCPAddr)
	done := make(chan bool)
	errCh := make(chan error, 1)
	go
func() {
		conn, err := l.Accept()
		if err != nil {
			errCh <- fmt.Errorf("error accepting client connection: %v", err)
			return
		}
		defer conn.Close()

		info := fmt.Sprintf(serverInfo, addr.IP, addr.Port)
		conn.Write([]byte(info))

		// Read connect and ping commands sent from the client
		br := bufio.NewReaderSize(conn, 1024)
		if _, err := br.ReadString('\n'); err != nil {
			errCh <- fmt.Errorf("expected CONNECT from client, got: %s", err)
			return
		}
		if _, err := br.ReadString('\n'); err != nil {
			errCh <- fmt.Errorf("expected PING from client, got: %s", err)
			return
		}
		conn.Write([]byte("PONG\r\n"))

		// Hang around until asked to quit
		<-done
	}()

	// Wait for server mock to start
	time.Sleep(100 * time.Millisecond)

	natsURL := fmt.Sprintf("nats://%s:%d", addr.IP, addr.Port)
	opts := nats.GetDefaultOptions()
	opts.AllowReconnect = false
	opts.Servers = []string{natsURL}
	nc, err := opts.Connect()
	if err != nil {
		t.Fatalf("Expected INFO message with custom max payload, got: %s", err)
	}
	defer nc.Close()

	// First try a FlushTimeout() and make sure we timeout
	if err := nc.FlushTimeout(50 * time.Millisecond); err == nil || err != nats.ErrTimeout {
		t.Fatalf("Expected a timeout error, got: %v", err)
	}

	// Close the connection shortly after starting the blocking Flush()
	// below; the flush must then fail rather than hang.
	go func() {
		time.Sleep(50 * time.Millisecond)
		nc.Close()
	}()
	if err := nc.Flush(); err == nil {
		t.Fatal("Expected error on Flush() released by Close()")
	}

	close(done)
	checkErrChannel(t, errCh)
}

// TestMaxPendingOut checks that the connection is closed with
// nats.ErrStaleConnection once MaxPingsOut outstanding PINGs go unanswered
// by the (mock) server.
func TestMaxPendingOut(t *testing.T) {
	serverInfo := "INFO {\"server_id\":\"foobar\",\"host\":\"%s\",\"port\":%d,\"auth_required\":false,\"tls_required\":false,\"max_payload\":1048576}\r\n"

	l, e := net.Listen("tcp", "127.0.0.1:0")
	if e != nil {
		t.Fatal("Could not listen on an ephemeral port")
	}
	tl := l.(*net.TCPListener)
	defer tl.Close()

	addr := tl.Addr().(*net.TCPAddr)
	done := make(chan bool)
	cch := make(chan bool)
	errCh := make(chan error, 1)

	go func() {
		conn, err := l.Accept()
		if err != nil {
			errCh <- fmt.Errorf("error accepting client connection: %v", err)
			return
		}
		defer conn.Close()

		info :=
fmt.Sprintf(serverInfo, addr.IP, addr.Port)
		conn.Write([]byte(info))

		// Read connect and ping commands sent from the client
		br := bufio.NewReaderSize(conn, 1024)
		if _, err := br.ReadString('\n'); err != nil {
			errCh <- fmt.Errorf("expected CONNECT from client, got: %s", err)
			return
		}
		if _, err := br.ReadString('\n'); err != nil {
			errCh <- fmt.Errorf("expected PING from client, got: %s", err)
			return
		}
		conn.Write([]byte("PONG\r\n"))

		// Hang around until asked to quit
		<-done
	}()

	// Wait for server mock to start
	time.Sleep(100 * time.Millisecond)

	natsURL := fmt.Sprintf("nats://%s:%d", addr.IP, addr.Port)
	opts := nats.GetDefaultOptions()
	opts.PingInterval = 20 * time.Millisecond
	opts.MaxPingsOut = 2
	opts.AllowReconnect = false
	opts.ClosedCB = func(_ *nats.Conn) { cch <- true }
	opts.Servers = []string{natsURL}
	nc, err := opts.Connect()
	if err != nil {
		t.Fatalf("Expected INFO message with custom max payload, got: %s", err)
	}
	defer nc.Close()

	// After 60 ms, we should have closed the connection
	time.Sleep(100 * time.Millisecond)

	if err := Wait(cch); err != nil {
		t.Fatal("Failed to get ClosedCB")
	}
	if nc.LastError() != nats.ErrStaleConnection {
		t.Fatalf("Expected to get %v, got %v", nats.ErrStaleConnection, nc.LastError())
	}

	close(done)
	checkErrChannel(t, errCh)
}

// TestErrInReadLoop checks that garbage received from the server while
// reading the protocol does not result in deliveries to subscribers.
func TestErrInReadLoop(t *testing.T) {
	serverInfo := "INFO {\"server_id\":\"foobar\",\"host\":\"%s\",\"port\":%d,\"auth_required\":false,\"tls_required\":false,\"max_payload\":1048576}\r\n"

	l, e := net.Listen("tcp", "127.0.0.1:0")
	if e != nil {
		t.Fatal("Could not listen on an ephemeral port")
	}
	tl := l.(*net.TCPListener)
	defer tl.Close()

	addr := tl.Addr().(*net.TCPAddr)
	done := make(chan bool)
	cch := make(chan bool)
	errCh := make(chan error, 1)

	go func() {
		conn, err := l.Accept()
		if err != nil {
			errCh <- fmt.Errorf("error accepting client connection: %v", err)
			return
		}
		defer conn.Close()

		info := fmt.Sprintf(serverInfo, addr.IP, addr.Port)
		conn.Write([]byte(info))

		// Read connect and ping commands sent from the client
		br := bufio.NewReaderSize(conn, 1024)
		if _, err := br.ReadString('\n'); err != nil {
			errCh <- fmt.Errorf("expected CONNECT from client, got: %s", err)
			return
		}
		if _, err := br.ReadString('\n'); err != nil {
			errCh <- fmt.Errorf("expected PING from client, got: %s", err)
			return
		}
		conn.Write([]byte("PONG\r\n"))

		// Read (and ignore) the SUB from the client
		if _, err := br.ReadString('\n'); err != nil {
			errCh <- fmt.Errorf("expected SUB from client, got: %s", err)
			return
		}

		// Send something that should make the subscriber fail.
		conn.Write([]byte("Ivan"))

		// Hang around until asked to quit
		<-done
	}()

	// Wait for server mock to start
	time.Sleep(100 * time.Millisecond)

	natsURL := fmt.Sprintf("nats://%s:%d", addr.IP, addr.Port)
	opts := nats.GetDefaultOptions()
	opts.AllowReconnect = false
	opts.ClosedCB = func(_ *nats.Conn) { cch <- true }
	opts.Servers = []string{natsURL}
	nc, err := opts.Connect()
	if err != nil {
		t.Fatalf("Expected INFO message with custom max payload, got: %s", err)
	}
	defer nc.Close()

	received := int64(0)

	nc.Subscribe("foo", func(_ *nats.Msg) {
		atomic.AddInt64(&received, 1)
	})

	if err := Wait(cch); err != nil {
		t.Fatal("Failed to get ClosedCB")
	}

	recv := int(atomic.LoadInt64(&received))
	if recv != 0 {
		t.Fatalf("Should not have received messages, got: %d", recv)
	}

	close(done)
	checkErrChannel(t, errCh)
}

// TestErrStaleConnection checks that a -ERR 'Stale Connection' sent by the
// server triggers a disconnect followed by a reconnect.
func TestErrStaleConnection(t *testing.T) {
	serverInfo := "INFO {\"server_id\":\"foobar\",\"host\":\"%s\",\"port\":%d,\"auth_required\":false,\"tls_required\":false,\"max_payload\":1048576}\r\n"

	l, e := net.Listen("tcp", "127.0.0.1:0")
	if e != nil {
		t.Fatal("Could not listen on an ephemeral port")
	}
	tl := l.(*net.TCPListener)
	defer tl.Close()

	addr := tl.Addr().(*net.TCPAddr)
	done := make(chan bool)
	dch := make(chan bool)
	rch := make(chan bool)
	cch := make(chan bool)
	sch := make(chan bool)

	firstDisconnect := true

	errCh := make(chan error, 1)
	go func() {
		// Accept the initial connection, then the reconnect.
		for i := 0; i < 2; i++ {
			conn, err := l.Accept()
			if err != nil {
				errCh <- fmt.Errorf("error accepting client connection: %v", err)
				return
			}
			defer conn.Close()

			info := fmt.Sprintf(serverInfo, addr.IP, addr.Port)
			conn.Write([]byte(info))

			// Read connect and ping commands sent from the client
			br := bufio.NewReaderSize(conn, 1024)
			if _, err := br.ReadString('\n'); err != nil {
				errCh <- fmt.Errorf("expected CONNECT from client, got: %s", err)
				return
			}
			if _, err := br.ReadString('\n'); err != nil {
				errCh <- fmt.Errorf("expected PING from client, got: %s", err)
				return
			}
			conn.Write([]byte("PONG\r\n"))

			if i == 0 {
				// Wait a tiny, and simulate a Stale Connection
				time.Sleep(50 * time.Millisecond)
				conn.Write([]byte("-ERR 'Stale Connection'\r\n"))

				// The client should try to reconnect. When getting the
				// disconnected callback, it will close this channel.
				<-sch

				// Close the connection and go back to accept the new
				// connection.
				conn.Close()
			} else {
				// Hang around a bit
				<-done
			}
		}
	}()

	// Wait for server mock to start
	time.Sleep(100 * time.Millisecond)

	natsURL := fmt.Sprintf("nats://%s:%d", addr.IP, addr.Port)
	opts := nats.GetDefaultOptions()
	opts.AllowReconnect = true
	opts.DisconnectedErrCB = func(_ *nats.Conn, _ error) {
		// Interested only in the first disconnect cb
		if firstDisconnect {
			firstDisconnect = false
			close(sch)
			dch <- true
		}
	}
	opts.ReconnectedCB = func(_ *nats.Conn) { rch <- true }
	opts.ClosedCB = func(_ *nats.Conn) { cch <- true }
	opts.ReconnectWait = 20 * time.Millisecond
	nats.ReconnectJitter(0, 0)(&opts)
	opts.MaxReconnect = 100
	opts.Servers = []string{natsURL}
	nc, err := opts.Connect()
	if err != nil {
		t.Fatalf("Expected INFO message with custom max payload, got: %s", err)
	}
	defer nc.Close()

	// We should first get disconnected
	if err := Wait(dch); err != nil {
		t.Fatal("Failed to get DisconnectedErrCB")
	}
	// Then reconnected..
if err := Wait(rch); err != nil {
		t.Fatal("Failed to get ReconnectedCB")
	}

	// Now close the connection
	nc.Close()

	// We should get the closed cb
	if err := Wait(cch); err != nil {
		t.Fatal("Failed to get ClosedCB")
	}

	close(done)
	checkErrChannel(t, errCh)
}

// TestServerErrorClosesConnection checks that a generic -ERR from the server
// closes the connection permanently, without attempting to reconnect.
func TestServerErrorClosesConnection(t *testing.T) {
	serverInfo := "INFO {\"server_id\":\"foobar\",\"host\":\"%s\",\"port\":%d,\"auth_required\":false,\"tls_required\":false,\"max_payload\":1048576}\r\n"

	l, e := net.Listen("tcp", "127.0.0.1:0")
	if e != nil {
		t.Fatal("Could not listen on an ephemeral port")
	}
	tl := l.(*net.TCPListener)
	defer tl.Close()

	addr := tl.Addr().(*net.TCPAddr)
	done := make(chan bool)
	dch := make(chan bool)
	cch := make(chan bool)

	serverSentError := "Any Error"
	reconnected := int64(0)

	errCh := make(chan error, 1)
	go func() {
		conn, err := l.Accept()
		if err != nil {
			errCh <- fmt.Errorf("error accepting client connection: %v", err)
			return
		}
		defer conn.Close()

		info := fmt.Sprintf(serverInfo, addr.IP, addr.Port)
		conn.Write([]byte(info))

		// Read connect and ping commands sent from the client
		br := bufio.NewReaderSize(conn, 1024)
		if _, err := br.ReadString('\n'); err != nil {
			errCh <- fmt.Errorf("expected CONNECT from client, got: %s", err)
			return
		}
		if _, err := br.ReadString('\n'); err != nil {
			errCh <- fmt.Errorf("expected PING from client, got: %s", err)
			return
		}
		conn.Write([]byte("PONG\r\n"))

		// Wait a tiny, and simulate a Stale Connection
		time.Sleep(50 * time.Millisecond)
		conn.Write([]byte("-ERR '" + serverSentError + "'\r\n"))

		// Hang around a bit
		<-done
	}()

	// Wait for server mock to start
	time.Sleep(100 * time.Millisecond)

	natsURL := fmt.Sprintf("nats://%s:%d", addr.IP, addr.Port)
	opts := nats.GetDefaultOptions()
	opts.AllowReconnect = true
	opts.DisconnectedErrCB = func(_ *nats.Conn, _ error) { dch <- true }
	opts.ReconnectedCB = func(_ *nats.Conn) { atomic.AddInt64(&reconnected, 1) }
	opts.ClosedCB = func(_ *nats.Conn) { cch <- true }
	opts.ReconnectWait = 20 * time.Millisecond
nats.ReconnectJitter(0, 0)(&opts)
	opts.MaxReconnect = 100
	opts.Servers = []string{natsURL}
	nc, err := opts.Connect()
	if err != nil {
		t.Fatalf("Expected INFO message with custom max payload, got: %s", err)
	}
	defer nc.Close()

	// The server sends an error that should cause the client to simply close
	// the connection.

	// We should first get disconnected
	if err := Wait(dch); err != nil {
		t.Fatal("Failed to get DisconnectedErrCB")
	}

	// We should get the closed cb
	if err := Wait(cch); err != nil {
		t.Fatal("Failed to get ClosedCB")
	}

	// We should not have been reconnected
	if atomic.LoadInt64(&reconnected) != 0 {
		t.Fatal("ReconnectedCB should not have been invoked")
	}

	// Check LastError(), it should be "nats: " + the server-sent error.
	lastErr := nc.LastError().Error()
	expectedErr := "nats: " + serverSentError
	if lastErr != expectedErr {
		t.Fatalf("Expected error: '%v', got '%v'", expectedErr, lastErr)
	}

	close(done)
	checkErrChannel(t, errCh)
}

// TestUseDefaultTimeout checks that an Options struct without an explicit
// Timeout gets the default connect timeout applied.
func TestUseDefaultTimeout(t *testing.T) {
	s := RunDefaultServer()
	defer s.Shutdown()

	opts := &nats.Options{
		Servers: []string{nats.DefaultURL},
	}
	nc, err := opts.Connect()
	if err != nil {
		t.Fatalf("Unexpected error on connect: %v", err)
	}
	defer nc.Close()
	if nc.Opts.Timeout != nats.DefaultTimeout {
		t.Fatalf("Expected Timeout to be set to %v, got %v", nats.DefaultTimeout, nc.Opts.Timeout)
	}
}

// TestLastErrorNoRace checks that LastError() can be called from the
// disconnect and closed callbacks without triggering the race detector.
func TestLastErrorNoRace(t *testing.T) {
	s := RunDefaultServer()
	defer s.Shutdown()

	// Access LastError in disconnection and closed handlers to make sure
	// that there is no race. It is possible in some cases that
	// nc.LastError() returns a non nil error. We don't care here about the
	// returned value.
dch := func(c *nats.Conn) {
		c.LastError()
	}
	closedCh := make(chan struct{})
	cch := func(c *nats.Conn) {
		c.LastError()
		closedCh <- struct{}{}
	}
	nc, err := nats.Connect(nats.DefaultURL,
		nats.DisconnectHandler(dch),
		nats.ClosedHandler(cch),
		nats.MaxReconnects(-1),
		nats.ReconnectWait(5*time.Millisecond),
		nats.ReconnectJitter(0, 0))
	if err != nil {
		t.Fatalf("Unable to connect: %v\n", err)
	}
	defer nc.Close()

	// Restart the server several times to trigger a reconnection.
	for i := 0; i < 10; i++ {
		s.Shutdown()
		time.Sleep(10 * time.Millisecond)
		s = RunDefaultServer()
	}
	nc.Close()
	s.Shutdown()
	select {
	case <-closedCh:
	case <-time.After(5 * time.Second):
		t.Fatal("Timeout waiting for the closed callback")
	}
}

// customDialer is a test dialer that signals on ch and always fails to dial.
type customDialer struct {
	ch chan bool
}

func (cd *customDialer) Dial(network, address string) (net.Conn, error) {
	cd.ch <- true
	return nil, errors.New("on purpose")
}

// TestUseCustomDialer checks the Dialer/CustomDialer options and their
// precedence rules (CustomDialer wins when both are set).
func TestUseCustomDialer(t *testing.T) {
	s := RunDefaultServer()
	defer s.Shutdown()

	dialer := &net.Dialer{
		Timeout:       10 * time.Second,
		FallbackDelay: -1,
	}
	opts := &nats.Options{
		Servers: []string{nats.DefaultURL},
		Dialer:  dialer,
	}
	nc, err := opts.Connect()
	if err != nil {
		t.Fatalf("Unexpected error on connect: %v", err)
	}
	defer nc.Close()
	if nc.Opts.Dialer != dialer {
		t.Fatalf("Expected Dialer to be set to %v, got %v", dialer, nc.Opts.Dialer)
	}

	// Should be possible to set via variadic func based Option setter
	dialer2 := &net.Dialer{
		Timeout:       5 * time.Second,
		FallbackDelay: -1,
	}
	nc2, err := nats.Connect(nats.DefaultURL, nats.Dialer(dialer2))
	if err != nil {
		t.Fatalf("Unexpected error on connect: %v", err)
	}
	defer nc2.Close()
	if nc2.Opts.Dialer.FallbackDelay > 0 {
		t.Fatalf("Expected for dialer to be customized to disable dual stack support")
	}

	// By default, dialer still uses the DefaultTimeout
	nc3, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		t.Fatalf("Unexpected error on connect: %v", err)
	}
	defer nc3.Close()
	if nc3.Opts.Dialer.Timeout != nats.DefaultTimeout {
		// Report nc3's dialer timeout (the connection under test), not nc's.
		t.Fatalf("Expected Dialer.Timeout to be set to %v, got %v", nats.DefaultTimeout, nc3.Opts.Dialer.Timeout)
	}

	// Create custom dialer that return error on Dial().
	cdialer := &customDialer{ch: make(chan bool, 10)}

	// When both Dialer and CustomDialer are set, CustomDialer
	// should take precedence. That means that the connection
	// should fail for these two set of options.
	options := []*nats.Options{
		{Dialer: dialer, CustomDialer: cdialer},
		{CustomDialer: cdialer},
	}
	for _, o := range options {
		o.Servers = []string{nats.DefaultURL}
		nc, err := o.Connect()
		// As of now, Connect() would not return the actual dialer error,
		// instead it returns "no server available for connections".
		// So use go channel to ensure that custom dialer's Dial() method
		// was invoked.
		if err == nil {
			if nc != nil {
				nc.Close()
			}
			t.Fatal("Expected error, got none")
		}
		if err := Wait(cdialer.ch); err != nil {
			t.Fatal("Did not get our notification")
		}
	}

	// Same with variadic
	foptions := [][]nats.Option{
		{nats.Dialer(dialer), nats.SetCustomDialer(cdialer)},
		{nats.SetCustomDialer(cdialer)},
	}
	for _, fos := range foptions {
		nc, err := nats.Connect(nats.DefaultURL, fos...)
if err == nil {
			if nc != nil {
				nc.Close()
			}
			t.Fatal("Expected error, got none")
		}
		if err := Wait(cdialer.ch); err != nil {
			t.Fatal("Did not get our notification")
		}
	}
}

// TestDefaultOptionsDialer checks that each connection built from default
// options gets its own dialer instance (no shared state between them).
func TestDefaultOptionsDialer(t *testing.T) {
	s := RunDefaultServer()
	defer s.Shutdown()

	opts1 := nats.GetDefaultOptions()
	opts2 := nats.GetDefaultOptions()

	nc1, err := opts1.Connect()
	if err != nil {
		t.Fatalf("Unexpected error on connect: %v", err)
	}
	defer nc1.Close()

	nc2, err := opts2.Connect()
	if err != nil {
		t.Fatalf("Unexpected error on connect: %v", err)
	}
	defer nc2.Close()

	if nc1.Opts.Dialer == nc2.Opts.Dialer {
		t.Fatalf("Expected each connection to have its own dialer")
	}
}

// lowWriteBufferDialer dials TCP with a very small socket write buffer so
// that flusher write timeouts are easy to trigger in tests.
type lowWriteBufferDialer struct{}

func (d *lowWriteBufferDialer) Dial(network, address string) (net.Conn, error) {
	c, err := net.Dial(network, address)
	if err != nil {
		return nil, err
	}
	c.(*net.TCPConn).SetWriteBuffer(100)
	return c, nil
}

// TestCustomFlusherTimeout checks that a short FlusherTimeout surfaces a
// write timeout error when the socket cannot be flushed fast enough.
func TestCustomFlusherTimeout(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.SkipNow()
	}
	s := RunDefaultServer()
	defer s.Shutdown()

	// Reasonably large flusher timeout will not induce errors
	// when we can flush fast
	nc1, err := nats.Connect(nats.DefaultURL, nats.FlusherTimeout(10*time.Second))
	if err != nil {
		t.Fatalf("Expected to be able to connect, got: %s", err)
	}
	doneCh := make(chan struct{}, 1)

	// We want to have a payload size that is big enough so that after
	// few publish, the socket buffer will be full and produce the timeout.
	// Since we try to produce the error in the flusher and not the publish
	// call itself, use a size that is a bit less than the internal
	// buffer used by the library.
payloadBytes := make([]byte, 32*1024-200)

	errCh := make(chan error, 1)
	wg := sync.WaitGroup{}
	wg.Add(2)
	// Keep publishing on nc1 (real server) in the background; it must not
	// see any flusher errors with its large timeout.
	go func() {
		defer wg.Done()
		for {
			select {
			case <-time.After(200 * time.Millisecond):
				err := nc1.Publish("hello", payloadBytes)
				if err != nil {
					errCh <- err
					return
				}
			case <-doneCh:
				return
			}
		}
	}()
	defer nc1.Close()

	l, e := net.Listen("tcp", "127.0.0.1:0")
	if e != nil {
		t.Fatal("Could not listen on an ephemeral port")
	}
	tl := l.(*net.TCPListener)
	defer tl.Close()

	addr := tl.Addr().(*net.TCPAddr)

	fsDoneCh := make(chan struct{}, 1)
	fsErrCh := make(chan error, 1)
	go func() {
		defer wg.Done()

		serverInfo := "INFO {\"server_id\":\"foobar\",\"host\":\"%s\",\"port\":%d,\"auth_required\":false,\"tls_required\":false,\"max_payload\":%d}\r\n"

		conn, err := l.Accept()
		if err != nil {
			fsErrCh <- err
			return
		}
		defer conn.Close()

		// Make it small on purpose
		if err := conn.(*net.TCPConn).SetReadBuffer(1024); err != nil {
			fsErrCh <- err
			return
		}

		info := fmt.Sprintf(serverInfo, addr.IP, addr.Port, 1024*1024)
		conn.Write([]byte(info))

		// Read connect and ping commands sent from the client
		line := make([]byte, 100)
		_, err = conn.Read(line)
		if err != nil {
			fsErrCh <- fmt.Errorf("Expected CONNECT and PING from client, got: %v", err)
			return
		}
		conn.Write([]byte("PONG\r\n"))

		// Don't consume anything at this point and wait to be notified
		// that we are done.
		<-fsDoneCh
		fsErrCh <- nil
	}()

	nc2, err := nats.Connect(
		// URL to fake server
		fmt.Sprintf("nats://127.0.0.1:%d", addr.Port),
		// Use custom dialer so we can set write buffer to low value
		nats.SetCustomDialer(&lowWriteBufferDialer{}),
		// Use short flusher timeout to trigger the error
		nats.FlusherTimeout(15*time.Millisecond),
		// Make sure the library does not close connection due
		// to pings for this test.
nats.PingInterval(20*time.Second),
		// No reconnect
		nats.NoReconnect(),
		// Notify when connection lost
		nats.ClosedHandler(func(_ *nats.Conn) {
			doneCh <- struct{}{}
		}),
		// Use error handler to silence the stderr output
		nats.ErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, _ error) {
		}))
	if err != nil {
		t.Fatalf("Expected to be able to connect, got: %s", err)
	}
	defer nc2.Close()

	var (
		pubErr error
		nc2Err error
		tm     = time.NewTimer(5 * time.Second)
	)

forLoop:
	for {
		select {
		case <-time.After(100 * time.Millisecond):
			// We are trying to get the flusher to report the error, but it
			// is possible that the Publish() call itself flushes and we don't
			// want to fail the test for that.
			pubErr = nc2.Publish("world", payloadBytes)
			nc2Err = nc2.LastError()
			if nc2Err != nil {
				break forLoop
			}
		case <-tm.C:
			// We got an error, but not from flusher. Don't fail yet. Will check
			// if this is a timeout error as expected.
			if pubErr != nil {
				break forLoop
			}
			t.Fatalf("Timeout publishing messages")
		}
	}

	// Notify fake server that it can stop
	close(fsDoneCh)

	// Wait for go routines to end
	wg.Wait()

	// Make sure there were no error in the fake server
	if err := <-fsErrCh; err != nil {
		t.Fatalf("Fake server reported: %v", err)
	}

	// One of those two are guaranteed to be set.
	err = nc2Err
	if err == nil {
		err = pubErr
	}

	// Check that error is a timeout error as expected.
ope, ok := err.(*net.OpError)
	if !ok {
		// Message now matches the asserted type (*net.OpError).
		t.Fatalf("expected a *net.OpError, got %v", err)
	}
	if !ope.Timeout() {
		t.Fatalf("expected a timeout, got %v", err)
	}
	if ope.Op != "write" {
		t.Fatalf("expected a write error, got %v", err)
	}

	// Check that there is no error from nc1
	select {
	case e := <-errCh:
		t.Fatal(e)
	default:
	}
}

// TestNewServers checks that the discovered-servers handler fires when new
// servers join the cluster, but not on the initial connect.
func TestNewServers(t *testing.T) {
	s1Opts := test.DefaultTestOptions
	s1Opts.Host = "127.0.0.1"
	s1Opts.Port = 4222
	s1Opts.Cluster.Host = "127.0.0.1"
	s1Opts.Cluster.Port = 6222
	s1 := test.RunServer(&s1Opts)
	defer s1.Shutdown()

	s2Opts := test.DefaultTestOptions
	s2Opts.Host = "127.0.0.1"
	// Derive the port from s1 instead of duplicating a hard-coded value.
	s2Opts.Port = s1Opts.Port + 1
	s2Opts.Cluster.Host = "127.0.0.1"
	s2Opts.Cluster.Port = 6223
	s2Opts.Routes = server.RoutesFromStr("nats://127.0.0.1:6222")
	s2 := test.RunServer(&s2Opts)
	defer s2.Shutdown()

	ch := make(chan bool)
	cb := func(_ *nats.Conn) {
		ch <- true
	}

	url := fmt.Sprintf("nats://%s:%d", s1Opts.Host, s1Opts.Port)
	nc1, err := nats.Connect(url, nats.DiscoveredServersHandler(cb))
	if err != nil {
		t.Fatalf("Error on connect: %v", err)
	}
	defer nc1.Close()

	nc2, err := nats.Connect(url)
	if err != nil {
		t.Fatalf("Error on connect: %v", err)
	}
	defer nc2.Close()
	nc2.SetDiscoveredServersHandler(cb)

	opts := nats.GetDefaultOptions()
	opts.Url = nats.DefaultURL
	opts.DiscoveredServersCB = cb
	nc3, err := opts.Connect()
	if err != nil {
		t.Fatalf("Error on connect: %v", err)
	}
	defer nc3.Close()

	// Make sure that handler is not invoked on initial connect.
	select {
	case <-ch:
		t.Fatalf("Handler should not have been invoked")
	case <-time.After(500 * time.Millisecond):
	}

	// Start a new server.
	s3Opts := test.DefaultTestOptions
	// Fixed copy-paste bug: the original mutated s1Opts (already running)
	// instead of configuring s3Opts here.
	s3Opts.Host = "127.0.0.1"
	s3Opts.Port = s2Opts.Port + 1
	s3Opts.Cluster.Host = "127.0.0.1"
	s3Opts.Cluster.Port = 6224
	s3Opts.Routes = server.RoutesFromStr("nats://127.0.0.1:6222")
	s3 := test.RunServer(&s3Opts)
	defer s3.Shutdown()

	// The callbacks should have been invoked (once per connection).
	if err := Wait(ch); err != nil {
		t.Fatal("Did not get our callback")
	}
	if err := Wait(ch); err != nil {
		t.Fatal("Did not get our callback")
	}
	if err := Wait(ch); err != nil {
		t.Fatal("Did not get our callback")
	}
}

// TestBarrier checks that Conn.Barrier schedules its function to run only
// after the messages already dispatched to async subscriptions.
func TestBarrier(t *testing.T) {
	s := RunDefaultServer()
	defer s.Shutdown()

	nc := NewDefaultConnection(t)
	defer nc.Close()

	pubMsgs := int32(0)
	ch := make(chan bool, 1)

	sub1, err := nc.Subscribe("pub", func(_ *nats.Msg) {
		atomic.AddInt32(&pubMsgs, 1)
		time.Sleep(250 * time.Millisecond)
	})
	if err != nil {
		t.Fatalf("Error on subscribe: %v", err)
	}

	sub2, err := nc.Subscribe("close", func(_ *nats.Msg) {
		// The "close" message was sent/received last, but
		// because we are dealing with different subscriptions,
		// which are dispatched by different dispatchers, and
		// because the "pub" subscription is delayed, this
		// callback is likely to be invoked before the sub1's
		// second callback is invoked. Using the Barrier call
		// here will ensure that the given function will be invoked
		// after the preceding messages have been dispatched.
nc.Barrier(func() { res := atomic.LoadInt32(&pubMsgs) == 2 ch <- res }) }) if err != nil { t.Fatalf("Error on subscribe: %v", err) } // Send 2 "pub" messages followed by a "close" message for i := 0; i < 2; i++ { if err := nc.Publish("pub", []byte("pub msg")); err != nil { t.Fatalf("Error on publish: %v", err) } } if err := nc.Publish("close", []byte("closing")); err != nil { t.Fatalf("Error on publish: %v", err) } select { case ok := <-ch: if !ok { t.Fatal("The barrier function was invoked before the second message") } case <-time.After(2 * time.Second): t.Fatal("Waited for too long...") } // Remove all subs sub1.Unsubscribe() sub2.Unsubscribe() // Barrier should be invoked in place. Since we use buffered channel // we are ok. nc.Barrier(func() { ch <- true }) if err := Wait(ch); err != nil { t.Fatal("Barrier function was not invoked") } if _, err := nc.Subscribe("foo", func(m *nats.Msg) { // To check that the Barrier() function works if the subscription // is unsubscribed after the call was made, sleep a bit here. time.Sleep(250 * time.Millisecond) m.Sub.Unsubscribe() }); err != nil { t.Fatalf("Error on subscribe: %v", err) } if err := nc.Publish("foo", []byte("hello")); err != nil { t.Fatalf("Error on publish: %v", err) } // We need to Flush here to make sure that message has been received // and posted to subscription's internal queue before calling Barrier. if err := nc.Flush(); err != nil { t.Fatalf("Error on flush: %v", err) } nc.Barrier(func() { ch <- true }) if err := Wait(ch); err != nil { t.Fatal("Barrier function was not invoked") } // Test with AutoUnsubscribe now... sub1, err = nc.Subscribe("foo", func(m *nats.Msg) { // Since we auto-unsubscribe with 1, there should not be another // invocation of this callback, but the Barrier should still be // invoked. 
nc.Barrier(func() { ch <- true }) }) if err != nil { t.Fatalf("Error on subscribe: %v", err) } sub1.AutoUnsubscribe(1) // Send 2 messages and flush for i := 0; i < 2; i++ { if err := nc.Publish("foo", []byte("hello")); err != nil { t.Fatalf("Error on publish: %v", err) } } if err := nc.Flush(); err != nil { t.Fatalf("Error on flush: %v", err) } // Check barrier was invoked if err := Wait(ch); err != nil { t.Fatal("Barrier function was not invoked") } // Check that Barrier only affects asynchronous subscriptions sub1, err = nc.Subscribe("foo", func(m *nats.Msg) { nc.Barrier(func() { ch <- true }) }) if err != nil { t.Fatalf("Error on subscribe: %v", err) } syncSub, err := nc.SubscribeSync("foo") if err != nil { t.Fatalf("Error on subscribe: %v", err) } msgChan := make(chan *nats.Msg, 1) chanSub, err := nc.ChanSubscribe("foo", msgChan) if err != nil { t.Fatalf("Error on subscribe: %v", err) } if err := nc.Publish("foo", []byte("hello")); err != nil { t.Fatalf("Error on publish: %v", err) } if err := nc.Flush(); err != nil { t.Fatalf("Error on flush: %v", err) } // Check barrier was invoked even if we did not yet consume // from the 2 other type of subscriptions if err := Wait(ch); err != nil { t.Fatal("Barrier function was not invoked") } if _, err := syncSub.NextMsg(time.Second); err != nil { t.Fatalf("Sync sub did not receive the message") } select { case <-msgChan: case <-time.After(time.Second): t.Fatal("Chan sub did not receive the message") } chanSub.Unsubscribe() syncSub.Unsubscribe() sub1.Unsubscribe() atomic.StoreInt32(&pubMsgs, 0) // Check barrier does not prevent new messages to be delivered. 
sub1, err = nc.Subscribe("foo", func(_ *nats.Msg) { if pm := atomic.AddInt32(&pubMsgs, 1); pm == 1 { nc.Barrier(func() { nc.Publish("foo", []byte("second")) nc.Flush() }) } else if pm == 2 { ch <- true } }) if err != nil { t.Fatalf("Error on subscribe: %v", err) } if err := nc.Publish("foo", []byte("first")); err != nil { t.Fatalf("Error on publish: %v", err) } if err := Wait(ch); err != nil { t.Fatal("Barrier function was not invoked") } sub1.Unsubscribe() // Check that barrier works if called before connection // is closed. if _, err := nc.Subscribe("bar", func(_ *nats.Msg) { nc.Barrier(func() { ch <- true }) nc.Close() }); err != nil { t.Fatalf("Error on subscribe: %v", err) } if err := nc.Publish("bar", []byte("hello")); err != nil { t.Fatalf("Error on publish: %v", err) } // This could fail if the connection is closed before we get // here. nc.Flush() if err := Wait(ch); err != nil { t.Fatal("Barrier function was not invoked") } // Finally, check that if connection is closed, Barrier returns // an error. 
if err := nc.Barrier(func() { ch <- true }); err != nats.ErrConnectionClosed { t.Fatalf("Expected error %v, got %v", nats.ErrConnectionClosed, err) } // Check that one can call connection methods from Barrier // when there is no async subscriptions nc = NewDefaultConnection(t) defer nc.Close() if err := nc.Barrier(func() { ch <- nc.TLSRequired() }); err != nil { t.Fatalf("Error on Barrier: %v", err) } if err := Wait(ch); err != nil { t.Fatal("Barrier was blocked") } } func TestReceiveInfoRightAfterFirstPong(t *testing.T) { l, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { t.Fatalf("Error on listen: %v", err) } tl := l.(*net.TCPListener) defer tl.Close() addr := tl.Addr().(*net.TCPAddr) wg := sync.WaitGroup{} wg.Add(1) go func() { defer wg.Done() c, err := tl.Accept() if err != nil { return } defer c.Close() // Send the initial INFO c.Write([]byte("INFO {}\r\n")) buf := make([]byte, 0, 100) b := make([]byte, 100) for { n, err := c.Read(b) if err != nil { return } buf = append(buf, b[:n]...) if bytes.Contains(buf, []byte("PING\r\n")) { break } } // Send PONG and following INFO in one go (or at least try). // The processing of PONG in sendConnect() should leave the // rest for the readLoop to process. 
c.Write([]byte(fmt.Sprintf("PONG\r\nINFO {\"connect_urls\":[\"127.0.0.1:%d\", \"me:1\"]}\r\n", addr.Port))) // Wait for client to disconnect for { if _, err := c.Read(buf); err != nil { return } } }() nc, err := nats.Connect(fmt.Sprintf("nats://127.0.0.1:%d", addr.Port)) if err != nil { t.Fatalf("Error on connect: %v", err) } defer nc.Close() var ( ds []string timeout = time.Now().Add(2 * time.Second) ok = false ) for time.Now().Before(timeout) { ds = nc.DiscoveredServers() if len(ds) == 1 && ds[0] == "nats://me:1" { ok = true break } time.Sleep(50 * time.Millisecond) } nc.Close() wg.Wait() if !ok { t.Fatalf("Unexpected discovered servers: %v", ds) } } func TestReceiveInfoWithEmptyConnectURLs(t *testing.T) { ready := make(chan error, 2) ch := make(chan bool, 1) wg := sync.WaitGroup{} wg.Add(1) go func() { defer wg.Done() ports := []int{4222, 4223} for i := 0; i < 2; i++ { l, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", ports[i])) if err != nil { ready <- fmt.Errorf("error on listen: %v", err) return } tl := l.(*net.TCPListener) defer tl.Close() ready <- nil c, err := tl.Accept() if err != nil { return } defer c.Close() // Send the initial INFO c.Write([]byte(fmt.Sprintf("INFO {\"server_id\":\"server%d\"}\r\n", (i + 1)))) buf := make([]byte, 0, 100) b := make([]byte, 100) for { n, err := c.Read(b) if err != nil { return } buf = append(buf, b[:n]...) if bytes.Contains(buf, []byte("PING\r\n")) { break } } if i == 0 { // Send PONG and following INFO in one go (or at least try). // The processing of PONG in sendConnect() should leave the // rest for the readLoop to process. 
				c.Write([]byte("PONG\r\nINFO {\"server_id\":\"server1\",\"connect_urls\":[\"127.0.0.1:4222\", \"127.0.0.1:4223\", \"127.0.0.1:4224\"]}\r\n"))
				// Wait for the notification
				<-ch
				// Close the connection in our side and go back into accept
				c.Close()
			} else {
				// Send no connect URLs (as if this was an older server that could in some cases
				// send an empty array)
				c.Write([]byte("PONG\r\nINFO {\"server_id\":\"server2\"}\r\n"))
				// Wait for client to disconnect
				for {
					if _, err := c.Read(buf); err != nil {
						return
					}
				}
			}
		}
	}()
	// Wait for listener to be up and running
	e := <-ready
	if e != nil {
		t.Fatal(e.Error())
	}
	rch := make(chan bool)
	nc, err := nats.Connect("nats://127.0.0.1:4222",
		nats.ReconnectWait(50*time.Millisecond),
		nats.ReconnectJitter(0, 0),
		nats.ReconnectHandler(func(_ *nats.Conn) { rch <- true }))
	if err != nil {
		t.Fatalf("Error on connect: %v", err)
	}
	defer nc.Close()
	var (
		ds      []string
		timeout = time.Now().Add(2 * time.Second)
		ok      = false
	)
	for time.Now().Before(timeout) {
		ds = nc.DiscoveredServers()
		if len(ds) == 2 {
			if (ds[0] == "nats://127.0.0.1:4223" && ds[1] == "nats://127.0.0.1:4224") ||
				(ds[0] == "nats://127.0.0.1:4224" && ds[1] == "nats://127.0.0.1:4223") {
				ok = true
				break
			}
		}
		time.Sleep(50 * time.Millisecond)
	}
	if !ok {
		t.Fatalf("Unexpected discovered servers: %v", ds)
	}
	// Make the server close our connection
	ch <- true
	// Wait for the reconnect
	if err := Wait(rch); err != nil {
		t.Fatal("Did not reconnect")
	}
	// Discovered servers should still contain the two URLs received before
	// the disconnect: the second server sent no connect_urls, so the
	// previously discovered list must be preserved.
	// (The original comment mentioned "nats://me:1", copied from another
	// test; this test never uses that URL.)
	ds = nc.DiscoveredServers()
	if len(ds) != 2 ||
		!((ds[0] == "nats://127.0.0.1:4223" && ds[1] == "nats://127.0.0.1:4224") ||
			(ds[0] == "nats://127.0.0.1:4224" && ds[1] == "nats://127.0.0.1:4223")) {
		t.Fatalf("Unexpected discovered servers list: %v", ds)
	}
	nc.Close()
	wg.Wait()
}

// TestConnectWithSimplifiedURLs checks that connecting works with URLs that
// omit the scheme and/or the port, against both plain and TLS servers.
func TestConnectWithSimplifiedURLs(t *testing.T) {
	urls := []string{
		"nats://127.0.0.1:4222",
		"nats://127.0.0.1:",
		"nats://127.0.0.1",
		"127.0.0.1:",
		"127.0.0.1",
	}
	connect := func(t *testing.T, url string, useRootCA bool)
{ t.Helper() var opt nats.Option if useRootCA { opt = nats.RootCAs("./configs/certs/ca.pem") } nc, err := nats.Connect(url, opt) if err != nil { t.Fatalf("URL %q expected to connect, got %v", url, err) } nc.Close() } // Start a server that listens on default port 4222. s := RunDefaultServer() defer s.Shutdown() // Try for every connection in the urls array. for _, u := range urls { connect(t, u, false) } s.Shutdown() // Use this to build the options for us... s, opts := RunServerWithConfig("configs/tls.conf") s.Shutdown() // Now change listen port to 4222 and remove auth opts.Port = 4222 opts.Username = "" opts.Password = "" // and restart the server s = RunServerWithOptions(opts) defer s.Shutdown() // Test again against a server that wants TLS and check // that we automatically switch to Secure. for _, u := range urls { connect(t, u, true) } } func TestNilOpts(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() // Test a single nil option var o1, o2, o3 nats.Option nc, err := nats.Connect(nats.DefaultURL, o1) if err != nil { t.Fatalf("Unexpected error with one nil option: %v", err) } nc.Close() // Test nil, opt, nil o2 = nats.ReconnectBufSize(2222) nc, err = nats.Connect(nats.DefaultURL, o1, o2, o3) if err != nil { t.Fatalf("Unexpected error with multiple nil options: %v", err) } defer nc.Close() // check that the opt was set if nc.Opts.ReconnectBufSize != 2222 { t.Fatal("Unexpected error: option not set.") } } func TestGetClientID(t *testing.T) { if serverVersionAtLeast(1, 2, 0) != nil { t.SkipNow() } optsA := test.DefaultTestOptions optsA.Port = -1 optsA.Cluster.Port = -1 optsA.Cluster.Name = "test" srvA := RunServerWithOptions(&optsA) defer srvA.Shutdown() ch := make(chan bool, 1) nc1, err := nats.Connect(srvA.ClientURL(), nats.DiscoveredServersHandler(func(_ *nats.Conn) { ch <- true }), nats.ReconnectHandler(func(_ *nats.Conn) { ch <- true })) if err != nil { t.Fatalf("Error on connect: %v", err) } defer nc1.Close() cid, err := nc1.GetClientID() if err 
!= nil { t.Fatalf("Error getting CID: %v", err) } if cid == 0 { t.Fatal("Unexpected cid value, make sure server is 1.2.0+") } // Start a second server and verify that async INFO contains client ID optsB := test.DefaultTestOptions optsB.Port = -1 optsB.Cluster.Port = -1 optsB.Cluster.Name = "test" optsB.Routes = server.RoutesFromStr(fmt.Sprintf("nats://127.0.0.1:%d", srvA.ClusterAddr().Port)) srvB := RunServerWithOptions(&optsB) defer srvB.Shutdown() // Wait for the discovered callback to fire if err := Wait(ch); err != nil { t.Fatal("Did not fire the discovered callback") } // Now check CID should be valid and same as before newCID, err := nc1.GetClientID() if err != nil { t.Fatalf("Error getting CID: %v", err) } if newCID != cid { t.Fatalf("Expected CID to be %v, got %v", cid, newCID) } // Create a client to server B nc2, err := nats.Connect(srvB.ClientURL()) if err != nil { t.Fatalf("Error on connect: %v", err) } defer nc2.Close() // Stop server A, nc1 will reconnect to B, and should have different CID srvA.Shutdown() // Wait for nc1 to reconnect if err := Wait(ch); err != nil { t.Fatal("Did not reconnect") } newCID, err = nc1.GetClientID() if err != nil { t.Fatalf("Error getting CID: %v", err) } if newCID == 0 { t.Fatal("Unexpected cid value, make sure server is 1.2.0+") } if newCID == cid { t.Fatalf("Expected different CID since server already had a client") } nc1.Close() newCID, err = nc1.GetClientID() if err == nil { t.Fatalf("Expected error, got none") } if newCID != 0 { t.Fatalf("Expected 0 on connection closed, got %v", newCID) } // Stop clients and remaining server nc1.Close() nc2.Close() srvB.Shutdown() // Now have dummy server that returns no CID and check we get expected error. 
l, e := net.Listen("tcp", "127.0.0.1:0") if e != nil { t.Fatal("Could not listen on an ephemeral port") } tl := l.(*net.TCPListener) defer tl.Close() addr := tl.Addr().(*net.TCPAddr) wg := sync.WaitGroup{} wg.Add(1) errCh := make(chan error, 1) go func() { defer wg.Done() conn, err := l.Accept() if err != nil { errCh <- fmt.Errorf("error accepting client connection: %v", err) return } defer conn.Close() info := fmt.Sprintf("INFO {\"server_id\":\"foobar\",\"host\":\"%s\",\"port\":%d,\"auth_required\":false,\"tls_required\":false,\"max_payload\":1048576}\r\n", addr.IP, addr.Port) conn.Write([]byte(info)) // Read connect and ping commands sent from the client line := make([]byte, 256) _, err = conn.Read(line) if err != nil { errCh <- fmt.Errorf("expected CONNECT and PING from client, got: %s", err) return } conn.Write([]byte("PONG\r\n")) // Now wait to be notified that we can finish <-ch }() nc, err := nats.Connect(fmt.Sprintf("nats://127.0.0.1:%d", addr.Port)) if err != nil { t.Fatalf("Error on connect: %v", err) } defer nc.Close() if cid, err := nc.GetClientID(); err != nats.ErrClientIDNotSupported || cid != 0 { t.Fatalf("Expected err=%v and cid=0, got err=%v and cid=%v", nats.ErrClientIDNotSupported, err, cid) } // Release fake server nc.Close() ch <- true wg.Wait() checkErrChannel(t, errCh) } func TestTLSDontSkipVerify(t *testing.T) { s, opts := RunServerWithConfig("./configs/tls_noip_a.conf") defer s.Shutdown() // Connect with nats:// prefix to a server that requires TLS. // The library will automatically switch to TLS, but we should // not skip hostname verification. 
sURL := fmt.Sprintf("nats://derek:porkchop@127.0.0.1:%d", opts.Port) nc, err := nats.Connect(sURL, nats.RootCAs("./configs/certs/ca.pem")) // Verify that error is about hostname verification if err == nil || !strings.Contains(err.Error(), "IP SAN") { if nc != nil { nc.Close() } t.Fatalf("Expected error about hostname verification, got %v", err) } // Check that we can override skip verify by providing our own TLS Config. nc, err = nats.Connect(sURL, nats.RootCAs("./configs/certs/ca.pem"), nats.Secure(&tls.Config{InsecureSkipVerify: true})) if err != nil { t.Fatalf("Error on connect: %v", err) } nc.Close() // Now change the URL to include hostname and verify that using // nats:// scheme does work. sURL = fmt.Sprintf("nats://derek:porkchop@%s:%d", opts.Host, opts.Port) nc, err = nats.Connect(sURL, nats.RootCAs("./configs/certs/ca.pem")) if err != nil { t.Fatalf("Error on connect: %v", err) } nc.Close() } func TestRetryOnFailedConnect(t *testing.T) { nc, err := nats.Connect(nats.DefaultURL) if err == nil { nc.Close() t.Fatal("Expected error, did not get one") } reconnectedCh := make(chan bool, 1) connectedCh := make(chan bool, 1) dch := make(chan bool, 1) nc, err = nats.Connect(nats.DefaultURL, nats.RetryOnFailedConnect(true), nats.MaxReconnects(-1), nats.ReconnectWait(15*time.Millisecond), nats.DisconnectErrHandler(func(_ *nats.Conn, _ error) { dch <- true }), nats.ConnectHandler(func(_ *nats.Conn) { connectedCh <- true }), nats.ReconnectHandler(func(_ *nats.Conn) { reconnectedCh <- true }), nats.NoCallbacksAfterClientClose()) if err != nil { t.Fatalf("Unexpected error: %v", err) } sub, err := nc.SubscribeSync("foo") if err != nil { t.Fatalf("Error on subscribe: %v", err) } if err := nc.Publish("foo", []byte("msg")); err != nil { t.Fatalf("Error on publish: %v", err) } for i := 0; i < 2; i++ { // Start server now s := RunDefaultServer() defer s.Shutdown() switch i { case 0: select { case <-connectedCh: case <-time.After(2 * time.Second): t.Fatal("Should have 
connected") } case 1: select { case <-reconnectedCh: case <-time.After(2 * time.Second): t.Fatal("Should have reconnected") } } // Now make sure that the pub worked and sub worked. // We should receive the message we have published. if _, err := sub.NextMsg(time.Second); err != nil { t.Fatalf("Iter=%v - did not receive message: %v", i, err) } // Check that normal disconnect/reconnect works as expected s.Shutdown() select { case <-dch: case <-time.After(time.Second): t.Fatal("Should have been disconnected") } if i == 0 { if err := nc.Publish("foo", []byte("msg")); err != nil { t.Fatalf("Iter=%v - error on publish: %v", i, err) } } } nc.Close() // Try again but this time we will restart a server with u/p and auth should fail. closedCh := make(chan bool, 1) nc, err = nats.Connect(nats.DefaultURL, nats.RetryOnFailedConnect(true), nats.MaxReconnects(-1), nats.ReconnectWait(15*time.Millisecond), nats.ReconnectHandler(func(_ *nats.Conn) { reconnectedCh <- true }), nats.ClosedHandler(func(_ *nats.Conn) { closedCh <- true })) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() o := test.DefaultTestOptions o.Host = "127.0.0.1" o.Port = 4222 o.Username = "user" o.Password = "password" s := RunServerWithOptions(&o) defer s.Shutdown() select { case <-closedCh: case <-time.After(2 * time.Second): t.Fatal("Should have stopped trying to connect due to auth failure") } // Make sure that we did not get the (re)connected CB select { case <-reconnectedCh: t.Fatal("(re)connected callback should not have been invoked") default: } } func TestRetryOnFailedConnectWithTLSError(t *testing.T) { opts := test.DefaultTestOptions opts.Port = 4222 tc := &server.TLSConfigOpts{ CertFile: "./configs/certs/server.pem", KeyFile: "./configs/certs/key.pem", CaFile: "./configs/certs/ca.pem", } var err error if opts.TLSConfig, err = server.GenTLSConfig(tc); err != nil { t.Fatalf("Can't build TLCConfig: %v", err) } opts.TLSTimeout = 0.0001 s := RunServerWithOptions(&opts) defer 
s.Shutdown() connectedCh := make(chan bool, 1) nc, err := nats.Connect(nats.DefaultURL, nats.Secure(&tls.Config{InsecureSkipVerify: true}), nats.RetryOnFailedConnect(true), nats.MaxReconnects(-1), nats.ReconnectWait(15*time.Millisecond), nats.ConnectHandler(func(_ *nats.Conn) { connectedCh <- true }), nats.NoCallbacksAfterClientClose()) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() // Wait for several failed attempts time.Sleep(100 * time.Millisecond) // Replace tls timeout to a reasonable value. s.Shutdown() opts.TLSTimeout = 2.0 s = RunServerWithOptions(&opts) defer s.Shutdown() select { case <-connectedCh: case <-time.After(time.Second): t.Fatal("Should have connected") } } func TestConnStatusChangedEvents(t *testing.T) { t.Run("default events", func(t *testing.T) { s := RunDefaultServer() nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %s", err) } statusCh := nc.StatusChanged() defer close(statusCh) newStatus := make(chan nats.Status, 10) // non-blocking channel, so we need to be constantly listening go func() { for { s, ok := <-statusCh if !ok { return } newStatus <- s } }() time.Sleep(50 * time.Millisecond) s.Shutdown() WaitOnChannel(t, newStatus, nats.RECONNECTING) s = RunDefaultServer() defer s.Shutdown() WaitOnChannel(t, newStatus, nats.CONNECTED) nc.Close() WaitOnChannel(t, newStatus, nats.CLOSED) select { case s := <-newStatus: t.Fatalf("Unexpected status received: %s", s) case <-time.After(100 * time.Millisecond): } }) t.Run("custom event only", func(t *testing.T) { s := RunDefaultServer() nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %s", err) } statusCh := nc.StatusChanged(nats.CLOSED) defer close(statusCh) newStatus := make(chan nats.Status, 10) // non-blocking channel, so we need to be constantly listening go func() { for { s, ok := <-statusCh if !ok { return } newStatus <- s } }() time.Sleep(50 * time.Millisecond) s.Shutdown() s = RunDefaultServer() 
defer s.Shutdown() nc.Close() WaitOnChannel(t, newStatus, nats.CLOSED) select { case s := <-newStatus: t.Fatalf("Unexpected status received: %s", s) case <-time.After(100 * time.Millisecond): } }) t.Run("do not block on channel if it's not used", func(t *testing.T) { s := RunDefaultServer() nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %s", err) } defer nc.Close() // do not use the returned channel, client should never block _ = nc.StatusChanged() s.Shutdown() s = RunDefaultServer() defer s.Shutdown() if err := nc.Publish("foo", []byte("msg")); err != nil { t.Fatalf("Unexpected error: %v", err) } time.Sleep(100 * time.Millisecond) }) } func TestTLSHandshakeFirst(t *testing.T) { s, opts := RunServerWithConfig("./configs/tls.conf") defer s.Shutdown() secureURL := fmt.Sprintf("tls://derek:porkchop@localhost:%d", opts.Port) nc, err := nats.Connect(secureURL, nats.RootCAs("./configs/certs/ca.pem"), nats.TLSHandshakeFirst()) if err == nil || !strings.Contains(err.Error(), "TLS handshake") { if err == nil { nc.Close() } t.Fatalf("Expected error about not being a TLS handshake, got %v", err) } tc := &server.TLSConfigOpts{ CertFile: "./configs/certs/server.pem", KeyFile: "./configs/certs/key.pem", } tlsConf, err := server.GenTLSConfig(tc) if err != nil { t.Fatalf("Can't build TLCConfig: %v", err) } tlsConf.ServerName = "localhost" // Start a mockup server that will do the TLS handshake first // and then send the INFO protocol. l, e := net.Listen("tcp", ":0") if e != nil { t.Fatal("Could not listen on an ephemeral port") } tl := l.(*net.TCPListener) defer tl.Close() addr := tl.Addr().(*net.TCPAddr) errCh := make(chan error, 1) doneCh := make(chan struct{}) wg := sync.WaitGroup{} wg.Add(1) go func() { defer wg.Done() conn, err := l.Accept() if err != nil { errCh <- fmt.Errorf("error accepting client connection: %v", err) return } defer conn.Close() // Do the TLS handshake now. 
conn = tls.Server(conn, tlsConf) tlsconn := conn.(*tls.Conn) if err := tlsconn.Handshake(); err != nil { errCh <- fmt.Errorf("Server error during handshake: %v", err) return } // Send back the INFO info := fmt.Sprintf("INFO {\"server_id\":\"foobar\",\"host\":\"localhost\",\"port\":%d,\"auth_required\":false,\"tls_required\":true,\"tls_available\":true,\"tls_verify\":true,\"max_payload\":1048576}\r\n", addr.Port) tlsconn.Write([]byte(info)) // Read connect and ping commands sent from the client line := make([]byte, 256) _, err = tlsconn.Read(line) if err != nil { errCh <- fmt.Errorf("expected CONNECT and PING from client, got: %s", err) return } tlsconn.Write([]byte("PONG\r\n")) // Wait for the signal that client is ok <-doneCh // Server is done now. errCh <- nil }() time.Sleep(100 * time.Millisecond) secureURL = fmt.Sprintf("tls://derek:porkchop@localhost:%d", addr.Port) nc, err = nats.Connect(secureURL, nats.RootCAs("./configs/certs/ca.pem"), nats.TLSHandshakeFirst()) if err != nil { wg.Wait() e := <-errCh t.Fatalf("Unexpected error: %v (server error=%s)", err, e.Error()) } state, err := nc.TLSConnectionState() if err != nil { t.Fatalf("Expected connection state: %v", err) } if !state.HandshakeComplete { t.Fatalf("Expected valid connection state") } nc.Close() close(doneCh) wg.Wait() select { case e := <-errCh: if e != nil { t.Fatalf("Error from server: %v", err) } case <-time.After(2 * time.Second): t.Fatal("Server did not exit") } } nats.go-1.41.0/test/context_test.go000066400000000000000000000542101477351342400172130ustar00rootroot00000000000000// Copyright 2012-2022 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package test import ( "context" "strings" "sync" "testing" "time" "github.com/nats-io/nats.go" ) func TestContextRequestWithNilConnection(t *testing.T) { var nc *nats.Conn ctx, cancelCB := context.WithTimeout(context.Background(), 100*time.Millisecond) defer cancelCB() // should always be called, not discarded, to prevent context leak _, err := nc.RequestWithContext(ctx, "fast", []byte("")) if err == nil { t.Fatal("Expected request with context and nil connection to fail") } if err != nats.ErrInvalidConnection { t.Fatalf("Expected nats.ErrInvalidConnection, got %v\n", err) } } func testContextRequestWithTimeout(t *testing.T, nc *nats.Conn) { nc.Subscribe("slow", func(m *nats.Msg) { // Simulates latency into the client so that timeout is hit. time.Sleep(200 * time.Millisecond) nc.Publish(m.Reply, []byte("NG")) }) nc.Subscribe("fast", func(m *nats.Msg) { nc.Publish(m.Reply, []byte("OK")) }) nc.Subscribe("hdrs", func(m *nats.Msg) { if m.Header.Get("Hdr-Test") != "1" { m.Respond([]byte("-ERR")) } r := nats.NewMsg(m.Reply) r.Header = m.Header r.Data = []byte("+OK") m.RespondMsg(r) }) ctx, cancelCB := context.WithTimeout(context.Background(), 100*time.Millisecond) defer cancelCB() // should always be called, not discarded, to prevent context leak // Fast request should not fail at this point. 
resp, err := nc.RequestWithContext(ctx, "fast", []byte("")) if err != nil { t.Fatalf("Expected request with context to not fail on fast response: %s", err) } got := string(resp.Data) expected := "OK" if got != expected { t.Errorf("Expected to receive %s, got: %s", expected, got) } // Slow request hits timeout so expected to fail. _, err = nc.RequestWithContext(ctx, "slow", []byte("world")) if err == nil { t.Fatal("Expected request with timeout context to fail") } // Reported error is "context deadline exceeded" from Context package, // which implements net.Error interface. type timeoutError interface { Timeout() bool } timeoutErr, ok := err.(timeoutError) if !ok || !timeoutErr.Timeout() { t.Error("Expected to have a timeout error") } expected = `context deadline exceeded` if !strings.Contains(err.Error(), expected) { t.Errorf("Expected %q error, got: %q", expected, err.Error()) } // 2nd request should fail again even if they would be fast because context // has already timed out. _, err = nc.RequestWithContext(ctx, "fast", []byte("world")) if err == nil { t.Fatal("Expected request with context to fail") } // now test headers make it all the way back msg := nats.NewMsg("hdrs") msg.Header.Add("Hdr-Test", "1") resp, err = nc.RequestMsgWithContext(context.Background(), msg) if err != nil { t.Fatalf("Expected request to be published: %v", err) } if string(resp.Data) != "+OK" { t.Fatalf("Headers were not published to the requestor") } if resp.Header.Get("Hdr-Test") != "1" { t.Fatalf("Did not receive header in response") } } func TestContextRequestWithTimeout(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() testContextRequestWithTimeout(t, nc) } func TestOldContextRequestWithTimeout(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc, err := nats.Connect(nats.DefaultURL, nats.UseOldRequestStyle()) if err != nil { t.Fatalf("Failed to connect: %v", err) } defer nc.Close() testContextRequestWithTimeout(t, nc) 
} func testContextRequestWithTimeoutCanceled(t *testing.T, nc *nats.Conn) { ctx, cancelCB := context.WithTimeout(context.Background(), 100*time.Millisecond) defer cancelCB() nc.Subscribe("fast", func(m *nats.Msg) { nc.Publish(m.Reply, []byte("OK")) }) // Fast request should not fail resp, err := nc.RequestWithContext(ctx, "fast", []byte("")) if err != nil { t.Fatalf("Expected request with context to not fail on fast response: %s", err) } got := string(resp.Data) expected := "OK" if got != expected { t.Errorf("Expected to receive %s, got: %s", expected, got) } // Cancel the context already so that rest of requests fail. cancelCB() // Context is already canceled so requests should immediately fail. _, err = nc.RequestWithContext(ctx, "fast", []byte("world")) if err == nil { t.Fatal("Expected request with timeout context to fail") } // Reported error is "context canceled" from Context package, // which is not a timeout error. type timeoutError interface { Timeout() bool } if _, ok := err.(timeoutError); ok { t.Errorf("Expected to not have a timeout error") } expected = `context canceled` if !strings.Contains(err.Error(), expected) { t.Errorf("Expected %q error, got: %q", expected, err.Error()) } // 2nd request should fail again even if fast because context has already been canceled _, err = nc.RequestWithContext(ctx, "fast", []byte("world")) if err == nil { t.Fatal("Expected request with context to fail") } } func TestContextRequestWithTimeoutCanceled(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() testContextRequestWithTimeoutCanceled(t, nc) } func TestOldContextRequestWithTimeoutCanceled(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc, err := nats.Connect(nats.DefaultURL, nats.UseOldRequestStyle()) if err != nil { t.Fatalf("Failed to connect: %v", err) } defer nc.Close() testContextRequestWithTimeoutCanceled(t, nc) } func testContextRequestWithCancel(t *testing.T, nc *nats.Conn) { ctx, cancelCB 
:= context.WithCancel(context.Background()) defer cancelCB() // should always be called, not discarded, to prevent context leak // timer which cancels the context though can also be arbitrarily extended expirationTimer := time.AfterFunc(100*time.Millisecond, func() { cancelCB() }) sub1, err := nc.Subscribe("slow", func(m *nats.Msg) { // simulates latency into the client so that timeout is hit. time.Sleep(40 * time.Millisecond) nc.Publish(m.Reply, []byte("OK")) }) if err != nil { t.Fatalf("Expected to be able to subscribe: %s", err) } defer sub1.Unsubscribe() sub2, err := nc.Subscribe("slower", func(m *nats.Msg) { // we know this request will take longer so extend the timeout expirationTimer.Reset(100 * time.Millisecond) // slower reply which would have hit original timeout time.Sleep(70 * time.Millisecond) nc.Publish(m.Reply, []byte("Also OK")) }) if err != nil { t.Fatalf("Expected to be able to subscribe: %s", err) } defer sub2.Unsubscribe() for i := 0; i < 2; i++ { resp, err := nc.RequestWithContext(ctx, "slow", []byte("")) if err != nil { t.Fatalf("Expected request with context to not fail: %s", err) } got := string(resp.Data) expected := "OK" if got != expected { t.Errorf("Expected to receive %s, got: %s", expected, got) } } // A third request with latency would make the context // get canceled, but these reset the timer so deadline // gets extended: for i := 0; i < 10; i++ { resp, err := nc.RequestWithContext(ctx, "slower", []byte("")) if err != nil { t.Fatalf("Expected request with context to not fail: %s", err) } got := string(resp.Data) expected := "Also OK" if got != expected { t.Errorf("Expected to receive %s, got: %s", expected, got) } } // One more slow request will expire the timer and cause an error... _, err = nc.RequestWithContext(ctx, "slow", []byte("")) if err == nil { t.Fatal("Expected request with cancellation context to fail") } // ...though reported error is "context canceled" from Context package, // which is not a timeout error. 
type timeoutError interface { Timeout() bool } if _, ok := err.(timeoutError); ok { t.Errorf("Expected to not have a timeout error") } expected := `context canceled` if !strings.Contains(err.Error(), expected) { t.Errorf("Expected %q error, got: %q", expected, err.Error()) } } func TestContextOldRequestClosed(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc, err := nats.Connect(nats.DefaultURL, nats.UseOldRequestStyle()) if err != nil { t.Fatalf("Failed to connect: %v", err) } defer nc.Close() ctx, cancelCB := context.WithTimeout(context.Background(), time.Second) defer cancelCB() // should always be called, not discarded, to prevent context leak errCh := make(chan error, 1) start := time.Now() go func() { sub, _ := nc.SubscribeSync("checkClose") defer sub.Unsubscribe() _, err = nc.RequestWithContext(ctx, "checkClose", []byte("should be kicked out on close")) errCh <- err }() time.Sleep(100 * time.Millisecond) nc.Close() if e := <-errCh; e != nats.ErrConnectionClosed { t.Fatalf("Unexpected error: %v", e) } if dur := time.Since(start); dur >= time.Second { t.Fatalf("Request took too long to bail out: %v", dur) } } func TestContextRequestWithCancel(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() testContextRequestWithCancel(t, nc) } func TestOldContextRequestWithCancel(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc, err := nats.Connect(nats.DefaultURL, nats.UseOldRequestStyle()) if err != nil { t.Fatalf("Failed to connect: %v", err) } defer nc.Close() testContextRequestWithCancel(t, nc) } func testContextRequestWithDeadline(t *testing.T, nc *nats.Conn) { deadline := time.Now().Add(100 * time.Millisecond) ctx, cancelCB := context.WithDeadline(context.Background(), deadline) defer cancelCB() // should always be called, not discarded, to prevent context leak nc.Subscribe("slow", func(m *nats.Msg) { // simulates latency into the client so that timeout is hit. 
time.Sleep(40 * time.Millisecond) nc.Publish(m.Reply, []byte("OK")) }) for i := 0; i < 2; i++ { resp, err := nc.RequestWithContext(ctx, "slow", []byte("")) if err != nil { t.Fatalf("Expected request with context to not fail: %s", err) } got := string(resp.Data) expected := "OK" if got != expected { t.Errorf("Expected to receive %s, got: %s", expected, got) } } // A third request with latency would make the context // reach the deadline. _, err := nc.RequestWithContext(ctx, "slow", []byte("")) if err == nil { t.Fatal("Expected request with context to reach deadline") } // Reported error is "context deadline exceeded" from Context package, // which implements net.Error Timeout interface. type timeoutError interface { Timeout() bool } timeoutErr, ok := err.(timeoutError) if !ok || !timeoutErr.Timeout() { t.Errorf("Expected to have a timeout error") } expected := `context deadline exceeded` if !strings.Contains(err.Error(), expected) { t.Errorf("Expected %q error, got: %q", expected, err.Error()) } } func TestContextRequestWithDeadline(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() testContextRequestWithDeadline(t, nc) } func TestOldContextRequestWithDeadline(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc, err := nats.Connect(nats.DefaultURL, nats.UseOldRequestStyle()) if err != nil { t.Fatalf("Failed to connect: %v", err) } defer nc.Close() testContextRequestWithDeadline(t, nc) } func TestContextSubNextMsgWithTimeout(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() ctx, cancelCB := context.WithTimeout(context.Background(), 100*time.Millisecond) defer cancelCB() // should always be called, not discarded, to prevent context leak sub, err := nc.SubscribeSync("slow") if err != nil { t.Fatalf("Expected to be able to subscribe: %s", err) } for i := 0; i < 2; i++ { err := nc.Publish("slow", []byte("OK")) if err != nil { t.Fatalf("Expected publish 
to not fail: %s", err) } // Enough time to get a couple of messages time.Sleep(40 * time.Millisecond) msg, err := sub.NextMsgWithContext(ctx) if err != nil { t.Fatalf("Expected to receive message: %s", err) } got := string(msg.Data) expected := "OK" if got != expected { t.Errorf("Expected to receive %s, got: %s", expected, got) } } // Third message will fail because the context will be canceled by now _, err = sub.NextMsgWithContext(ctx) if err == nil { t.Fatal("Expected to fail receiving a message") } // Reported error is "context deadline exceeded" from Context package, // which implements net.Error Timeout interface. type timeoutError interface { Timeout() bool } timeoutErr, ok := err.(timeoutError) if !ok || !timeoutErr.Timeout() { t.Errorf("Expected to have a timeout error") } expected := `context deadline exceeded` if !strings.Contains(err.Error(), expected) { t.Errorf("Expected %q error, got: %q", expected, err.Error()) } } func TestContextSubNextMsgWithTimeoutCanceled(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() ctx, cancelCB := context.WithTimeout(context.Background(), 100*time.Millisecond) defer cancelCB() // should always be called, not discarded, to prevent context leak sub, err := nc.SubscribeSync("fast") if err != nil { t.Fatalf("Expected to be able to subscribe: %s", err) } for i := 0; i < 2; i++ { err := nc.Publish("fast", []byte("OK")) if err != nil { t.Fatalf("Expected publish to not fail: %s", err) } // Enough time to get a couple of messages time.Sleep(40 * time.Millisecond) msg, err := sub.NextMsgWithContext(ctx) if err != nil { t.Fatalf("Expected to receive message: %s", err) } got := string(msg.Data) expected := "OK" if got != expected { t.Errorf("Expected to receive %s, got: %s", expected, got) } } // Cancel the context already so that rest of NextMsg calls fail. 
cancelCB() _, err = sub.NextMsgWithContext(ctx) if err == nil { t.Fatal("Expected request with timeout context to fail") } // Reported error is "context canceled" from Context package, // which is not a timeout error. type timeoutError interface { Timeout() bool } if _, ok := err.(timeoutError); ok { t.Errorf("Expected to not have a timeout error") } expected := `context canceled` if !strings.Contains(err.Error(), expected) { t.Errorf("Expected %q error, got: %q", expected, err.Error()) } } func TestContextSubNextMsgWithCancel(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() ctx, cancelCB := context.WithCancel(context.Background()) defer cancelCB() // should always be called, not discarded, to prevent context leak // timer which cancels the context though can also be arbitrarily extended time.AfterFunc(100*time.Millisecond, func() { cancelCB() }) sub1, err := nc.SubscribeSync("foo") if err != nil { t.Fatalf("Expected to be able to subscribe: %s", err) } sub2, err := nc.SubscribeSync("bar") if err != nil { t.Fatalf("Expected to be able to subscribe: %s", err) } for i := 0; i < 2; i++ { err := nc.Publish("foo", []byte("OK")) if err != nil { t.Fatalf("Expected publish to not fail: %s", err) } resp, err := sub1.NextMsgWithContext(ctx) if err != nil { t.Fatalf("Expected request with context to not fail: %s", err) } got := string(resp.Data) expected := "OK" if got != expected { t.Errorf("Expected to receive %s, got: %s", expected, got) } } err = nc.Publish("bar", []byte("Also OK")) if err != nil { t.Fatalf("Expected publish to not fail: %s", err) } resp, err := sub2.NextMsgWithContext(ctx) if err != nil { t.Fatalf("Expected request with context to not fail: %s", err) } got := string(resp.Data) expected := "Also OK" if got != expected { t.Errorf("Expected to receive %s, got: %s", expected, got) } // We do not have another message pending so timer will // cancel the context. 
_, err = sub2.NextMsgWithContext(ctx) if err == nil { t.Fatal("Expected request with context to fail") } // Reported error is "context canceled" from Context package, // which is not a timeout error. type timeoutError interface { Timeout() bool } if _, ok := err.(timeoutError); ok { t.Errorf("Expected to not have a timeout error") } expected = `context canceled` if !strings.Contains(err.Error(), expected) { t.Errorf("Expected %q error, got: %q", expected, err.Error()) } } func TestContextSubNextMsgWithDeadline(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() deadline := time.Now().Add(100 * time.Millisecond) ctx, cancelCB := context.WithDeadline(context.Background(), deadline) defer cancelCB() // should always be called, not discarded, to prevent context leak sub, err := nc.SubscribeSync("slow") if err != nil { t.Fatalf("Expected to be able to subscribe: %s", err) } for i := 0; i < 2; i++ { err := nc.Publish("slow", []byte("OK")) if err != nil { t.Fatalf("Expected publish to not fail: %s", err) } // Enough time to get a couple of messages time.Sleep(40 * time.Millisecond) msg, err := sub.NextMsgWithContext(ctx) if err != nil { t.Fatalf("Expected to receive message: %s", err) } got := string(msg.Data) expected := "OK" if got != expected { t.Errorf("Expected to receive %s, got: %s", expected, got) } } // Third message will fail because the context will be canceled by now _, err = sub.NextMsgWithContext(ctx) if err == nil { t.Fatal("Expected to fail receiving a message") } // Reported error is "context deadline exceeded" from Context package, // which implements net.Error Timeout interface. 
type timeoutError interface { Timeout() bool } timeoutErr, ok := err.(timeoutError) if !ok || !timeoutErr.Timeout() { t.Errorf("Expected to have a timeout error") } expected := `context deadline exceeded` if !strings.Contains(err.Error(), expected) { t.Errorf("Expected %q error, got: %q", expected, err.Error()) } } func TestContextRequestConnClosed(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) ctx, cancelCB := context.WithCancel(context.Background()) defer cancelCB() time.AfterFunc(100*time.Millisecond, func() { cancelCB() }) nc.Close() _, err := nc.RequestWithContext(ctx, "foo", []byte("")) if err == nil { t.Fatal("Expected request to fail with error") } if err != nats.ErrConnectionClosed { t.Errorf("Expected request to fail with connection closed error: %s", err) } } func TestContextBadSubscription(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() ctx, cancelCB := context.WithCancel(context.Background()) defer cancelCB() time.AfterFunc(100*time.Millisecond, func() { cancelCB() }) sub, err := nc.Subscribe("foo", func(_ *nats.Msg) {}) if err != nil { t.Fatalf("Expected to be able to subscribe: %s", err) } err = sub.Unsubscribe() if err != nil { t.Fatalf("Expected to be able to unsubscribe: %s", err) } _, err = sub.NextMsgWithContext(ctx) if err == nil { t.Fatal("Expected to fail getting next message with context") } if err != nats.ErrBadSubscription { t.Errorf("Expected request to fail with connection closed error: %s", err) } } func TestFlushWithContext(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() ctx := context.Background() // No context should error. //lint:ignore SA1012 testing that passing nil fails if err := nc.FlushWithContext(nil); err != nats.ErrInvalidContext { t.Fatalf("Expected '%v', got '%v'", nats.ErrInvalidContext, err) } // A context with no deadline set should error also. 
if err := nc.FlushWithContext(ctx); err != nats.ErrNoDeadlineContext { t.Fatalf("Expected '%v', got '%v'", nats.ErrNoDeadlineContext, err) } dctx, cancel := context.WithTimeout(ctx, 10*time.Second) cancel() // A closed context should error. if err := nc.FlushWithContext(dctx); err != context.Canceled { t.Fatalf("Expected '%v', got '%v'", context.Canceled, err) } } func TestUnsubscribeAndNextMsgWithContext(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() ctx, cancelCB := context.WithCancel(context.Background()) defer cancelCB() // should always be called, not discarded, to prevent context leak sub, err := nc.SubscribeSync("foo") if err != nil { t.Fatalf("Expected to be able to subscribe: %s", err) } sub.Unsubscribe() if _, err = sub.NextMsgWithContext(ctx); err != nats.ErrBadSubscription { t.Fatalf("Expected '%v', but got: '%v'", nats.ErrBadSubscription, err) } ctx, cancelCB = context.WithCancel(context.Background()) defer cancelCB() // should always be called, not discarded, to prevent context leak sub, err = nc.SubscribeSync("foo") if err != nil { t.Fatalf("Expected to be able to subscribe: %s", err) } // Now make sure we get same error when unsubscribing from separate routine // while in the call. 
wg := sync.WaitGroup{} wg.Add(1) go func() { time.Sleep(100 * time.Millisecond) sub.Unsubscribe() wg.Done() }() if _, err = sub.NextMsgWithContext(ctx); err != nats.ErrBadSubscription { t.Fatalf("Expected '%v', but got: '%v'", nats.ErrBadSubscription, err) } wg.Wait() } func TestContextInvalid(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() //lint:ignore SA1012 testing that passing nil fails _, err := nc.RequestWithContext(nil, "foo", []byte("")) if err == nil { t.Fatal("Expected request to fail with error") } if err != nats.ErrInvalidContext { t.Errorf("Expected request to fail with connection closed error: %s", err) } sub, err := nc.Subscribe("foo", func(_ *nats.Msg) {}) if err != nil { t.Fatalf("Expected to be able to subscribe: %s", err) } //lint:ignore SA1012 testing that passing nil fails _, err = sub.NextMsgWithContext(nil) if err == nil { t.Fatal("Expected request to fail with error") } if err != nats.ErrInvalidContext { t.Errorf("Expected request to fail with connection closed error: %s", err) } } nats.go-1.41.0/test/drain_test.go000066400000000000000000000274411477351342400166320ustar00rootroot00000000000000// Copyright 2018-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package test import ( "errors" "fmt" "sync" "sync/atomic" "testing" "time" "github.com/nats-io/nats.go" ) // Drain can be very useful for graceful shutdown of subscribers. // Especially queue subscribers. 
func TestDrain(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() done := make(chan bool) received := int32(0) expected := int32(100) cb := func(_ *nats.Msg) { // Allow this to back up. time.Sleep(time.Millisecond) rcvd := atomic.AddInt32(&received, 1) if rcvd >= expected { done <- true } } sub, err := nc.Subscribe("foo", cb) if err != nil { t.Fatalf("Error creating subscription; %v", err) } for i := int32(0); i < expected; i++ { nc.Publish("foo", []byte("Don't forget about me")) } // Drain it and make sure we receive all messages. sub.Drain() if !sub.IsDraining() { t.Fatalf("Expected to be draining") } select { case <-done: break case <-time.After(5 * time.Second): r := atomic.LoadInt32(&received) if r != expected { t.Fatalf("Did not receive all messages: %d of %d", r, expected) } } time.Sleep(100 * time.Millisecond) if sub.IsDraining() { t.Fatalf("Expected to be done draining") } } func TestDrainQueueSub(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() done := make(chan bool) received := int32(0) expected := int32(4096) numSubs := int32(32) checkDone := func() int32 { rcvd := atomic.AddInt32(&received, 1) if rcvd >= expected { done <- true } return rcvd } callback := func(m *nats.Msg) { rcvd := checkDone() // Randomly replace this sub from time to time. if rcvd%3 == 0 { m.Sub.Drain() // Create a new one that we will not drain. 
nc.QueueSubscribe("foo", "bar", func(m *nats.Msg) { checkDone() }) } } for i := int32(0); i < numSubs; i++ { _, err := nc.QueueSubscribe("foo", "bar", callback) if err != nil { t.Fatalf("Error creating subscription; %v", err) } } for i := int32(0); i < expected; i++ { nc.Publish("foo", []byte("Don't forget about me")) } select { case <-done: break case <-time.After(5 * time.Second): r := atomic.LoadInt32(&received) if r != expected { t.Fatalf("Did not receive all messages: %d of %d", r, expected) } } } func waitFor(t *testing.T, totalWait, sleepDur time.Duration, f func() error) { t.Helper() timeout := time.Now().Add(totalWait) var err error for time.Now().Before(timeout) { err = f() if err == nil { return } time.Sleep(sleepDur) } if err != nil { t.Fatal(err.Error()) } } func TestDrainUnSubs(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() num := 100 subs := make([]*nats.Subscription, num) // Normal Unsubscribe for i := 0; i < num; i++ { sub, err := nc.Subscribe("foo", func(_ *nats.Msg) {}) if err != nil { t.Fatalf("Error creating subscription; %v", err) } subs[i] = sub } if numSubs := nc.NumSubscriptions(); numSubs != num { t.Fatalf("Expected %d subscriptions, got %d", num, numSubs) } for i := 0; i < num; i++ { subs[i].Unsubscribe() } if numSubs := nc.NumSubscriptions(); numSubs != 0 { t.Fatalf("Expected no subscriptions, got %d", numSubs) } // Drain version for i := 0; i < num; i++ { sub, err := nc.Subscribe("foo", func(_ *nats.Msg) {}) if err != nil { t.Fatalf("Error creating subscription; %v", err) } subs[i] = sub } if numSubs := nc.NumSubscriptions(); numSubs != num { t.Fatalf("Expected %d subscriptions, got %d", num, numSubs) } for i := 0; i < num; i++ { subs[i].Drain() } // Should happen quickly that we get to zero, so do not need to wait long. 
waitFor(t, 2*time.Second, 10*time.Millisecond, func() error { if numSubs := nc.NumSubscriptions(); numSubs != 0 { return fmt.Errorf("Expected no subscriptions, got %d", numSubs) } return nil }) } func TestDrainSlowSubscriber(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() received := int32(0) sub, err := nc.Subscribe("foo", func(_ *nats.Msg) { atomic.AddInt32(&received, 1) time.Sleep(100 * time.Millisecond) }) if err != nil { t.Fatalf("Error creating subscription; %v", err) } total := 10 for i := 0; i < total; i++ { nc.Publish("foo", []byte("Slow Slow")) } nc.Flush() pmsgs, _, _ := sub.Pending() if pmsgs != total && pmsgs != total-1 { t.Fatalf("Expected most messages to be pending, but got %d vs %d", pmsgs, total) } sub.Drain() // Should take a second or so to drain away. waitFor(t, 2*time.Second, 100*time.Millisecond, func() error { // Wait for it to become invalid. Once drained it is unsubscribed. _, _, err := sub.Pending() if err != nats.ErrBadSubscription { return errors.New("Still valid") } r := int(atomic.LoadInt32(&received)) if r != total { t.Fatalf("Did not receive all messages, got %d vs %d", r, total) } return nil }) } func TestDrainConnection(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() done := make(chan bool) rdone := make(chan bool) closed := func(nc *nats.Conn) { done <- true } url := fmt.Sprintf("nats://127.0.0.1:%d", nats.DefaultPort) nc, err := nats.Connect(url, nats.ClosedHandler(closed)) if err != nil { t.Fatalf("Failed to create default connection: %v", err) } defer nc.Close() nc2, err := nats.Connect(url) if err != nil { t.Fatalf("Failed to create default connection: %v", err) } defer nc2.Close() received := int32(0) responses := int32(0) expected := int32(50) sleep := 10 * time.Millisecond // Create the listener for responses on "bar" _, err = nc2.Subscribe("bar", func(_ *nats.Msg) { r := atomic.AddInt32(&responses, 1) if r == expected { rdone <- true } }) if err != nil { 
t.Fatalf("Error creating subscription for responses: %v", err) } // Create a slow subscriber for the responder sub, err := nc.Subscribe("foo", func(m *nats.Msg) { time.Sleep(sleep) atomic.AddInt32(&received, 1) err := nc.Publish(m.Reply, []byte("Stop bugging me")) if err != nil { t.Errorf("Publisher received an error sending response: %v\n", err) } }) if err != nil { t.Fatalf("Error creating subscription; %v", err) } // Publish some messages for i := int32(0); i < expected; i++ { nc.PublishRequest("foo", "bar", []byte("Slow Slow")) } drainStart := time.Now() nc.Drain() // Sub should be disabled immediately if err := sub.Unsubscribe(); err == nil { t.Fatalf("Expected to receive an error on Unsubscribe after drain") } // Also can not create any new subs if _, err := nc.Subscribe("foo", func(_ *nats.Msg) {}); err == nil { t.Fatalf("Expected to receive an error on new Subscription after drain") } // Make sure we can still publish, this is for any responses. if err := nc.Publish("baz", []byte("Slow Slow")); err != nil { t.Fatalf("Expected to not receive an error on Publish after drain, got %v", err) } // Wait for the closed state from nc select { case <-done: if time.Since(drainStart) < (sleep * time.Duration(expected)) { t.Fatalf("Drain exited too soon\n") } r := atomic.LoadInt32(&received) if r != expected { t.Fatalf("Did not receive all messages from Drain, %d vs %d", r, expected) } break case <-time.After(2 * time.Second): t.Fatalf("Timeout waiting for closed state for connection") } // Now make sure all responses were received. 
select { case <-rdone: r := atomic.LoadInt32(&responses) if r != expected { t.Fatalf("Did not receive all responses, %d vs %d", r, expected) } break case <-time.After(2 * time.Second): t.Fatalf("Timeout waiting for all the responses") } } func TestDrainConnectionAutoUnsub(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() errors := int32(0) received := int32(0) expected := int32(10) done := make(chan bool) closed := func(nc *nats.Conn) { done <- true } errCb := func(nc *nats.Conn, s *nats.Subscription, err error) { atomic.AddInt32(&errors, 1) } url := fmt.Sprintf("nats://127.0.0.1:%d", nats.DefaultPort) nc, err := nats.Connect(url, nats.ErrorHandler(errCb), nats.ClosedHandler(closed)) if err != nil { t.Fatalf("Failed to create default connection: %v", err) } defer nc.Close() sub, err := nc.Subscribe("foo", func(_ *nats.Msg) { // So they back up a bit in client and allow drain to do its thing. time.Sleep(10 * time.Millisecond) atomic.AddInt32(&received, 1) }) if err != nil { t.Fatalf("Error creating subscription; %v", err) } sub.AutoUnsubscribe(int(expected)) // Publish some messages for i := 0; i < 50; i++ { nc.Publish("foo", []byte("Only 10 please!")) } // Flush here so messages coming back into client. nc.Flush() // Now add drain state. 
time.Sleep(10 * time.Millisecond) nc.Drain() // Wait for the closed state from nc select { case <-done: errs := atomic.LoadInt32(&errors) if errs > 0 { t.Fatalf("Did not expect any errors, got %d", errs) } r := atomic.LoadInt32(&received) if r != expected { t.Fatalf("Did not receive all messages from Drain, %d vs %d", r, expected) } break case <-time.After(2 * time.Second): t.Fatalf("Timeout waiting for closed state for connection") } } func TestDrainConnLastError(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() done := make(chan bool, 1) closedCb := func(nc *nats.Conn) { done <- true } nc, err := nats.Connect(nats.DefaultURL, nats.ClosedHandler(closedCb), nats.DrainTimeout(time.Millisecond)) if err != nil { t.Fatalf("Failed to create default connection: %v", err) } defer nc.Close() // Override default handler for test. nc.SetErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, _ error) {}) wg := sync.WaitGroup{} wg.Add(1) if _, err := nc.Subscribe("foo", func(_ *nats.Msg) { // So they back up a bit in client to make drain timeout time.Sleep(100 * time.Millisecond) wg.Done() }); err != nil { t.Fatalf("Error creating subscription; %v", err) } if err := nc.Publish("foo", []byte("msg")); err != nil { t.Fatalf("Error on publish: %v", err) } if err := nc.Drain(); err != nil { t.Fatalf("Error on drain: %v", err) } select { case <-done: if e := nc.LastError(); e == nil || e != nats.ErrDrainTimeout { t.Fatalf("Expected last error to be set to %v, got %v", nats.ErrDrainTimeout, e) } case <-time.After(2 * time.Second): t.Fatalf("Timeout waiting for closed state for connection") } // Wait for subscription callback to return wg.Wait() } func TestDrainConnDuringReconnect(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() done := make(chan bool, 1) closedCb := func(nc *nats.Conn) { done <- true } nc, err := nats.Connect(nats.DefaultURL, nats.ClosedHandler(closedCb), nats.DrainTimeout(20*time.Millisecond)) if err != nil { t.Fatalf("Failed to create default 
connection: %v", err) } defer nc.Close() // Shutdown the server. s.Shutdown() waitFor(t, time.Second, 10*time.Millisecond, func() error { if nc.IsReconnecting() { return nil } return errors.New("Not reconnecting yet") }) // This should work correctly. if err := nc.Drain(); err != nats.ErrConnectionReconnecting { t.Fatalf("Unexpected error on drain: %v", err) } // Closed should still fire. select { case <-done: case <-time.After(2 * time.Second): t.Fatalf("Timeout waiting for closed state for connection") } } nats.go-1.41.0/test/enc_test.go000066400000000000000000000663071477351342400163060ustar00rootroot00000000000000// Copyright 2012-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package test import ( "bytes" "context" "fmt" "strings" "testing" "time" "github.com/nats-io/nats.go" "github.com/nats-io/nats.go/encoders/builtin" "github.com/nats-io/nats.go/encoders/protobuf" "github.com/nats-io/nats.go/encoders/protobuf/testdata" ) //lint:file-ignore SA1019 Ignore deprecation warnings for EncodedConn func NewDefaultEConn(t *testing.T) *nats.EncodedConn { ec, err := nats.NewEncodedConn(NewConnection(t, TEST_PORT), nats.DEFAULT_ENCODER) if err != nil { t.Fatalf("Failed to create an encoded connection: %v\n", err) } return ec } func TestEncBuiltinConstructorErrs(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() c := NewConnection(t, TEST_PORT) _, err := nats.NewEncodedConn(nil, "default") if err == nil { t.Fatal("Expected err for nil connection") } _, err = nats.NewEncodedConn(c, "foo22") if err == nil { t.Fatal("Expected err for bad encoder") } c.Close() _, err = nats.NewEncodedConn(c, "default") if err == nil { t.Fatal("Expected err for closed connection") } } func TestEncBuiltinMarshalString(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() ec := NewDefaultEConn(t) defer ec.Close() ch := make(chan bool) testString := "Hello World!" 
ec.Subscribe("enc_string", func(s string) { if s != testString { t.Fatalf("Received test string of '%s', wanted '%s'\n", s, testString) } ch <- true }) ec.Publish("enc_string", testString) if e := Wait(ch); e != nil { if ec.LastError() != nil { e = ec.LastError() } t.Fatalf("Did not receive the message: %s", e) } } func TestEncBuiltinMarshalBytes(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() ec := NewDefaultEConn(t) defer ec.Close() ch := make(chan bool) testBytes := []byte("Hello World!") ec.Subscribe("enc_bytes", func(b []byte) { if !bytes.Equal(b, testBytes) { t.Fatalf("Received test bytes of '%s', wanted '%s'\n", b, testBytes) } ch <- true }) ec.Publish("enc_bytes", testBytes) if e := Wait(ch); e != nil { if ec.LastError() != nil { e = ec.LastError() } t.Fatalf("Did not receive the message: %s", e) } } func TestEncBuiltinMarshalInt(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() ec := NewDefaultEConn(t) defer ec.Close() ch := make(chan bool) testN := 22 ec.Subscribe("enc_int", func(n int) { if n != testN { t.Fatalf("Received test number of %d, wanted %d\n", n, testN) } ch <- true }) ec.Publish("enc_int", testN) if e := Wait(ch); e != nil { if ec.LastError() != nil { e = ec.LastError() } t.Fatalf("Did not receive the message: %s", e) } } func TestEncBuiltinMarshalInt32(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() ec := NewDefaultEConn(t) defer ec.Close() ch := make(chan bool) testN := 22 ec.Subscribe("enc_int", func(n int32) { if n != int32(testN) { t.Fatalf("Received test number of %d, wanted %d\n", n, testN) } ch <- true }) ec.Publish("enc_int", testN) if e := Wait(ch); e != nil { if ec.LastError() != nil { e = ec.LastError() } t.Fatalf("Did not receive the message: %s", e) } } func TestEncBuiltinMarshalInt64(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() ec := NewDefaultEConn(t) defer ec.Close() ch := make(chan bool) testN := 22 ec.Subscribe("enc_int", func(n int64) { if n != 
int64(testN) { t.Fatalf("Received test number of %d, wanted %d\n", n, testN) } ch <- true }) ec.Publish("enc_int", testN) if e := Wait(ch); e != nil { if ec.LastError() != nil { e = ec.LastError() } t.Fatalf("Did not receive the message: %s", e) } } func TestEncBuiltinMarshalFloat32(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() ec := NewDefaultEConn(t) defer ec.Close() ch := make(chan bool) testN := float32(22) ec.Subscribe("enc_float", func(n float32) { if n != testN { t.Fatalf("Received test number of %f, wanted %f\n", n, testN) } ch <- true }) ec.Publish("enc_float", testN) if e := Wait(ch); e != nil { if ec.LastError() != nil { e = ec.LastError() } t.Fatalf("Did not receive the message: %s", e) } } func TestEncBuiltinMarshalFloat64(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() ec := NewDefaultEConn(t) defer ec.Close() ch := make(chan bool) testN := float64(22.22) ec.Subscribe("enc_float", func(n float64) { if n != testN { t.Fatalf("Received test number of %f, wanted %f\n", n, testN) } ch <- true }) ec.Publish("enc_float", testN) if e := Wait(ch); e != nil { if ec.LastError() != nil { e = ec.LastError() } t.Fatalf("Did not receive the message: %s", e) } } func TestEncBuiltinMarshalBool(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() ec := NewDefaultEConn(t) defer ec.Close() ch := make(chan bool) expected := make(chan bool, 1) ec.Subscribe("enc_bool", func(b bool) { val := <-expected if b != val { t.Fatal("Boolean values did not match") } ch <- true }) expected <- false ec.Publish("enc_bool", false) if e := Wait(ch); e != nil { if ec.LastError() != nil { e = ec.LastError() } t.Fatalf("Did not receive the message: %s", e) } expected <- true ec.Publish("enc_bool", true) if e := Wait(ch); e != nil { if ec.LastError() != nil { e = ec.LastError() } t.Fatalf("Did not receive the message: %s", e) } } func TestEncBuiltinExtendedSubscribeCB(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() ec := 
NewDefaultEConn(t) defer ec.Close() ch := make(chan bool) testString := "Hello World!" subject := "cb_args" ec.Subscribe(subject, func(subj, s string) { if s != testString { t.Fatalf("Received test string of '%s', wanted '%s'\n", s, testString) } if subj != subject { t.Fatalf("Received subject of '%s', wanted '%s'\n", subj, subject) } ch <- true }) ec.Publish(subject, testString) if e := Wait(ch); e != nil { if ec.LastError() != nil { e = ec.LastError() } t.Fatalf("Did not receive the message: %s", e) } } func TestEncBuiltinExtendedSubscribeCB2(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() ec := NewDefaultEConn(t) defer ec.Close() ch := make(chan bool) testString := "Hello World!" oSubj := "cb_args" oReply := "foobar" ec.Subscribe(oSubj, func(subj, reply, s string) { if s != testString { t.Fatalf("Received test string of '%s', wanted '%s'\n", s, testString) } if subj != oSubj { t.Fatalf("Received subject of '%s', wanted '%s'\n", subj, oSubj) } if reply != oReply { t.Fatalf("Received reply of '%s', wanted '%s'\n", reply, oReply) } ch <- true }) ec.PublishRequest(oSubj, oReply, testString) if e := Wait(ch); e != nil { if ec.LastError() != nil { e = ec.LastError() } t.Fatalf("Did not receive the message: %s", e) } } func TestEncBuiltinRawMsgSubscribeCB(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() ec := NewDefaultEConn(t) defer ec.Close() ch := make(chan bool) testString := "Hello World!" 
oSubj := "cb_args" oReply := "foobar" ec.Subscribe(oSubj, func(m *nats.Msg) { s := string(m.Data) if s != testString { t.Fatalf("Received test string of '%s', wanted '%s'\n", s, testString) } if m.Subject != oSubj { t.Fatalf("Received subject of '%s', wanted '%s'\n", m.Subject, oSubj) } if m.Reply != oReply { t.Fatalf("Received reply of '%s', wanted '%s'\n", m.Reply, oReply) } ch <- true }) ec.PublishRequest(oSubj, oReply, testString) if e := Wait(ch); e != nil { if ec.LastError() != nil { e = ec.LastError() } t.Fatalf("Did not receive the message: %s", e) } } func TestEncBuiltinRequest(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() ec := NewDefaultEConn(t) defer ec.Close() expectedResp := "I can help!" ec.Subscribe("help", func(subj, reply, req string) { ec.Publish(reply, expectedResp) }) var resp string err := ec.Request("help", "help me", &resp, 1*time.Second) if err != nil { t.Fatalf("Failed at receiving proper response: %v\n", err) } if resp != expectedResp { t.Fatalf("Received reply '%s', wanted '%s'\n", resp, expectedResp) } } func TestEncBuiltinRequestReceivesMsg(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() ec := NewDefaultEConn(t) defer ec.Close() expectedResp := "I can help!" ec.Subscribe("help", func(subj, reply, req string) { ec.Publish(reply, expectedResp) }) var resp nats.Msg err := ec.Request("help", "help me", &resp, 1*time.Second) if err != nil { t.Fatalf("Failed at receiving proper response: %v\n", err) } if string(resp.Data) != expectedResp { t.Fatalf("Received reply '%s', wanted '%s'\n", string(resp.Data), expectedResp) } } func TestEncBuiltinAsyncMarshalErr(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() ec := NewDefaultEConn(t) defer ec.Close() ch := make(chan bool) testString := "Hello World!" subject := "err_marshall" ec.Subscribe(subject, func(subj, num int) { // This will never get called. 
}) ec.Conn.Opts.AsyncErrorCB = func(c *nats.Conn, s *nats.Subscription, err error) { ch <- true } ec.Publish(subject, testString) if e := Wait(ch); e != nil { t.Fatalf("Did not receive the message: %s", e) } } func TestEncBuiltinEncodeNil(t *testing.T) { de := &builtin.DefaultEncoder{} _, err := de.Encode("foo", nil) if err != nil { t.Fatalf("Expected no error encoding nil: %v", err) } } func TestEncBuiltinDecodeDefault(t *testing.T) { de := &builtin.DefaultEncoder{} b, err := de.Encode("foo", 22) if err != nil { t.Fatalf("Expected no error encoding number: %v", err) } var c chan bool err = de.Decode("foo", b, &c) if err == nil { t.Fatalf("Expected an error decoding") } } func TestEncDrainSupported(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() ec := NewDefaultEConn(t) err := ec.Drain() if err != nil { t.Fatalf("Expected no error calling Drain(), got %v", err) } } const ENC_TEST_PORT = 8268 var options = nats.Options{ Url: fmt.Sprintf("nats://127.0.0.1:%d", ENC_TEST_PORT), AllowReconnect: true, MaxReconnect: 10, ReconnectWait: 100 * time.Millisecond, Timeout: nats.DefaultTimeout, } func TestPublishErrorAfterSubscribeDecodeError(t *testing.T) { ts := RunServerOnPort(ENC_TEST_PORT) defer ts.Shutdown() opts := options nc, _ := opts.Connect() defer nc.Close() // Override default handler for test. 
nc.SetErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, _ error) {}) c, _ := nats.NewEncodedConn(nc, nats.JSON_ENCODER) //Test message type type Message struct { Message string } const testSubj = "test" c.Subscribe(testSubj, func(msg *Message) {}) // Publish invalid json to catch decode error in subscription callback c.Publish(testSubj, `foo`) c.Flush() // Next publish should be successful if err := c.Publish(testSubj, Message{"2"}); err != nil { t.Error("Fail to send correct json message after decode error in subscription") } } func TestPublishErrorAfterInvalidPublishMessage(t *testing.T) { ts := RunServerOnPort(ENC_TEST_PORT) defer ts.Shutdown() opts := options nc, _ := opts.Connect() defer nc.Close() c, _ := nats.NewEncodedConn(nc, protobuf.PROTOBUF_ENCODER) const testSubj = "test" c.Publish(testSubj, &testdata.Person{Name: "Anatolii"}) // Publish invalid protobuf message to catch decode error c.Publish(testSubj, "foo") // Next publish with valid protobuf message should be successful if err := c.Publish(testSubj, &testdata.Person{Name: "Anatolii"}); err != nil { t.Error("Fail to send correct protobuf message after invalid message publishing", err) } } func TestVariousFailureConditions(t *testing.T) { ts := RunServerOnPort(ENC_TEST_PORT) defer ts.Shutdown() dch := make(chan bool) opts := options opts.AsyncErrorCB = func(_ *nats.Conn, _ *nats.Subscription, e error) { dch <- true } nc, _ := opts.Connect() nc.Close() if _, err := nats.NewEncodedConn(nil, protobuf.PROTOBUF_ENCODER); err == nil { t.Fatal("Expected an error") } if _, err := nats.NewEncodedConn(nc, protobuf.PROTOBUF_ENCODER); err == nil || err != nats.ErrConnectionClosed { t.Fatalf("Wrong error: %v instead of %v", err, nats.ErrConnectionClosed) } nc, _ = opts.Connect() defer nc.Close() if _, err := nats.NewEncodedConn(nc, "foo"); err == nil { t.Fatal("Expected an error") } c, err := nats.NewEncodedConn(nc, protobuf.PROTOBUF_ENCODER) if err != nil { t.Fatalf("Unable to create encoded connection: %v", 
err) } defer c.Close() if _, err := c.Subscribe("bar", func(subj, obj string) {}); err != nil { t.Fatalf("Unable to create subscription: %v", err) } if err := c.Publish("bar", &testdata.Person{Name: "Ivan"}); err != nil { t.Fatalf("Unable to publish: %v", err) } if err := Wait(dch); err != nil { t.Fatal("Did not get the async error callback") } if err := c.PublishRequest("foo", "bar", "foo"); err == nil { t.Fatal("Expected an error") } if err := c.Request("foo", "foo", nil, 2*time.Second); err == nil { t.Fatal("Expected an error") } nc.Close() if err := c.PublishRequest("foo", "bar", &testdata.Person{Name: "Ivan"}); err == nil { t.Fatal("Expected an error") } resp := &testdata.Person{} if err := c.Request("foo", &testdata.Person{Name: "Ivan"}, resp, 2*time.Second); err == nil { t.Fatal("Expected an error") } if _, err := c.Subscribe("foo", nil); err == nil { t.Fatal("Expected an error") } if _, err := c.Subscribe("foo", func() {}); err == nil { t.Fatal("Expected an error") } func() { defer func() { if r := recover(); r == nil { t.Fatal("Expected an error") } }() if _, err := c.Subscribe("foo", "bar"); err == nil { t.Fatal("Expected an error") } }() } func TesEncodedConnRequest(t *testing.T) { ts := RunServerOnPort(ENC_TEST_PORT) defer ts.Shutdown() dch := make(chan bool) opts := options nc, _ := opts.Connect() defer nc.Close() c, err := nats.NewEncodedConn(nc, protobuf.PROTOBUF_ENCODER) if err != nil { t.Fatalf("Unable to create encoded connection: %v", err) } defer c.Close() sentName := "Ivan" recvName := "Kozlovic" if _, err := c.Subscribe("foo", func(_, reply string, p *testdata.Person) { if p.Name != sentName { t.Fatalf("Got wrong name: %v instead of %v", p.Name, sentName) } c.Publish(reply, &testdata.Person{Name: recvName}) dch <- true }); err != nil { t.Fatalf("Unable to create subscription: %v", err) } if _, err := c.Subscribe("foo", func(_ string, p *testdata.Person) { if p.Name != sentName { t.Fatalf("Got wrong name: %v instead of %v", p.Name, sentName) } 
dch <- true }); err != nil { t.Fatalf("Unable to create subscription: %v", err) } if err := c.Publish("foo", &testdata.Person{Name: sentName}); err != nil { t.Fatalf("Unable to publish: %v", err) } if err := Wait(dch); err != nil { t.Fatal("Did not get message") } if err := Wait(dch); err != nil { t.Fatal("Did not get message") } response := &testdata.Person{} if err := c.Request("foo", &testdata.Person{Name: sentName}, response, 2*time.Second); err != nil { t.Fatalf("Unable to publish: %v", err) } if response.Name != recvName { t.Fatalf("Wrong response: %v instead of %v", response.Name, recvName) } if err := Wait(dch); err != nil { t.Fatal("Did not get message") } if err := Wait(dch); err != nil { t.Fatal("Did not get message") } c2, err := nats.NewEncodedConn(nc, nats.GOB_ENCODER) if err != nil { t.Fatalf("Unable to create encoded connection: %v", err) } defer c2.Close() if _, err := c2.QueueSubscribe("bar", "baz", func(m *nats.Msg) { response := &nats.Msg{Subject: m.Reply, Data: []byte(recvName)} c2.Conn.PublishMsg(response) dch <- true }); err != nil { t.Fatalf("Unable to create subscription: %v", err) } mReply := nats.Msg{} if err := c2.Request("bar", &nats.Msg{Data: []byte(sentName)}, &mReply, 2*time.Second); err != nil { t.Fatalf("Unable to send request: %v", err) } if string(mReply.Data) != recvName { t.Fatalf("Wrong reply: %v instead of %v", string(mReply.Data), recvName) } if err := Wait(dch); err != nil { t.Fatal("Did not get message") } if c.LastError() != nil { t.Fatalf("Unexpected connection error: %v", c.LastError()) } if c2.LastError() != nil { t.Fatalf("Unexpected connection error: %v", c2.LastError()) } } func TestRequestGOB(t *testing.T) { ts := RunServerOnPort(ENC_TEST_PORT) defer ts.Shutdown() type Request struct { Name string } type Person struct { Name string Age int } nc, err := nats.Connect(options.Url) if err != nil { t.Fatalf("Could not connect: %v", err) } defer nc.Close() ec, err := nats.NewEncodedConn(nc, nats.GOB_ENCODER) if err != 
nil { t.Fatalf("Unable to create encoded connection: %v", err) } defer ec.Close() ec.QueueSubscribe("foo.request", "g", func(subject, reply string, r *Request) { if r.Name != "meg" { t.Fatalf("Expected request to be 'meg', got %q", r) } response := &Person{Name: "meg", Age: 21} ec.Publish(reply, response) }) reply := Person{} if err := ec.Request("foo.request", &Request{Name: "meg"}, &reply, time.Second); err != nil { t.Fatalf("Failed to receive response: %v", err) } if reply.Name != "meg" || reply.Age != 21 { t.Fatalf("Did not receive proper response, %+v", reply) } } func TestContextEncodedRequestWithTimeout(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) c, err := nats.NewEncodedConn(nc, nats.JSON_ENCODER) if err != nil { t.Fatalf("Unable to create encoded connection: %v", err) } defer c.Close() deadline := time.Now().Add(100 * time.Millisecond) ctx, cancelCB := context.WithDeadline(context.Background(), deadline) defer cancelCB() // should always be called, not discarded, to prevent context leak type request struct { Message string `json:"message"` } type response struct { Code int `json:"code"` } c.Subscribe("slow", func(_, reply string, req *request) { got := req.Message expected := "Hello" if got != expected { t.Errorf("Expected to receive request with %q, got %q", got, expected) } // simulates latency into the client so that timeout is hit. time.Sleep(40 * time.Millisecond) c.Publish(reply, &response{Code: 200}) }) for i := 0; i < 2; i++ { req := &request{Message: "Hello"} resp := &response{} err := c.RequestWithContext(ctx, "slow", req, resp) if err != nil { t.Fatalf("Expected encoded request with context to not fail: %s", err) } got := resp.Code expected := 200 if got != expected { t.Errorf("Expected to receive %v, got: %v", expected, got) } } // A third request with latency would make the context // reach the deadline. 
req := &request{Message: "Hello"} resp := &response{} err = c.RequestWithContext(ctx, "slow", req, resp) if err == nil { t.Fatal("Expected request with context to reach deadline") } // Reported error is "context deadline exceeded" from Context package, // which implements net.Error Timeout interface. type timeoutError interface { Timeout() bool } timeoutErr, ok := err.(timeoutError) if !ok || !timeoutErr.Timeout() { t.Errorf("Expected to have a timeout error") } expected := `context deadline exceeded` if !strings.Contains(err.Error(), expected) { t.Errorf("Expected %q error, got: %q", expected, err.Error()) } } func TestContextEncodedRequestWithTimeoutCanceled(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) c, err := nats.NewEncodedConn(nc, nats.JSON_ENCODER) if err != nil { t.Fatalf("Unable to create encoded connection: %v", err) } defer c.Close() ctx, cancelCB := context.WithTimeout(context.Background(), 100*time.Millisecond) defer cancelCB() // should always be called, not discarded, to prevent context leak type request struct { Message string `json:"message"` } type response struct { Code int `json:"code"` } c.Subscribe("fast", func(_, reply string, req *request) { got := req.Message expected := "Hello" if got != expected { t.Errorf("Expected to receive request with %q, got %q", got, expected) } // simulates latency into the client so that timeout is hit. time.Sleep(40 * time.Millisecond) c.Publish(reply, &response{Code: 200}) }) // Fast request should not fail req := &request{Message: "Hello"} resp := &response{} c.RequestWithContext(ctx, "fast", req, resp) expectedCode := 200 if resp.Code != expectedCode { t.Errorf("Expected to receive %d, got: %d", expectedCode, resp.Code) } // Cancel the context already so that rest of requests fail. 
cancelCB() err = c.RequestWithContext(ctx, "fast", req, resp) if err == nil { t.Fatal("Expected request with timeout context to fail") } // Reported error is "context canceled" from Context package, // which is not a timeout error. type timeoutError interface { Timeout() bool } if _, ok := err.(timeoutError); ok { t.Errorf("Expected to not have a timeout error") } expected := `context canceled` if !strings.Contains(err.Error(), expected) { t.Errorf("Expected %q error, got: %q", expected, err.Error()) } // 2nd request should fail again even if fast because context has already been canceled err = c.RequestWithContext(ctx, "fast", req, resp) if err == nil { t.Fatal("Expected request with timeout context to fail") } } func TestContextEncodedRequestWithCancel(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) c, err := nats.NewEncodedConn(nc, nats.JSON_ENCODER) if err != nil { t.Fatalf("Unable to create encoded connection: %v", err) } defer c.Close() ctx, cancelCB := context.WithCancel(context.Background()) defer cancelCB() // should always be called, not discarded, to prevent context leak // timer which cancels the context though can also be arbitrarily extended expirationTimer := time.AfterFunc(100*time.Millisecond, func() { cancelCB() }) type request struct { Message string `json:"message"` } type response struct { Code int `json:"code"` } c.Subscribe("slow", func(_, reply string, req *request) { got := req.Message expected := "Hello" if got != expected { t.Errorf("Expected to receive request with %q, got %q", got, expected) } // simulates latency into the client so that timeout is hit. 
time.Sleep(40 * time.Millisecond) c.Publish(reply, &response{Code: 200}) }) c.Subscribe("slower", func(_, reply string, req *request) { got := req.Message expected := "World" if got != expected { t.Errorf("Expected to receive request with %q, got %q", got, expected) } // we know this request will take longer so extend the timeout expirationTimer.Reset(100 * time.Millisecond) // slower reply which would have hit original timeout time.Sleep(90 * time.Millisecond) c.Publish(reply, &response{Code: 200}) }) for i := 0; i < 2; i++ { req := &request{Message: "Hello"} resp := &response{} err := c.RequestWithContext(ctx, "slow", req, resp) if err != nil { t.Fatalf("Expected encoded request with context to not fail: %s", err) } got := resp.Code expected := 200 if got != expected { t.Errorf("Expected to receive %v, got: %v", expected, got) } } // A third request with latency would make the context // get canceled, but these reset the timer so deadline // gets extended: for i := 0; i < 10; i++ { req := &request{Message: "World"} resp := &response{} err := c.RequestWithContext(ctx, "slower", req, resp) if err != nil { t.Fatalf("Expected request with context to not fail: %s", err) } got := resp.Code expected := 200 if got != expected { t.Errorf("Expected to receive %d, got: %d", expected, got) } } req := &request{Message: "Hello"} resp := &response{} // One more slow request will expire the timer and cause an error... err = c.RequestWithContext(ctx, "slow", req, resp) if err == nil { t.Fatal("Expected request with cancellation context to fail") } // ...though reported error is "context canceled" from Context package, // which is not a timeout error. 
type timeoutError interface { Timeout() bool } if _, ok := err.(timeoutError); ok { t.Errorf("Expected to not have a timeout error") } expected := `context canceled` if !strings.Contains(err.Error(), expected) { t.Errorf("Expected %q error, got: %q", expected, err.Error()) } } func TestContextEncodedRequestWithDeadline(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) c, err := nats.NewEncodedConn(nc, nats.JSON_ENCODER) if err != nil { t.Fatalf("Unable to create encoded connection: %v", err) } defer c.Close() deadline := time.Now().Add(100 * time.Millisecond) ctx, cancelCB := context.WithDeadline(context.Background(), deadline) defer cancelCB() // should always be called, not discarded, to prevent context leak type request struct { Message string `json:"message"` } type response struct { Code int `json:"code"` } c.Subscribe("slow", func(_, reply string, req *request) { got := req.Message expected := "Hello" if got != expected { t.Errorf("Expected to receive request with %q, got %q", got, expected) } // simulates latency into the client so that timeout is hit. time.Sleep(40 * time.Millisecond) c.Publish(reply, &response{Code: 200}) }) for i := 0; i < 2; i++ { req := &request{Message: "Hello"} resp := &response{} err := c.RequestWithContext(ctx, "slow", req, resp) if err != nil { t.Fatalf("Expected encoded request with context to not fail: %s", err) } got := resp.Code expected := 200 if got != expected { t.Errorf("Expected to receive %v, got: %v", expected, got) } } // A third request with latency would make the context // reach the deadline. req := &request{Message: "Hello"} resp := &response{} err = c.RequestWithContext(ctx, "slow", req, resp) if err == nil { t.Fatal("Expected request with context to reach deadline") } // Reported error is "context deadline exceeded" from Context package, // which implements net.Error Timeout interface. 
type timeoutError interface { Timeout() bool } timeoutErr, ok := err.(timeoutError) if !ok || !timeoutErr.Timeout() { t.Errorf("Expected to have a timeout error") } expected := `context deadline exceeded` if !strings.Contains(err.Error(), expected) { t.Errorf("Expected %q error, got: %q", expected, err.Error()) } } func TestEncodedContextInvalid(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) c, err := nats.NewEncodedConn(nc, nats.JSON_ENCODER) if err != nil { t.Fatalf("Unable to create encoded connection: %v", err) } defer c.Close() type request struct { Message string `json:"message"` } type response struct { Code int `json:"code"` } req := &request{Message: "Hello"} resp := &response{} //lint:ignore SA1012 testing that passing nil fails err = c.RequestWithContext(nil, "slow", req, resp) if err == nil { t.Fatal("Expected request to fail with error") } if err != nats.ErrInvalidContext { t.Errorf("Expected request to fail with invalid context: %s", err) } } nats.go-1.41.0/test/gob_test.go000066400000000000000000000067521477351342400163060ustar00rootroot00000000000000// Copyright 2012-2019 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package test import ( "reflect" "testing" "github.com/nats-io/nats.go" ) //lint:file-ignore SA1019 Ignore deprecation warnings for EncodedConn func NewGobEncodedConn(tl TestLogger) *nats.EncodedConn { ec, err := nats.NewEncodedConn(NewConnection(tl, TEST_PORT), nats.GOB_ENCODER) if err != nil { tl.Fatalf("Failed to create an encoded connection: %v\n", err) } return ec } func TestEncBuiltinGobMarshalString(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() ec := NewGobEncodedConn(t) defer ec.Close() ch := make(chan bool) testString := "Hello World!" ec.Subscribe("gob_string", func(s string) { if s != testString { t.Fatalf("Received test string of '%s', wanted '%s'\n", s, testString) } ch <- true }) ec.Publish("gob_string", testString) if e := Wait(ch); e != nil { t.Fatal("Did not receive the message") } } func TestEncBuiltinGobMarshalInt(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() ec := NewGobEncodedConn(t) defer ec.Close() ch := make(chan bool) testN := 22 ec.Subscribe("gob_int", func(n int) { if n != testN { t.Fatalf("Received test int of '%d', wanted '%d'\n", n, testN) } ch <- true }) ec.Publish("gob_int", testN) if e := Wait(ch); e != nil { t.Fatal("Did not receive the message") } } func TestEncBuiltinGobMarshalStruct(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() ec := NewGobEncodedConn(t) defer ec.Close() ch := make(chan bool) me := &person{Name: "derek", Age: 22, Address: "140 New Montgomery St"} me.Children = make(map[string]*person) me.Children["sam"] = &person{Name: "sam", Age: 19, Address: "140 New Montgomery St"} me.Children["meg"] = &person{Name: "meg", Age: 17, Address: "140 New Montgomery St"} me.Assets = make(map[string]uint) me.Assets["house"] = 1000 me.Assets["car"] = 100 ec.Subscribe("gob_struct", func(p *person) { if !reflect.DeepEqual(p, me) { t.Fatalf("Did not receive the correct struct response") } ch <- true }) ec.Publish("gob_struct", me) if e := Wait(ch); e != nil { t.Fatal("Did not 
receive the message") } } func BenchmarkPublishGobStruct(b *testing.B) { // stop benchmark for set-up b.StopTimer() s := RunServerOnPort(TEST_PORT) defer s.Shutdown() ec := NewGobEncodedConn(b) defer ec.Close() ch := make(chan bool) me := &person{Name: "derek", Age: 22, Address: "140 New Montgomery St"} me.Children = make(map[string]*person) me.Children["sam"] = &person{Name: "sam", Age: 19, Address: "140 New Montgomery St"} me.Children["meg"] = &person{Name: "meg", Age: 17, Address: "140 New Montgomery St"} ec.Subscribe("gob_struct", func(p *person) { if !reflect.DeepEqual(p, me) { b.Fatalf("Did not receive the correct struct response") } ch <- true }) // resume benchmark b.StartTimer() for n := 0; n < b.N; n++ { ec.Publish("gob_struct", me) if e := Wait(ch); e != nil { b.Fatal("Did not receive the message") } } } nats.go-1.41.0/test/headers_test.go000066400000000000000000000315601477351342400171450ustar00rootroot00000000000000// Copyright 2020-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package test import ( "context" "fmt" "net/http" "reflect" "sort" "sync" "testing" "time" "net/http/httptest" "github.com/nats-io/nats-server/v2/server" natsserver "github.com/nats-io/nats-server/v2/test" "github.com/nats-io/nats.go" ) func TestBasicHeaders(t *testing.T) { s := RunServerOnPort(-1) defer s.Shutdown() nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Error connecting to server: %v", err) } defer nc.Close() subject := "headers.test" sub, err := nc.SubscribeSync(subject) if err != nil { t.Fatalf("Could not subscribe to %q: %v", subject, err) } defer sub.Unsubscribe() m := nats.NewMsg(subject) m.Header.Add("Accept-Encoding", "json") m.Header.Add("Authorization", "s3cr3t") m.Data = []byte("Hello Headers!") nc.PublishMsg(m) msg, err := sub.NextMsg(time.Second) if err != nil { t.Fatalf("Did not receive response: %v", err) } if !m.Equal(msg) { t.Fatalf("Messages did not match! \n%+v\n%+v\n", m, msg) } } func TestRequestMsg(t *testing.T) { s := RunServerOnPort(-1) defer s.Shutdown() nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Error connecting to server: %v", err) } defer nc.Close() subject := "headers.test" sub, err := nc.Subscribe(subject, func(m *nats.Msg) { if m.Header.Get("Hdr-Test") != "1" { m.Respond([]byte("-ERR")) } r := nats.NewMsg(m.Reply) r.Header = m.Header r.Data = []byte("+OK") m.RespondMsg(r) }) if err != nil { t.Fatalf("subscribe failed: %v", err) } defer sub.Unsubscribe() msg := nats.NewMsg(subject) msg.Header.Add("Hdr-Test", "1") resp, err := nc.RequestMsg(msg, time.Second) if err != nil { t.Fatalf("Expected request to be published: %v", err) } if string(resp.Data) != "+OK" { t.Fatalf("Headers were not published to the requestor") } if resp.Header.Get("Hdr-Test") != "1" { t.Fatalf("Did not receive header in response") } if err = nc.PublishMsg(nil); err != nats.ErrInvalidMsg { t.Errorf("Unexpected error: %v", err) } if _, err = nc.RequestMsg(nil, time.Second); err != nats.ErrInvalidMsg { 
t.Errorf("Unexpected error: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond) defer cancel() if _, err = nc.RequestMsgWithContext(ctx, nil); err != nats.ErrInvalidMsg { t.Errorf("Unexpected error: %v", err) } } func TestRequestMsgRaceAsyncInfo(t *testing.T) { s1Opts := natsserver.DefaultTestOptions s1Opts.Host = "127.0.0.1" s1Opts.Port = -1 s1Opts.Cluster.Name = "CLUSTER" s1Opts.Cluster.Host = "127.0.0.1" s1Opts.Cluster.Port = -1 s := RunServerWithOptions(&s1Opts) defer s.Shutdown() nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Error connecting to server: %v", err) } defer nc.Close() // Extra client with old request. nc2, err := nats.Connect(s.ClientURL(), nats.UseOldRequestStyle()) if err != nil { t.Fatalf("Error connecting to server: %v", err) } defer nc2.Close() subject := "headers.test" if _, err := nc.Subscribe(subject, func(m *nats.Msg) { r := nats.NewMsg(m.Reply) r.Header["Hdr-Test"] = []string{"bar"} r.Data = []byte("+OK") m.RespondMsg(r) }); err != nil { t.Fatalf("subscribe failed: %v", err) } nc.Flush() wg := sync.WaitGroup{} wg.Add(1) ch := make(chan struct{}) go func() { defer wg.Done() s2Opts := natsserver.DefaultTestOptions s2Opts.Host = "127.0.0.1" s2Opts.Port = -1 s2Opts.Cluster.Name = "CLUSTER" s2Opts.Cluster.Host = "127.0.0.1" s2Opts.Cluster.Port = -1 s2Opts.Routes = server.RoutesFromStr(fmt.Sprintf("nats://127.0.0.1:%d", s.ClusterAddr().Port)) for { s := RunServerWithOptions(&s2Opts) s.Shutdown() select { case <-ch: return default: } } }() msg := nats.NewMsg(subject) msg.Header["Hdr-Test"] = []string{"quux"} for i := 0; i < 100; i++ { nc.RequestMsg(msg, time.Second) ctx, cancel := context.WithTimeout(context.Background(), time.Second) nc.RequestMsgWithContext(ctx, msg) cancel() // Check with old style requests as well. 
nc2.RequestMsg(msg, time.Second) ctx2, cancel2 := context.WithTimeout(context.Background(), time.Second) nc2.RequestMsgWithContext(ctx2, msg) cancel2() } close(ch) wg.Wait() } func TestNoHeaderSupport(t *testing.T) { opts := natsserver.DefaultTestOptions opts.Port = -1 opts.NoHeaderSupport = true s := RunServerWithOptions(&opts) defer s.Shutdown() nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Error connecting to server: %v", err) } defer nc.Close() m := nats.NewMsg("foo") m.Header.Add("Authorization", "s3cr3t") m.Data = []byte("Hello Headers!") if err := nc.PublishMsg(m); err != nats.ErrHeadersNotSupported { t.Fatalf("Expected an error, got %v", err) } if _, err := nc.RequestMsg(m, time.Second); err != nats.ErrHeadersNotSupported { t.Fatalf("Expected an error, got %v", err) } } func TestMsgHeadersCasePreserving(t *testing.T) { s := RunServerOnPort(-1) defer s.Shutdown() nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Error connecting to server: %v", err) } defer nc.Close() subject := "headers.test" sub, err := nc.SubscribeSync(subject) if err != nil { t.Fatalf("Could not subscribe to %q: %v", subject, err) } defer sub.Unsubscribe() m := nats.NewMsg(subject) // http.Header preserves the original keys and allows case-sensitive // lookup by accessing the map directly. hdr := http.Header{ "CorrelationID": []string{"123"}, "Msg-ID": []string{"456"}, "X-NATS-Keys": []string{"A", "B", "C"}, "X-Test-Keys": []string{"D", "E", "F"}, } // Validate that can be used interchangeably with http.Header type HeaderInterface interface { Add(key, value string) Del(key string) Get(key string) string Set(key, value string) Values(key string) []string } var _ HeaderInterface = http.Header{} var _ HeaderInterface = nats.Header{} // A NATS Header is the same type as http.Header so simple casting // works to use canonical form used in Go HTTP servers if needed, // and it also preserves the same original keys like Go HTTP requests. 
m.Header = nats.Header(hdr) http.Header(m.Header).Set("accept-encoding", "json") http.Header(m.Header).Add("AUTHORIZATION", "s3cr3t") // Multi Value using the same matching key. m.Header.Set("X-Test", "First") m.Header.Add("X-Test", "Second") m.Header.Add("X-Test", "Third") m.Data = []byte("Simple Headers") nc.PublishMsg(m) msg, err := sub.NextMsg(time.Second) if err != nil { t.Fatalf("Did not receive response: %v", err) } // Confirm that received message is just like the one originally sent. if !m.Equal(msg) { t.Fatalf("Messages did not match! \n%+v\n%+v\n", m, msg) } for _, test := range []struct { Header string Values []string }{ {"Accept-Encoding", []string{"json"}}, {"Authorization", []string{"s3cr3t"}}, {"X-Test", []string{"First", "Second", "Third"}}, {"CorrelationID", []string{"123"}}, {"Msg-ID", []string{"456"}}, {"X-NATS-Keys", []string{"A", "B", "C"}}, {"X-Test-Keys", []string{"D", "E", "F"}}, } { // Accessing directly will always work. v1, ok := msg.Header[test.Header] if !ok { t.Errorf("Expected %v to be present", test.Header) } if len(v1) != len(test.Values) { t.Errorf("Expected %v values in header, got: %v", len(test.Values), len(v1)) } // Exact match is preferred and fastest for Get. v2 := msg.Header.Get(test.Header) if v2 == "" { t.Errorf("Expected %v to be present", test.Header) } if v1[0] != v2 { t.Errorf("Expected: %s, got: %v", v1, v2) } for k, val := range test.Values { hdr := msg.Header[test.Header] vv := hdr[k] if val != vv { t.Errorf("Expected %v values in header, got: %v", val, vv) } } if len(test.Values) > 1 { if !reflect.DeepEqual(test.Values, msg.Header.Values(test.Header)) { t.Fatalf("Headers did not match! \n%+v\n%+v\n", test.Values, msg.Header.Values(test.Header)) } } else { got := msg.Header.Get(test.Header) expected := test.Values[0] if got != expected { t.Errorf("Expected %v, got:%v", expected, got) } } } // Validate that headers processed by HTTP requests are not changed by NATS through many hops. 
errCh := make(chan error, 2) msgCh := make(chan *nats.Msg, 1) sub, err = nc.Subscribe("nats.svc.A", func(msg *nats.Msg) { hdr := msg.Header["x-trace-id"] hdr = append(hdr, "A") msg.Header["x-trace-id"] = hdr msg.Header.Add("X-Result-A", "A") msg.Subject = "nats.svc.B" resp, err := nc.RequestMsg(msg, 2*time.Second) if err != nil { errCh <- err return } resp.Subject = msg.Reply err = nc.PublishMsg(resp) if err != nil { errCh <- err return } }) if err != nil { t.Fatal(err) } defer sub.Unsubscribe() sub, err = nc.Subscribe("nats.svc.B", func(msg *nats.Msg) { hdr := msg.Header["x-trace-id"] hdr = append(hdr, "B") msg.Header["x-trace-id"] = hdr msg.Header.Add("X-Result-B", "B") msg.Subject = msg.Reply msg.Data = []byte("OK!") err := nc.PublishMsg(msg) if err != nil { errCh <- err return } }) if err != nil { t.Fatal(err) } defer sub.Unsubscribe() ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { msg := nats.NewMsg("nats.svc.A") msg.Header = nats.Header(r.Header.Clone()) msg.Header["x-trace-id"] = []string{"S"} msg.Header["Result-ID"] = []string{"OK"} resp, err := nc.RequestMsg(msg, 2*time.Second) if err != nil { errCh <- err return } msgCh <- resp for k, v := range resp.Header { w.Header()[k] = v } // Remove Date from response header for testing. 
w.Header()["Date"] = nil w.WriteHeader(200) fmt.Fprintln(w, string(resp.Data)) })) defer ts.Close() req, err := http.NewRequest("GET", ts.URL, nil) if err != nil { t.Fatal(err) } client := &http.Client{Timeout: 2 * time.Second} resp, err := client.Do(req) if err != nil { t.Fatal(err) } resp.Body.Close() result := resp.Header.Get("X-Result-A") if result != "A" { t.Errorf("Unexpected header value, got: %+v", result) } result = resp.Header.Get("X-Result-B") if result != "B" { t.Errorf("Unexpected header value, got: %+v", result) } select { case <-time.After(1 * time.Second): t.Fatal("Timeout waiting for message.") case err = <-errCh: if err != nil { t.Fatal(err) } case msg = <-msgCh: } if len(msg.Header) != 6 { t.Errorf("Wrong number of headers in NATS message, got: %v", len(msg.Header)) } v, ok := msg.Header["x-trace-id"] if !ok { t.Fatal("Missing headers in message") } if !reflect.DeepEqual(v, []string{"S", "A", "B"}) { t.Fatal("Missing headers in message") } for _, key := range []string{"x-trace-id"} { v = msg.Header.Values(key) if v == nil { t.Fatal("Missing headers in message") } if !reflect.DeepEqual(v, []string{"S", "A", "B"}) { t.Fatal("Missing headers in message") } } t.Run("multi value header", func(t *testing.T) { getHeader := func() nats.Header { return nats.Header{ "foo": []string{"A"}, "Foo": []string{"B"}, "FOO": []string{"C"}, } } hdr := getHeader() got := hdr.Get("foo") expected := "A" if got != expected { t.Errorf("Expected: %v, got: %v", expected, got) } got = hdr.Get("Foo") expected = "B" if got != expected { t.Errorf("Expected: %v, got: %v", expected, got) } got = hdr.Get("FOO") expected = "C" if got != expected { t.Errorf("Expected: %v, got: %v", expected, got) } // No match. got = hdr.Get("fOo") if got != "" { t.Errorf("Unexpected result, got: %v", got) } // Only match explicitly. 
for _, test := range []struct { key string expectedValues []string }{ {"foo", []string{"A"}}, {"Foo", []string{"B"}}, {"FOO", []string{"C"}}, {"fOO", nil}, {"foO", nil}, } { t.Run("", func(t *testing.T) { hdr := getHeader() result := hdr.Values(test.key) sort.Strings(result) if !reflect.DeepEqual(result, test.expectedValues) { t.Errorf("Expected: %+v, got: %+v", test.expectedValues, result) } if hdr.Get(test.key) == "" { return } // Cleanup all the matching keys. hdr.Del(test.key) got := len(hdr) expected := 2 if got != expected { t.Errorf("Expected: %v, got: %v", expected, got) } result = hdr.Values(test.key) if result != nil { t.Errorf("Expected to cleanup all matching keys, got: %+v", result) } if v := hdr.Get(test.key); v != "" { t.Errorf("Expected to cleanup all matching keys, got: %v", v) } }) } }) } nats.go-1.41.0/test/helper_test.go000066400000000000000000000100771477351342400170110ustar00rootroot00000000000000// Copyright 2015-2019 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package test import ( "errors" "fmt" "os" "runtime" "strings" "testing" "time" "github.com/nats-io/nats-server/v2/server" "github.com/nats-io/nats.go" natsserver "github.com/nats-io/nats-server/v2/test" ) const TEST_PORT = 8368 // So that we can pass tests and benchmarks... type tLogger interface { Fatalf(format string, args ...any) Errorf(format string, args ...any) } // TestLogger type TestLogger tLogger // Dumb wait program to sync on callbacks, etc... 
Will timeout func Wait(ch chan bool) error { return WaitTime(ch, 5*time.Second) } // Wait for a chan with a timeout. func WaitTime(ch chan bool, timeout time.Duration) error { select { case <-ch: return nil case <-time.After(timeout): } return errors.New("timeout") } func WaitOnChannel[T comparable](t *testing.T, ch <-chan T, expected T) { t.Helper() select { case s := <-ch: if s != expected { t.Fatalf("Expected result: %v; got: %v", expected, s) } case <-time.After(5 * time.Second): t.Fatalf("Timeout waiting for result %v", expected) } } func stackFatalf(t tLogger, f string, args ...any) { lines := make([]string, 0, 32) msg := fmt.Sprintf(f, args...) lines = append(lines, msg) // Generate the Stack of callers: Skip us and verify* frames. for i := 1; true; i++ { _, file, line, ok := runtime.Caller(i) if !ok { break } msg := fmt.Sprintf("%d - %s:%d", i, file, line) lines = append(lines, msg) } t.Fatalf("%s", strings.Join(lines, "\n")) } //////////////////////////////////////////////////////////////////////////////// // Creating client connections //////////////////////////////////////////////////////////////////////////////// // NewDefaultConnection func NewDefaultConnection(t tLogger) *nats.Conn { return NewConnection(t, nats.DefaultPort) } // NewConnection forms connection on a given port. func NewConnection(t tLogger, port int) *nats.Conn { url := fmt.Sprintf("nats://127.0.0.1:%d", port) nc, err := nats.Connect(url) if err != nil { t.Fatalf("Failed to create default connection: %v\n", err) return nil } return nc } //////////////////////////////////////////////////////////////////////////////// // Running nats server in separate Go routines //////////////////////////////////////////////////////////////////////////////// // RunDefaultServer will run a server on the default port. func RunDefaultServer() *server.Server { return RunServerOnPort(nats.DefaultPort) } // RunServerOnPort will run a server on the given port. 
func RunServerOnPort(port int) *server.Server { opts := natsserver.DefaultTestOptions opts.Port = port opts.Cluster.Name = "testing" return RunServerWithOptions(&opts) } // RunServerWithOptions will run a server with the given options. func RunServerWithOptions(opts *server.Options) *server.Server { return natsserver.RunServer(opts) } // RunServerWithConfig will run a server with the given configuration file. func RunServerWithConfig(configFile string) (*server.Server, *server.Options) { return natsserver.RunServerWithConfig(configFile) } func RunBasicJetStreamServer() *server.Server { opts := natsserver.DefaultTestOptions opts.Port = -1 opts.JetStream = true return RunServerWithOptions(&opts) } func createConfFile(t *testing.T, content []byte) string { t.Helper() conf, err := os.CreateTemp("", "") if err != nil { t.Fatalf("Error creating conf file: %v", err) } fName := conf.Name() conf.Close() if err := os.WriteFile(fName, content, 0666); err != nil { os.Remove(fName) t.Fatalf("Error writing conf file: %v", err) } return fName } nats.go-1.41.0/test/js_internal_test.go000066400000000000000000000261501477351342400200410ustar00rootroot00000000000000// Copyright 2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build internal_testing package test import ( "crypto/sha256" "encoding/base64" "fmt" "math/rand" "strings" "sync/atomic" "testing" "time" "github.com/nats-io/nats.go" ) // Need access to internals for loss testing. 
func TestJetStreamOrderedConsumer(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() var err error _, err = js.AddStream(&nats.StreamConfig{ Name: "OBJECT", Subjects: []string{"a"}, Storage: nats.MemoryStorage, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } // Will be used as start time to validate proper reset to sequence on retries. startTime := time.Now() // Create a sample asset. msg := make([]byte, 1024*1024) rand.Read(msg) msg = []byte(base64.StdEncoding.EncodeToString(msg)) mlen, sum := len(msg), sha256.Sum256(msg) // Now send into the stream as chunks. const chunkSize = 1024 for i := 0; i < mlen; i += chunkSize { var chunk []byte if mlen-i <= chunkSize { chunk = msg[i:] } else { chunk = msg[i : i+chunkSize] } msg := nats.NewMsg("a") msg.Data = chunk msg.Header.Set("data", "true") js.PublishMsgAsync(msg) } js.PublishAsync("a", nil) // eof select { case <-js.PublishAsyncComplete(): case <-time.After(time.Second): t.Fatalf("Did not receive completion signal") } // Do some tests on simple misconfigurations first. // For ordered delivery a couple of things need to be set properly. // Can't be durable or have ack policy that is not ack none or max deliver set. 
_, err = js.SubscribeSync("a", nats.OrderedConsumer(), nats.Durable("dlc")) if err == nil || !strings.Contains(err.Error(), "ordered consumer") { t.Fatalf("Expected an error, got %v", err) } _, err = js.SubscribeSync("a", nats.OrderedConsumer(), nats.AckExplicit()) if err == nil || !strings.Contains(err.Error(), "ordered consumer") { t.Fatalf("Expected an error, got %v", err) } _, err = js.SubscribeSync("a", nats.OrderedConsumer(), nats.MaxDeliver(10)) if err == nil || !strings.Contains(err.Error(), "ordered consumer") { t.Fatalf("Expected an error, got %v", err) } _, err = js.SubscribeSync("a", nats.OrderedConsumer(), nats.DeliverSubject("some.subject")) if err == nil || !strings.Contains(err.Error(), "ordered consumer") { t.Fatalf("Expected an error, got %v", err) } si, err := js.StreamInfo("OBJECT") if err != nil { t.Fatalf("Unexpected error: %v", err) } testConsumer := func() { t.Helper() var received uint32 var rmsg []byte done := make(chan bool, 1) cb := func(m *nats.Msg) { // Check for eof if len(m.Data) == 0 { done <- true return } atomic.AddUint32(&received, 1) rmsg = append(rmsg, m.Data...) } // OrderedConsumer does not need HB, it sets it on its own, but for test we override which is ok. sub, err := js.Subscribe("a", cb, nats.OrderedConsumer(), nats.IdleHeartbeat(250*time.Millisecond), nats.StartTime(startTime)) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer sub.Unsubscribe() select { case <-done: if rsum := sha256.Sum256(rmsg); rsum != sum { t.Fatalf("Objects do not match") } case <-time.After(5 * time.Second): t.Fatalf("Did not receive all chunks, only %d of %d total", atomic.LoadUint32(&received), si.State.Msgs-1) } } testSyncConsumer := func() { t.Helper() var received int var rmsg []byte // OrderedConsumer does not need HB, it sets it on its own, but for test we override which is ok. 
sub, err := js.SubscribeSync("a", nats.OrderedConsumer(), nats.IdleHeartbeat(250*time.Millisecond), nats.StartTime(startTime)) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer sub.Unsubscribe() var done bool expires := time.Now().Add(5 * time.Second) for time.Now().Before(expires) { m, err := sub.NextMsg(time.Second) if err != nil { t.Fatalf("Unexpected error: %v", err) } if len(m.Data) == 0 { done = true break } received++ rmsg = append(rmsg, m.Data...) } if !done { t.Fatalf("Did not receive all chunks, only %d of %d total", received, si.State.Msgs-1) } if rsum := sha256.Sum256(rmsg); rsum != sum { t.Fatalf("Objects do not match") } } // Now run normal test. testConsumer() testSyncConsumer() // Now introduce some loss. singleLoss := func(m *nats.Msg) *nats.Msg { if rand.Intn(100) <= 10 && m.Header.Get("data") != "" { nc.RemoveMsgFilter("a") return nil } return m } nc.AddMsgFilter("a", singleLoss) testConsumer() nc.AddMsgFilter("a", singleLoss) testSyncConsumer() multiLoss := func(m *nats.Msg) *nats.Msg { if rand.Intn(100) <= 10 && m.Header.Get("data") != "" { return nil } return m } nc.AddMsgFilter("a", multiLoss) testConsumer() testSyncConsumer() firstOnly := func(m *nats.Msg) *nats.Msg { if meta, err := m.Metadata(); err == nil { if meta.Sequence.Consumer == 1 { nc.RemoveMsgFilter("a") return nil } } return m } nc.AddMsgFilter("a", firstOnly) testConsumer() nc.AddMsgFilter("a", firstOnly) testSyncConsumer() lastOnly := func(m *nats.Msg) *nats.Msg { if meta, err := m.Metadata(); err == nil { if meta.Sequence.Stream >= si.State.LastSeq-1 { nc.RemoveMsgFilter("a") return nil } } return m } nc.AddMsgFilter("a", lastOnly) testConsumer() nc.AddMsgFilter("a", lastOnly) testSyncConsumer() } func TestJetStreamOrderedConsumerWithAutoUnsub(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() var err error _, err = js.AddStream(&nats.StreamConfig{ Name: "OBJECT", Subjects: 
[]string{"a"}, Storage: nats.MemoryStorage, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } count := int32(0) sub, err := js.Subscribe("a", func(m *nats.Msg) { atomic.AddInt32(&count, 1) }, nats.OrderedConsumer(), nats.IdleHeartbeat(250*time.Millisecond)) if err != nil { t.Fatalf("Unexpected error: %v", err) } // Ask to auto-unsub after 10 messages. sub.AutoUnsubscribe(10) // Set a message filter that will drop 1 message dm := 0 singleLoss := func(m *nats.Msg) *nats.Msg { if m.Header.Get("data") != "" { dm++ if dm == 5 { nc.RemoveMsgFilter("a") return nil } } return m } nc.AddMsgFilter("a", singleLoss) // Now produce 20 messages for i := 0; i < 20; i++ { msg := nats.NewMsg("a") msg.Data = []byte(fmt.Sprintf("msg_%d", i+1)) msg.Header.Set("data", "true") js.PublishMsgAsync(msg) } select { case <-js.PublishAsyncComplete(): case <-time.After(time.Second): t.Fatalf("Did not receive completion signal") } // Wait for the subscription to be marked as invalid deadline := time.Now().Add(time.Second) ok := false for time.Now().Before(deadline) { if !sub.IsValid() { ok = true break } } if !ok { t.Fatalf("Subscription still valid") } // Wait a bit to make sure we are not receiving more than expected, // and give a chance for the server to process the auto-unsub // protocol. time.Sleep(500 * time.Millisecond) if n := atomic.LoadInt32(&count); n != 10 { t.Fatalf("Sub should have received only 10 messages, got %v", n) } // Now capture the in msgs count for the connection inMsgs := nc.Stats().InMsgs // Send one more message and this count should not increase if the // server had properly processed the auto-unsub after the // reset of the ordered consumer. Use a different connection // to send. 
nc2, js2 := jsClient(t, s) defer nc2.Close() js2.Publish("a", []byte("should not be received")) newInMsgs := nc.Stats().InMsgs if inMsgs != newInMsgs { t.Fatal("Seems that AUTO-UNSUB was not properly handled") } } func TestJetStreamSubscribeReconnect(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) rch := make(chan struct{}, 1) nc, err := nats.Connect(s.ClientURL(), nats.ReconnectWait(50*time.Millisecond), nats.ReconnectHandler(func(_ *nats.Conn) { select { case rch <- struct{}{}: default: } })) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() js, err := nc.JetStream(nats.MaxWait(250 * time.Millisecond)) if err != nil { t.Fatalf("Unexpected error: %v", err) } // Create the stream using our client API. _, err = js.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"foo"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } sub, err := js.SubscribeSync("foo", nats.Durable("bar")) if err != nil { t.Fatalf("Error on subscribe: %v", err) } sendAndReceive := func(msgContent string) { t.Helper() var ok bool var err error for i := 0; i < 5; i++ { if _, err = js.Publish("foo", []byte(msgContent)); err != nil { time.Sleep(250 * time.Millisecond) continue } ok = true break } if !ok { t.Fatalf("Error on publish: %v", err) } msg, err := sub.NextMsg(time.Second) if err != nil { t.Fatal("Did not get message") } if string(msg.Data) != msgContent { t.Fatalf("Unexpected content: %q", msg.Data) } if err := msg.AckSync(); err != nil { t.Fatalf("Error on ack: %v", err) } } sendAndReceive("msg1") // Cause a disconnect... 
nc.CloseTCPConn() // Wait for reconnect select { case <-rch: case <-time.After(time.Second): t.Fatal("Did not reconnect") } // Make sure we can send and receive the msg sendAndReceive("msg2") } func TestJetStreamFlowControlStalled(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() var err error _, err = js.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"a"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } if _, err := js.SubscribeSync("a", nats.DeliverSubject("ds"), nats.Durable("dur"), nats.IdleHeartbeat(200*time.Millisecond), nats.EnableFlowControl()); err != nil { t.Fatalf("Error on subscribe: %v", err) } // Drop all incoming FC control messages. jsCtrlFC := 2 fcLoss := func(m *nats.Msg) *nats.Msg { if _, ctrlType := nats.IsJSControlMessage(m); ctrlType == jsCtrlFC { return nil } return m } nc.AddMsgFilter("ds", fcLoss) // Have a subscription on the FC subject to make sure that the library // respond to the requests for un-stall checkSub, err := nc.SubscribeSync("$JS.FC.>") if err != nil { t.Fatalf("Error on sub: %v", err) } // Publish bunch of messages. payload := make([]byte, 100*1024) for i := 0; i < 250; i++ { nc.Publish("a", payload) } // Now wait that we respond to a stalled FC if _, err := checkSub.NextMsg(2 * time.Second); err != nil { t.Fatal("Library did not send FC") } } nats.go-1.41.0/test/js_test.go000066400000000000000000011071601477351342400161470ustar00rootroot00000000000000// Copyright 2020-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package test import ( "context" "crypto/rand" "encoding/json" "errors" "fmt" mrand "math/rand" "net" "net/url" "os" "reflect" "strconv" "strings" "sync" "sync/atomic" "testing" "time" "github.com/nats-io/nats-server/v2/server" "github.com/nats-io/nats.go" "github.com/nats-io/nuid" natsserver "github.com/nats-io/nats-server/v2/test" ) func shutdownJSServerAndRemoveStorage(t *testing.T, s *server.Server) { t.Helper() var sd string if config := s.JetStreamConfig(); config != nil { sd = config.StoreDir } s.Shutdown() if sd != "" { if err := os.RemoveAll(sd); err != nil { t.Fatalf("Unable to remove storage %q: %v", sd, err) } } s.WaitForShutdown() } func restartBasicJSServer(t *testing.T, s *server.Server) *server.Server { opts := natsserver.DefaultTestOptions clientURL, err := url.Parse(s.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } port, err := strconv.Atoi(clientURL.Port()) if err != nil { t.Fatalf("Unexpected error: %v", err) } opts.Port = port opts.JetStream = true opts.StoreDir = s.JetStreamConfig().StoreDir s.Shutdown() s.WaitForShutdown() return RunServerWithOptions(&opts) } func TestJetStreamNotEnabled(t *testing.T) { s := RunServerOnPort(-1) defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() if _, err := js.AccountInfo(); err != nats.ErrJetStreamNotEnabled { t.Fatalf("Did not get the proper error, got %v", err) } } func TestJetStreamErrors(t *testing.T) { t.Run("API error", func(t *testing.T) { conf := createConfFile(t, []byte(` listen: 127.0.0.1:-1 no_auth_user: rip jetstream: {max_mem_store: 
64GB, max_file_store: 10TB} accounts: { JS: { jetstream: enabled users: [ {user: dlc, password: foo} ] }, IU: { users: [ {user: rip, password: bar} ] }, } `)) defer os.Remove(conf) s, _ := RunServerWithConfig(conf) defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() _, err := js.AccountInfo() // check directly to var (backwards compatible) if err != nats.ErrJetStreamNotEnabledForAccount { t.Fatalf("Did not get the proper error, got %v", err) } // matching via errors.Is if ok := errors.Is(err, nats.ErrJetStreamNotEnabledForAccount); !ok { t.Fatal("Expected ErrJetStreamNotEnabledForAccount") } // matching wrapped via error.Is err2 := fmt.Errorf("custom error: %w", nats.ErrJetStreamNotEnabledForAccount) if ok := errors.Is(err2, nats.ErrJetStreamNotEnabledForAccount); !ok { t.Fatal("Expected wrapped ErrJetStreamNotEnabled") } // via classic type assertion. jserr, ok := err.(nats.JetStreamError) if !ok { t.Fatal("Expected a JetStreamError") } expected := nats.JSErrCodeJetStreamNotEnabledForAccount if jserr.APIError().ErrorCode != expected { t.Fatalf("Expected: %v, got: %v", expected, jserr.APIError().ErrorCode) } if jserr.APIError() == nil { t.Fatal("Expected APIError") } // matching to interface via errors.As(...) var apierr nats.JetStreamError ok = errors.As(err, &apierr) if !ok { t.Fatal("Expected a JetStreamError") } if apierr.APIError() == nil { t.Fatal("Expected APIError") } if apierr.APIError().ErrorCode != expected { t.Fatalf("Expected: %v, got: %v", expected, apierr.APIError().ErrorCode) } expectedMessage := "nats: jetstream not enabled for account" if apierr.Error() != expectedMessage { t.Fatalf("Expected: %v, got: %v", expectedMessage, apierr.Error()) } // an APIError also implements the JetStreamError interface. var _ nats.JetStreamError = &nats.APIError{} // matching arbitrary custom error via errors.Is(...) 
customErr := &nats.APIError{ErrorCode: expected} if ok := errors.Is(customErr, nats.ErrJetStreamNotEnabledForAccount); !ok { t.Fatal("Expected wrapped ErrJetStreamNotEnabledForAccount") } customErr = &nats.APIError{ErrorCode: 1} if ok := errors.Is(customErr, nats.ErrJetStreamNotEnabledForAccount); ok { t.Fatal("Expected to not match ErrJetStreamNotEnabled") } var cerr nats.JetStreamError if ok := errors.As(customErr, &cerr); !ok { t.Fatal("Expected custom error to be a JetStreamError") } // matching to concrete type via errors.As(...) var aerr *nats.APIError ok = errors.As(err, &aerr) if !ok { t.Fatal("Expected an APIError") } if aerr.ErrorCode != expected { t.Fatalf("Expected: %v, got: %v", expected, aerr.ErrorCode) } expectedMessage = "nats: jetstream not enabled for account" if aerr.Error() != expectedMessage { t.Fatalf("Expected: %v, got: %v", expectedMessage, apierr.Error()) } }) t.Run("test non-api error", func(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() // stream with empty name _, err := js.AddStream(&nats.StreamConfig{}) if err == nil { t.Fatalf("Expected error, got nil") } // check directly to var (backwards compatible) if err != nats.ErrStreamNameRequired { t.Fatalf("Expected: %v; got: %v", nats.ErrInvalidStreamName, err) } // matching via errors.Is if ok := errors.Is(err, nats.ErrStreamNameRequired); !ok { t.Fatalf("Expected: %v; got: %v", nats.ErrStreamNameRequired, err) } // matching wrapped via error.Is err2 := fmt.Errorf("custom error: %w", nats.ErrStreamNameRequired) if ok := errors.Is(err2, nats.ErrStreamNameRequired); !ok { t.Fatal("Expected wrapped ErrStreamNameRequired") } // via classic type assertion. jserr, ok := err.(nats.JetStreamError) if !ok { t.Fatal("Expected a JetStreamError") } if jserr.APIError() != nil { t.Fatalf("Expected: empty APIError; got: %v", jserr.APIError()) } // matching to interface via errors.As(...) 
var jserr2 nats.JetStreamError ok = errors.As(err, &jserr2) if !ok { t.Fatal("Expected a JetStreamError") } if jserr2.APIError() != nil { t.Fatalf("Expected: empty APIError; got: %v", jserr2.APIError()) } expectedMessage := "nats: stream name is required" if jserr2.Error() != expectedMessage { t.Fatalf("Expected: %v, got: %v", expectedMessage, jserr2.Error()) } // matching to concrete type via errors.As(...) var aerr *nats.APIError ok = errors.As(err, &aerr) if ok { t.Fatal("Expected ErrStreamNameRequired not to map to APIError") } }) } func TestJetStreamPublish(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() var err error // Make sure we get a proper failure when no stream is present. _, err = js.Publish("foo", []byte("Hello JS")) if err != nats.ErrNoStreamResponse { t.Fatalf("Expected a no stream error but got %v", err) } // Create the stream using our client API. si, err := js.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"test", "foo", "bar"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } // Double check that file-based storage is default. if si.Config.Storage != nats.FileStorage { t.Fatalf("Expected FileStorage as default, got %v", si.Config.Storage) } // Lookup the stream for testing. 
_, err = js.StreamInfo("TEST") if err != nil { t.Fatalf("stream lookup failed: %v", err) } var pa *nats.PubAck expect := func(seq, nmsgs uint64) { t.Helper() if seq > 0 && pa == nil { t.Fatalf("Missing pubAck to test sequence %d", seq) } if pa != nil { if pa.Stream != "TEST" { t.Fatalf("Wrong stream name, expected %q, got %q", "TEST", pa.Stream) } if seq > 0 && pa.Sequence != seq { t.Fatalf("Wrong stream sequence, expected %d, got %d", seq, pa.Sequence) } } stream, err := js.StreamInfo("TEST") if err != nil { t.Fatalf("stream lookup failed: %v", err) } if stream.State.Msgs != nmsgs { t.Fatalf("Expected %d messages, got %d", nmsgs, stream.State.Msgs) } } msg := []byte("Hello JS") // Basic publish like NATS core. pa, err = js.Publish("foo", msg) if err != nil { t.Fatalf("Unexpected publish error: %v", err) } expect(1, 1) // Test stream expectation. pa, err = js.Publish("foo", msg, nats.ExpectStream("ORDERS")) if err == nil || !strings.Contains(err.Error(), "stream does not match") { t.Fatalf("Expected an error, got %v", err) } // Test last sequence expectation. pa, err = js.Publish("foo", msg, nats.ExpectLastSequence(10)) if err == nil || !strings.Contains(err.Error(), "wrong last sequence") { t.Fatalf("Expected an error, got %v", err) } // Messages should have been rejected. expect(0, 1) // Using PublishMsg API and accessing directly the Header map. msg2 := nats.NewMsg("foo") msg2.Header[nats.ExpectedLastSeqHdr] = []string{"10"} pa, err = js.PublishMsg(msg2) if err == nil || !strings.Contains(err.Error(), "wrong last sequence") { t.Fatalf("Expected an error, got %v", err) } // Messages should have been rejected. expect(0, 1) // Send in a stream with a msgId pa, err = js.Publish("foo", msg, nats.MsgId("ZZZ")) if err != nil { t.Fatalf("Unexpected publish error: %v", err) } expect(2, 2) // Send in the same message with same msgId. 
pa, err = js.Publish("foo", msg, nats.MsgId("ZZZ")) if err != nil { t.Fatalf("Unexpected publish error: %v", err) } if pa.Sequence != 2 { t.Fatalf("Expected sequence of 2, got %d", pa.Sequence) } if !pa.Duplicate { t.Fatalf("Expected duplicate to be set") } expect(2, 2) // Now try to send one in with the wrong last msgId. pa, err = js.Publish("foo", msg, nats.ExpectLastMsgId("AAA")) if err == nil || !strings.Contains(err.Error(), "wrong last msg") { t.Fatalf("Expected an error, got %v", err) } // Make sure expected sequence works. pa, err = js.Publish("foo", msg, nats.ExpectLastSequence(22)) if err == nil || !strings.Contains(err.Error(), "wrong last sequence") { t.Fatalf("Expected an error, got %v", err) } expect(0, 2) // This should work ok. pa, err = js.Publish("foo", msg, nats.ExpectLastSequence(2)) if err != nil { t.Fatalf("Unexpected publish error: %v", err) } expect(3, 3) // JetStream Headers are case-sensitive right now, // so this will not activate the check. msg3 := nats.NewMsg("foo") msg3.Header["nats-expected-last-sequence"] = []string{"4"} pa, err = js.PublishMsg(msg3) if err != nil { t.Fatalf("Expected an error, got %v", err) } expect(4, 4) // Now test context and timeouts. // Both set should fail. ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() _, err = js.Publish("foo", msg, nats.AckWait(time.Second), nats.Context(ctx)) if err != nats.ErrContextAndTimeout { t.Fatalf("Expected %q, got %q", nats.ErrContextAndTimeout, err) } // Create dummy listener for timeout and context tests. sub, err := nc.SubscribeSync("baz") if err != nil { t.Fatal(err) } defer sub.Unsubscribe() _, err = js.Publish("baz", msg, nats.AckWait(time.Nanosecond)) if err != nats.ErrTimeout { t.Fatalf("Expected %q, got %q", nats.ErrTimeout, err) } go cancel() _, err = js.Publish("baz", msg, nats.Context(ctx)) if err != context.Canceled { t.Fatalf("Expected %q, got %q", context.Canceled, err) } // Test ExpectLastSequencePerSubject. 
Just make sure that we set the header. sub, err = nc.SubscribeSync("test") if err != nil { t.Fatalf("Error on subscribe: %v", err) } js.Publish("test", []byte("msg"), nats.ExpectLastSequencePerSubject(1)) m, err := sub.NextMsg(time.Second) if err != nil { t.Fatalf("Error on next msg: %v", err) } if m.Header.Get(nats.ExpectedLastSubjSeqHdr) != "1" { t.Fatalf("Header ExpectLastSequencePerSubject not set: %+v", m.Header) } } func TestPublishWithTTL(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, js := jsClient(t, srv) defer nc.Close() _, err := js.AddStream(&nats.StreamConfig{ Name: "foo", Subjects: []string{"FOO.*"}, MaxMsgSize: 64, AllowMsgTTL: true}) if err != nil { t.Fatalf("Unexpected error: %v", err) } ack, err := js.Publish("FOO.1", []byte("msg"), nats.MsgTTL(1*time.Second)) if err != nil { t.Fatalf("Unexpected error: %v", err) } gotMsg, err := js.GetMsg("foo", ack.Sequence) if err != nil { t.Fatalf("Unexpected error: %v", err) } if ttl := gotMsg.Header.Get("Nats-TTL"); ttl != "1s" { t.Fatalf("Expected message to have TTL header set to 1s; got: %s", ttl) } time.Sleep(1500 * time.Millisecond) _, err = js.GetMsg("foo", ack.Sequence) if !errors.Is(err, nats.ErrMsgNotFound) { t.Fatalf("Expected not found error; got: %v", err) } } func TestMsgDeleteMarkerMaxAge(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, js := jsClient(t, srv) defer nc.Close() _, err := js.AddStream(&nats.StreamConfig{ Name: "foo", Subjects: []string{"FOO.*"}, AllowMsgTTL: true, SubjectDeleteMarkerTTL: 50 * time.Second, MaxAge: 1 * time.Second}) if err != nil { t.Fatalf("Unexpected error: %v", err) } _, err = js.Publish("FOO.1", []byte("msg1")) if err != nil { t.Fatalf("Unexpected error: %v", err) } time.Sleep(1500 * time.Millisecond) gotMsg, err := js.GetLastMsg("foo", "FOO.1") if err != nil { t.Fatalf("Unexpected error: %v", err) } if ttlMarker := gotMsg.Header.Get("Nats-Marker-Reason"); 
ttlMarker != "MaxAge" { t.Fatalf("Expected message to have Marker-Reason header set to MaxAge; got: %s", ttlMarker) } if ttl := gotMsg.Header.Get("Nats-TTL"); ttl != "50s" { t.Fatalf("Expected message to have Nats-TTL header set to 50s; got: %s", ttl) } } func TestPublishAsyncWithTTL(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, js := jsClient(t, srv) defer nc.Close() _, err := js.AddStream(&nats.StreamConfig{ Name: "foo", Subjects: []string{"FOO.*"}, MaxMsgSize: 64, AllowMsgTTL: true}) if err != nil { t.Fatalf("Unexpected error: %v", err) } paf, err := js.PublishAsync("FOO.1", []byte("msg"), nats.MsgTTL(1*time.Second)) if err != nil { t.Fatalf("Unexpected error: %v", err) } var ack *nats.PubAck select { case ack = <-paf.Ok(): case <-time.After(5 * time.Second): t.Fatalf("Did not receive ack") } gotMsg, err := js.GetMsg("foo", ack.Sequence) if err != nil { t.Fatalf("Unexpected error: %v", err) } if ttl := gotMsg.Header.Get("Nats-TTL"); ttl != "1s" { t.Fatalf("Expected message to have TTL header set to 1s; got: %s", ttl) } time.Sleep(1500 * time.Millisecond) _, err = js.GetMsg("foo", ack.Sequence) if !errors.Is(err, nats.ErrMsgNotFound) { t.Fatalf("Expected not found error; got: %v", err) } } func TestJetStreamSubscribe(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() var err error expectConsumers := func(t *testing.T, expected int) { t.Helper() checkFor(t, 2*time.Second, 15*time.Millisecond, func() error { var infos []*nats.ConsumerInfo for info := range js.Consumers("TEST") { infos = append(infos, info) } if len(infos) != expected { return fmt.Errorf("Expected %d consumers, got: %d", expected, len(infos)) } return nil }) } // Create the stream using our client API. 
// TestJetStreamSubscribe exercises the main JetStream subscribe paths:
// argument validation, ephemeral and durable push consumers, queue groups,
// pull subscriptions (Fetch), channel-based queue subscribers, and consumer
// cleanup on Unsubscribe/Drain. It tracks server-side consumer counts with
// expectConsumers after each step.
func TestJetStreamSubscribe(t *testing.T) {
	s := RunBasicJetStreamServer()
	defer shutdownJSServerAndRemoveStorage(t, s)

	nc, js := jsClient(t, s)
	defer nc.Close()

	var err error

	// expectConsumers polls until the number of consumers on stream "TEST"
	// matches expected; consumer deletion is asynchronous on the server, so
	// a single snapshot would be racy.
	expectConsumers := func(t *testing.T, expected int) {
		t.Helper()
		checkFor(t, 2*time.Second, 15*time.Millisecond, func() error {
			var infos []*nats.ConsumerInfo
			for info := range js.Consumers("TEST") {
				infos = append(infos, info)
			}
			if len(infos) != expected {
				return fmt.Errorf("Expected %d consumers, got: %d", expected, len(infos))
			}
			return nil
		})
	}

	// Create the stream using our client API.
	_, err = js.AddStream(&nats.StreamConfig{
		Name:     "TEST",
		Subjects: []string{"foo", "bar", "baz", "foo.*"},
	})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Lookup the stream for testing.
	_, err = js.StreamInfo("TEST")
	if err != nil {
		t.Fatalf("stream lookup failed: %v", err)
	}

	// If stream name is not specified, then the subject is required.
	if _, err := js.SubscribeSync(""); err == nil || !strings.Contains(err.Error(), "required") {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Check that if stream name is present, then technically the subject does not have to.
	sub, err := js.SubscribeSync("", nats.BindStream("TEST"))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	initialPending, err := sub.InitialConsumerPending()
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if initialPending != 0 {
		t.Fatalf("Expected no initial pending, got %d", initialPending)
	}
	sub.Unsubscribe()

	// Check that Queue subscribe with HB or FC fails.
	_, err = js.QueueSubscribeSync("foo", "wq", nats.IdleHeartbeat(time.Second))
	if err == nil || !strings.Contains(err.Error(), "heartbeat") {
		t.Fatalf("Unexpected error: %v", err)
	}
	_, err = js.QueueSubscribeSync("foo", "wq", nats.EnableFlowControl())
	if err == nil || !strings.Contains(err.Error(), "flow control") {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Check that Queue subscribe without durable name requires queue name
	// to not have "." in the name.
	_, err = js.QueueSubscribeSync("foo", "bar.baz")
	if err != nats.ErrInvalidConsumerName {
		t.Fatalf("Unexpected error: %v", err)
	}

	msg := []byte("Hello JS")

	// Basic publish like NATS core.
	js.Publish("foo", msg)

	q := make(chan *nats.Msg, 4)

	// Plain core subscription on "ivan" used to verify the explicit
	// DeliverSubject below is actually used by the JS consumer.
	checkSub, err := nc.SubscribeSync("ivan")
	if err != nil {
		t.Fatalf("Error on sub: %v", err)
	}

	// Now create a simple ephemeral consumer.
	sub1, err := js.Subscribe("foo", func(m *nats.Msg) {
		q <- m
	}, nats.DeliverSubject("ivan"))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer sub1.Unsubscribe()

	select {
	case m := <-q:
		if _, err := m.Metadata(); err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		if _, err := checkSub.NextMsg(time.Second); err != nil {
			t.Fatal("Wrong deliver subject")
		}
	case <-time.After(5 * time.Second):
		t.Fatalf("Did not receive the messages in time")
	}

	// Now do same but sync.
	sub2, err := js.SubscribeSync("foo")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer sub2.Unsubscribe()

	// waitForPending polls until sub has exactly n messages buffered.
	waitForPending := func(t *testing.T, sub *nats.Subscription, n int) {
		t.Helper()
		timeout := time.Now().Add(2 * time.Second)
		for time.Now().Before(timeout) {
			if msgs, _, _ := sub.Pending(); msgs == n {
				return
			}
			time.Sleep(10 * time.Millisecond)
		}
		msgs, _, _ := sub.Pending()
		t.Fatalf("Expected to receive %d messages, but got %d", n, msgs)
	}
	waitForPending(t, sub2, 1)

	toSend := 10
	for i := 0; i < toSend; i++ {
		js.Publish("bar", msg)
	}

	done := make(chan bool, 1)
	var received int
	sub3, err := js.Subscribe("bar", func(m *nats.Msg) {
		received++
		if received == toSend {
			done <- true
		}
	})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	expectConsumers(t, 3)
	defer sub3.Unsubscribe()

	initialPending, err = sub3.InitialConsumerPending()
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if initialPending != 10 {
		t.Fatalf("Expected initial pending of 10, got %d", initialPending)
	}

	select {
	case <-done:
	case <-time.After(5 * time.Second):
		t.Fatalf("Did not receive all of the messages in time")
	}

	// If we are here we have received all of the messages.
	// We hang the ConsumerInfo option off of the subscription, so we use that to check status.
	// We may need to retry this check since the acks sent by the client have to be processed
	// on the server.
	checkFor(t, 10*time.Second, 100*time.Millisecond, func() error {
		info, _ := sub3.ConsumerInfo()
		if info.Config.AckPolicy != nats.AckExplicitPolicy {
			t.Fatalf("Expected ack explicit policy, got %q", info.Config.AckPolicy)
		}
		if info.Delivered.Consumer != uint64(toSend) {
			return fmt.Errorf("Expected to have received all %d messages, got %d", toSend, info.Delivered.Consumer)
		}
		// Make sure we auto-ack'd
		if info.AckFloor.Consumer != uint64(toSend) {
			return fmt.Errorf("Expected to have ack'd all %d messages, got ack floor of %d", toSend, info.AckFloor.Consumer)
		}
		return nil
	})
	sub3.Unsubscribe()
	sub2.Unsubscribe()
	sub1.Unsubscribe()
	expectConsumers(t, 0)

	// Now create a sync subscriber that is durable.
	dname := "derek"
	sub, err = js.SubscribeSync("foo", nats.Durable(dname))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer sub.Unsubscribe()
	expectConsumers(t, 1)

	// Make sure we registered as a durable.
	info, _ := sub.ConsumerInfo()
	if info.Config.Durable != dname {
		t.Fatalf("Expected durable name to be set to %q, got %q", dname, info.Config.Durable)
	}
	deliver := info.Config.DeliverSubject

	// Drain subscription, this will delete the consumer.
	// The goroutine keeps consuming so Drain can complete.
	go func() {
		time.Sleep(250 * time.Millisecond)
		for {
			if _, err := sub.NextMsg(500 * time.Millisecond); err != nil {
				return
			}
		}
	}()
	sub.Drain()
	nc.Flush()
	expectConsumers(t, 0)

	// This will recreate a new instance.
	sub, err = js.SubscribeSync("foo", nats.Durable(dname))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if info, err := sub.ConsumerInfo(); err != nil || info.Config.DeliverSubject == deliver {
		t.Fatal("Expected delivery subject to be different")
	}
	expectConsumers(t, 1)

	// Subscribing again with same subject and durable name is an error.
	if _, err := js.SubscribeSync("foo", nats.Durable(dname)); err == nil {
		t.Fatal("Unexpected success")
	}
	expectConsumers(t, 1)

	// Delete the durable.
	sub.Unsubscribe()
	expectConsumers(t, 0)

	// Create again and make sure that works.
	sub, err = js.SubscribeSync("foo", nats.Durable(dname))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer sub.Unsubscribe()
	expectConsumers(t, 1)

	if deliver == sub.Subject {
		t.Fatalf("Expected delivery subject to be different then %q", deliver)
	}
	sub.Unsubscribe()
	expectConsumers(t, 0)

	// Create a queue group on "bar" with no explicit durable name, which
	// means that the queue name will be used as the durable name.
	sub1, err = js.QueueSubscribeSync("bar", "v0")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer sub1.Unsubscribe()
	waitForPending(t, sub1, 10)
	expectConsumers(t, 1)

	// Since the above JS consumer is created on subject "bar", trying to
	// add a member to the same group but on subject "baz" should fail.
	if _, err = js.QueueSubscribeSync("baz", "v0"); err == nil {
		t.Fatal("Unexpected success")
	}

	// If the queue group is different, but we try to attach to the existing
	// JS consumer that is created for group "v0", then this should fail.
	if _, err = js.QueueSubscribeSync("bar", "v1", nats.Durable("v0")); err == nil {
		t.Fatal("Unexpected success")
	}

	// However, if a durable name is specified, creating a queue sub with
	// the same queue name is ok, but will feed from a different JS consumer.
	sub2, err = js.QueueSubscribeSync("bar", "v0", nats.Durable("otherQueueDurable"))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer sub2.Unsubscribe()
	waitForPending(t, sub2, 10)
	expectConsumers(t, 2)

	sub1.Unsubscribe()
	sub2.Unsubscribe()
	expectConsumers(t, 0)

	// Now try pull based subscribers.

	// Check some error conditions first.
	if _, err := js.Subscribe("bar", nil); err != nats.ErrBadSubscription {
		t.Fatalf("Expected an error trying to create subscriber with nil callback, got %v", err)
	}

	// Since v2.7.0, we can create pull consumers with ephemerals.
	sub, err = js.PullSubscribe("bar", "")
	if err != nil {
		t.Fatalf("Error on subscribe: %v", err)
	}
	sub.Unsubscribe()

	// Pull consumer with AckNone policy
	sub, err = js.PullSubscribe("bar", "", nats.AckNone())
	if err != nil {
		t.Fatalf("Error on subscribe: %v", err)
	}
	sub.Unsubscribe()

	// Can't specify DeliverSubject for pull subscribers
	_, err = js.PullSubscribe("bar", "foo", nats.DeliverSubject("baz"))
	if err != nats.ErrPullSubscribeToPushConsumer {
		t.Fatalf("Unexpected error: %v", err)
	}

	// If stream name is not specified, need the subject.
	_, err = js.PullSubscribe("", "rip")
	if err == nil || !strings.Contains(err.Error(), "required") {
		t.Fatalf("Unexpected error: %v", err)
	}

	// If stream provided, it should be ok.
	sub, err = js.PullSubscribe("", "rip", nats.BindStream("TEST"))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	sub.Unsubscribe()

	batch := 5
	sub, err = js.PullSubscribe("bar", "rip")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	expectConsumers(t, 1)

	// The first batch if available should be delivered and queued up.
	bmsgs, err := sub.Fetch(batch)
	if err != nil {
		t.Fatal(err)
	}

	if info, _ := sub.ConsumerInfo(); info.NumAckPending != batch || info.NumPending != uint64(batch) {
		t.Fatalf("Expected %d pending ack, and %d still waiting to be delivered, got %d and %d", batch, batch, info.NumAckPending, info.NumPending)
	}

	// Now go ahead and consume these and ack, but not ack+next.
	for i := 0; i < batch; i++ {
		m := bmsgs[i]
		err = m.AckSync()
		if err != nil {
			t.Fatal(err)
		}
	}
	checkFor(t, time.Second, 15*time.Millisecond, func() error {
		if info, _ := sub.ConsumerInfo(); info.AckFloor.Consumer != uint64(batch) {
			return fmt.Errorf("Expected ack floor to be %d, got %d", batch, info.AckFloor.Consumer)
		}
		return nil
	})
	waitForPending(t, sub, 0)

	// Make a request for 10 but should only receive a few.
	bmsgs, err = sub.Fetch(10, nats.MaxWait(2*time.Second))
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}
	got := len(bmsgs)
	expected := 5
	if got != expected {
		t.Errorf("Expected: %v, got: %v", expected, got)
	}
	for _, msg := range bmsgs {
		msg.AckSync()
	}

	// Now test attaching to a pull based durable.

	// Test that if we are attaching that the subjects will match up. rip from
	// above was created with a filtered subject of bar, so this should fail.
	_, err = js.PullSubscribe("baz", "rip")
	if err != nats.ErrSubjectMismatch {
		t.Fatalf("Expected a %q error but got %q", nats.ErrSubjectMismatch, err)
	}

	// Queue up 10 more messages.
	for i := 0; i < toSend; i++ {
		js.Publish("bar", msg)
	}

	sub, err = js.PullSubscribe("bar", "rip")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer sub.Unsubscribe()
	// No new JS consumer was created.
	expectConsumers(t, 1)

	// Fetch messages a couple of times.
	expected = 5
	bmsgs, err = sub.Fetch(expected, nats.MaxWait(2*time.Second))
	if err != nil {
		t.Fatal(err)
	}
	got = len(bmsgs)
	if got != expected {
		t.Errorf("Expected: %v, got: %v", expected, got)
	}

	info, err = sub.ConsumerInfo()
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if info.NumAckPending != batch || info.NumPending != uint64(toSend-batch) {
		t.Fatalf("Expected ack pending of %d and pending to be %d, got %d %d", batch, toSend-batch, info.NumAckPending, info.NumPending)
	}

	// Pull subscriptions can't use NextMsg variants.
	if _, err := sub.NextMsg(time.Second); err != nats.ErrTypeSubscription {
		t.Fatalf("Expected error %q, got %v", nats.ErrTypeSubscription, err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	if _, err := sub.NextMsgWithContext(ctx); err != nats.ErrTypeSubscription {
		t.Fatalf("Expected error %q, got %v", nats.ErrTypeSubscription, err)
	}
	cancel()

	// Prevent invalid durable names
	if _, err := js.SubscribeSync("baz", nats.Durable("test.durable")); err != nats.ErrInvalidConsumerName {
		t.Fatalf("Expected invalid durable name error")
	}

	ackWait := 1 * time.Millisecond
	sub, err = js.SubscribeSync("bar", nats.Durable("ack-wait"), nats.AckWait(ackWait))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	expectConsumers(t, 2)

	_, err = sub.NextMsg(1 * time.Second)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	info, err = sub.ConsumerInfo()
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if info.Config.AckWait != ackWait {
		t.Errorf("Expected %v, got %v", ackWait, info.Config.AckWait)
	}

	// Add Stream and Consumer name to metadata.
	sub, err = js.SubscribeSync("bar", nats.Durable("consumer-name"))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	expectConsumers(t, 3)
	m, err := sub.NextMsg(1 * time.Second)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	meta, err := m.Metadata()
	if err != nil {
		t.Fatal(err)
	}
	if meta.Stream != "TEST" {
		t.Fatalf("Unexpected stream name, got: %v", meta.Stream)
	}
	if meta.Consumer != "consumer-name" {
		t.Fatalf("Unexpected consumer name, got: %v", meta.Consumer)
	}

	qsubDurable := nats.Durable("qdur-chan")
	mch := make(chan *nats.Msg, 16536)
	sub, err = js.ChanQueueSubscribe("bar", "v1", mch, qsubDurable)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer sub.Unsubscribe()
	expectConsumers(t, 4)

	var a, b *nats.MsgMetadata
	select {
	case msg := <-mch:
		meta, err := msg.Metadata()
		if err != nil {
			t.Error(err)
		}
		a = meta
	case <-time.After(2 * time.Second):
		t.Errorf("Timeout waiting for message")
	}

	mch2 := make(chan *nats.Msg, 16536)
	sub, err = js.ChanQueueSubscribe("bar", "v1", mch2, qsubDurable)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer sub.Unsubscribe()
	// Not a new JS consumer
	expectConsumers(t, 4)

	// Publish more messages so that at least one is received by
	// the channel queue subscriber.
	for i := 0; i < toSend; i++ {
		js.Publish("bar", msg)
	}

	select {
	case msg := <-mch2:
		meta, err := msg.Metadata()
		if err != nil {
			t.Error(err)
		}
		b = meta
	case <-time.After(2 * time.Second):
		t.Errorf("Timeout waiting for message")
	}
	if reflect.DeepEqual(a, b) {
		t.Errorf("Expected to receive different messages in stream")
	}

	// Both ChanQueueSubscribers use the same consumer.
	expectConsumers(t, 4)

	sub, err = js.SubscribeSync("foo", nats.InactiveThreshold(-100*time.Millisecond))
	if err == nil || !strings.Contains(err.Error(), "invalid InactiveThreshold") {
		t.Fatalf("Expected error about invalid option, got %v", err)
	}

	// Create an ephemeral with a lower inactive threshold
	sub, err = js.SubscribeSync("foo", nats.InactiveThreshold(50*time.Millisecond))
	if err != nil {
		t.Fatalf("Error on subscribe: %v", err)
	}
	ci, err := sub.ConsumerInfo()
	if err != nil {
		t.Fatalf("Error on consumer info: %v", err)
	}
	name := ci.Name
	// Closing the connection makes the ephemeral inactive; after the
	// threshold the server should have removed it.
	nc.Close()
	time.Sleep(150 * time.Millisecond)
	nc, js = jsClient(t, s)
	defer nc.Close()
	if ci, err := js.ConsumerInfo("TEST", name); err == nil {
		t.Fatalf("Expected no consumer to exist, got %+v", ci)
	}
}
// TestJetStreamSubscribe_SkipConsumerLookup verifies that the
// SkipConsumerLookup option suppresses the $JS.API.CONSUMER.INFO request on
// subscribe, by spying on the JS API subjects with plain NATS subscriptions
// (infoSub for lookups, createConsSub for consumer creation).
func TestJetStreamSubscribe_SkipConsumerLookup(t *testing.T) {
	s := RunBasicJetStreamServer()
	defer shutdownJSServerAndRemoveStorage(t, s)

	nc, js := jsClient(t, s)
	defer nc.Close()

	_, err := js.AddStream(&nats.StreamConfig{
		Name:     "TEST",
		Subjects: []string{"foo"},
	})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	_, err = js.AddConsumer("TEST", &nats.ConsumerConfig{
		Name:           "cons",
		DeliverSubject: "_INBOX.foo",
		AckPolicy:      nats.AckExplicitPolicy,
	})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// for checking whether subscribe looks up the consumer
	infoSub, err := nc.SubscribeSync("$JS.API.CONSUMER.INFO.TEST.*")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer infoSub.Unsubscribe()

	// for checking whether subscribe creates the consumer
	createConsSub, err := nc.SubscribeSync("$JS.API.CONSUMER.CREATE.>")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer createConsSub.Unsubscribe()

	t.Run("use Bind to skip consumer lookup and create", func(t *testing.T) {
		sub, err := js.SubscribeSync("", nats.Bind("TEST", "cons"), nats.SkipConsumerLookup(), nats.DeliverSubject("_INBOX.foo"))
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		defer sub.Unsubscribe()
		// we should get timeout waiting for msg on CONSUMER.INFO
		if msg, err := infoSub.NextMsg(50 * time.Millisecond); err == nil {
			t.Fatalf("Expected to skip consumer lookup; got message on %q", msg.Subject)
		}
		// we should get timeout waiting for msg on CONSUMER.CREATE
		if msg, err := createConsSub.NextMsg(50 * time.Millisecond); err == nil {
			t.Fatalf("Expected to skip consumer create; got message on %q", msg.Subject)
		}
		if _, err := js.Publish("foo", []byte("msg")); err != nil {
			t.Fatalf("Unexpected error: %s", err)
		}
		if _, err := sub.NextMsg(100 * time.Millisecond); err != nil {
			t.Fatalf("Expected to receive msg; got: %s", err)
		}
	})

	t.Run("use Durable, skip consumer lookup but overwrite the consumer", func(t *testing.T) {
		sub, err := js.SubscribeSync("foo", nats.Durable("cons"), nats.SkipConsumerLookup(), nats.DeliverSubject("_INBOX.foo"))
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		// we should get timeout waiting for msg on CONSUMER.INFO
		if msg, err := infoSub.NextMsg(50 * time.Millisecond); err == nil {
			t.Fatalf("Expected to skip consumer lookup; got message on %q", msg.Subject)
		}
		// we should get msg on CONSUMER.CREATE
		if _, err := createConsSub.NextMsg(50 * time.Millisecond); err != nil {
			t.Fatalf("Expected consumer create; got: %s", err)
		}
		if _, err := js.Publish("foo", []byte("msg")); err != nil {
			t.Fatalf("Unexpected error: %s", err)
		}
		if _, err := sub.NextMsg(100 * time.Millisecond); err != nil {
			t.Fatalf("Expected to receive msg; got: %s", err)
		}
	})

	t.Run("create new consumer with Durable, skip lookup", func(t *testing.T) {
		sub, err := js.SubscribeSync("foo", nats.Durable("pp"), nats.SkipConsumerLookup(), nats.DeliverSubject("_INBOX.foo1"))
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		defer sub.Unsubscribe()
		// we should get timeout waiting for msg on CONSUMER.INFO
		if msg, err := infoSub.NextMsg(50 * time.Millisecond); err == nil {
			t.Fatalf("Expected to skip consumer lookup; got message on %q", msg.Subject)
		}
		// we should get msg on CONSUMER.CREATE
		if _, err := createConsSub.NextMsg(50 * time.Millisecond); err != nil {
			t.Fatalf("Expected consumer create; got: %s", err)
		}
		if _, err := js.Publish("foo", []byte("msg")); err != nil {
			t.Fatalf("Unexpected error: %s", err)
		}
		if _, err := sub.NextMsg(100 * time.Millisecond); err != nil {
			t.Fatalf("Expected to receive msg; got: %s", err)
		}
	})

	t.Run("create new consumer with ConsumerName, skip lookup", func(t *testing.T) {
		sub, err := js.SubscribeSync("foo", nats.ConsumerName("pp"), nats.SkipConsumerLookup(), nats.DeliverSubject("_INBOX.foo1"))
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		defer sub.Unsubscribe()
		// we should get timeout waiting for msg on CONSUMER.INFO
		if msg, err := infoSub.NextMsg(50 * time.Millisecond); err == nil {
			t.Fatalf("Expected to skip consumer lookup; got message on %q", msg.Subject)
		}
		// we should get msg on CONSUMER.CREATE
		if _, err := createConsSub.NextMsg(50 * time.Millisecond); err != nil {
			t.Fatalf("Expected consumer create; got: %s", err)
		}
		if _, err := js.Publish("foo", []byte("msg")); err != nil {
			t.Fatalf("Unexpected error: %s", err)
		}
		if _, err := sub.NextMsg(100 * time.Millisecond); err != nil {
			t.Fatalf("Expected to receive msg; got: %s", err)
		}
	})

	t.Run("create ephemeral consumer, SkipConsumerLookup has no effect", func(t *testing.T) {
		sub, err := js.SubscribeSync("foo", nats.SkipConsumerLookup(), nats.DeliverSubject("_INBOX.foo2"))
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		defer sub.Unsubscribe()
		// we should get timeout waiting for msg on CONSUMER.INFO
		if msg, err := infoSub.NextMsg(50 * time.Millisecond); err == nil {
			t.Fatalf("Expected to skip consumer lookup; got message on %q", msg.Subject)
		}
		// we should get msg on CONSUMER.CREATE
		if _, err := createConsSub.NextMsg(50 * time.Millisecond); err != nil {
			t.Fatalf("Expected consumer create; got: %s", err)
		}
		if _, err := js.Publish("foo", []byte("msg")); err != nil {
			t.Fatalf("Unexpected error: %s", err)
		}
		if _, err := sub.NextMsg(100 * time.Millisecond); err != nil {
			t.Fatalf("Expected to receive msg; got: %s", err)
		}
	})

	t.Run("attempt to update ack policy of existing consumer", func(t *testing.T) {
		_, err := js.SubscribeSync("foo", nats.Durable("cons"), nats.SkipConsumerLookup(), nats.DeliverSubject("_INBOX.foo"), nats.AckAll())
		if err == nil || !strings.Contains(err.Error(), "ack policy can not be updated") {
			t.Fatalf("Expected update consumer error, got: %v", err)
		}
	})
}
err) } _, err = sub.Fetch(5, nats.PullHeartbeat(100*time.Millisecond), nats.MaxWait(1*time.Second)) if !errors.Is(err, nats.ErrNoHeartbeat) { t.Fatalf("Expected no heartbeat error; got: %v", err) } // heartbeat value too large _, err = sub.Fetch(5, nats.PullHeartbeat(200*time.Millisecond), nats.MaxWait(300*time.Millisecond)) if !errors.Is(err, nats.ErrInvalidArg) { t.Fatalf("Expected invalid arg error; got: %v", err) } // heartbeat value invalid _, err = sub.Fetch(5, nats.PullHeartbeat(-1)) if !errors.Is(err, nats.ErrInvalidArg) { t.Fatalf("Expected invalid arg error; got: %v", err) } // set short timeout on JetStream context js, err = nc.JetStream(nats.MaxWait(100 * time.Millisecond)) if err != nil { t.Fatalf("Unexpected error: %v", err) } sub1, err := js.PullSubscribe("foo", "") if err != nil { t.Fatalf("Unexpected error: %s", err) } defer sub.Unsubscribe() // should produce invalid arg error based on default timeout from JetStream context _, err = sub1.Fetch(5, nats.PullHeartbeat(100*time.Millisecond)) if !errors.Is(err, nats.ErrInvalidArg) { t.Fatalf("Expected invalid arg error; got: %v", err) } // overwrite default timeout with context timeout, fetch available messages ctx, cancel := context.WithTimeout(context.Background(), 300*time.Millisecond) defer cancel() msgs, err = sub1.Fetch(10, nats.PullHeartbeat(100*time.Millisecond), nats.Context(ctx)) if err != nil { t.Fatalf("Unexpected error: %s", err) } if len(msgs) != 5 { t.Fatalf("Expected %d messages; got: %d", 5, len(msgs)) } for _, msg := range msgs { msg.Ack() } // overwrite default timeout with max wait, should time out because no messages are available _, err = sub1.Fetch(5, nats.PullHeartbeat(100*time.Millisecond), nats.MaxWait(300*time.Millisecond)) if !errors.Is(err, nats.ErrTimeout) { t.Fatalf("Expected timeout error; got: %v", err) } } func TestPullSubscribeConsumerDoesNotExist(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer 
nc.Close() _, err := js.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"foo"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } sub, err := js.PullSubscribe("foo", "") if err != nil { t.Fatalf("Unexpected error: %s", err) } defer sub.Unsubscribe() info, err := sub.ConsumerInfo() if err != nil { t.Fatalf("Unexpected error: %v", err) } if err := js.DeleteConsumer("TEST", info.Name); err != nil { t.Fatalf("Unexpected error: %v", err) } _, err = sub.Fetch(5) if !errors.Is(err, nats.ErrNoResponders) { t.Fatalf("Expected no responders error; got: %v", err) } msgs, err := sub.FetchBatch(5) if err != nil { t.Fatalf("Unexpected error: %v", err) } select { case _, ok := <-msgs.Messages(): if ok { t.Fatalf("Expected no messages") } case <-time.After(time.Second): t.Fatalf("Timeout waiting for messages") } if !errors.Is(msgs.Error(), nats.ErrNoResponders) { t.Fatalf("Expected no responders error; got: %v", msgs.Error()) } } func TestPullSubscribeFetchDrain(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() _, err := js.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"foo"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer js.PurgeStream("TEST") sub, err := js.PullSubscribe("foo", "") if err != nil { t.Fatalf("Unexpected error: %s", err) } for i := 0; i < 100; i++ { if _, err := js.Publish("foo", []byte("msg")); err != nil { t.Fatalf("Unexpected error: %s", err) } } // fill buffer with messages cinfo, err := sub.ConsumerInfo() if err != nil { t.Fatalf("Unexpected error: %s", err) } nextSubject := fmt.Sprintf("$JS.API.CONSUMER.MSG.NEXT.TEST.%s", cinfo.Name) replySubject := strings.Replace(sub.Subject, "*", "abc", 1) payload := `{"batch":10,"no_wait":true}` if err := nc.PublishRequest(nextSubject, replySubject, []byte(payload)); err != nil { t.Fatalf("Unexpected error: %s", err) } time.Sleep(100 * time.Millisecond) // now drain the 
subscription, messages should be in the buffer sub.Drain() msgs, err := sub.Fetch(100) if err != nil { t.Fatalf("Unexpected error: %s", err) } for _, msg := range msgs { msg.Ack() } if len(msgs) != 10 { t.Fatalf("Expected %d messages; got: %d", 10, len(msgs)) } // subsequent fetch should return error, subscription is already drained _, err = sub.Fetch(10, nats.MaxWait(100*time.Millisecond)) if !errors.Is(err, nats.ErrSubscriptionClosed) { t.Fatalf("Expected error: %s; got: %s", nats.ErrSubscriptionClosed, err) } } func TestPullSubscribeFetchBatchWithHeartbeat(t *testing.T) { t.Skip("Since v2.10.26 server sends no responders if the consumer is deleted, we need to figure out how else to test missing heartbeats") s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() _, err := js.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"foo"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } sub, err := js.PullSubscribe("foo", "") if err != nil { t.Fatalf("Unexpected error: %s", err) } defer sub.Unsubscribe() for i := 0; i < 5; i++ { if _, err := js.Publish("foo", []byte("msg")); err != nil { t.Fatalf("Unexpected error: %s", err) } } // fetch 5 messages, should finish immediately msgs, err := sub.FetchBatch(5, nats.PullHeartbeat(100*time.Millisecond)) if err != nil { t.Fatalf("Unexpected error: %s", err) } var i int for msg := range msgs.Messages() { i++ msg.Ack() } if i != 5 { t.Fatalf("Expected %d messages; got: %d", 5, i) } if msgs.Error() != nil { t.Fatalf("Unexpected error: %s", msgs.Error()) } now := time.Now() // no messages available, should time out normally msgs, err = sub.FetchBatch(5, nats.PullHeartbeat(50*time.Millisecond), nats.MaxWait(300*time.Millisecond)) if err != nil { t.Fatalf("Unexpected error: %s", err) } i = 0 for msg := range msgs.Messages() { i++ msg.Ack() } elapsed := time.Since(now) if i != 0 { t.Fatalf("Expected %d messages; got: %d", 0, i) } if msgs.Error() != 
nil { t.Fatalf("Unexpected error: %s", msgs.Error()) } if elapsed < 250*time.Millisecond { t.Fatalf("Expected timeout after 300ms; got: %v", elapsed) } // delete consumer to verify heartbeats are not sent anymore info, err := sub.ConsumerInfo() if err != nil { t.Fatalf("Unexpected error: %v", err) } if err := js.DeleteConsumer("TEST", info.Name); err != nil { t.Fatalf("Unexpected error: %v", err) } now = time.Now() msgs, err = sub.FetchBatch(5, nats.PullHeartbeat(100*time.Millisecond), nats.MaxWait(1*time.Second)) if err != nil { t.Fatalf("Unexpected error: %v", err) } for msg := range msgs.Messages() { msg.Ack() } elapsed = time.Since(now) if elapsed < 200*time.Millisecond || elapsed > 300*time.Millisecond { t.Fatalf("Expected timeout after 200ms and before 300ms; got: %v", elapsed) } if !errors.Is(msgs.Error(), nats.ErrNoHeartbeat) { t.Fatalf("Expected no heartbeat error; got: %v", err) } // heartbeat value too large _, err = sub.FetchBatch(5, nats.PullHeartbeat(200*time.Millisecond), nats.MaxWait(300*time.Millisecond)) if !errors.Is(err, nats.ErrInvalidArg) { t.Fatalf("Expected no heartbeat error; got: %v", err) } // heartbeat value invalid _, err = sub.FetchBatch(5, nats.PullHeartbeat(-1)) if !errors.Is(err, nats.ErrInvalidArg) { t.Fatalf("Expected no heartbeat error; got: %v", err) } // set short timeout on JetStream context js, err = nc.JetStream(nats.MaxWait(100 * time.Millisecond)) if err != nil { t.Fatalf("Unexpected error: %v", err) } sub1, err := js.PullSubscribe("foo", "") if err != nil { t.Fatalf("Unexpected error: %s", err) } defer sub.Unsubscribe() // should produce invalid arg error based on default timeout from JetStream context _, err = sub1.Fetch(5, nats.PullHeartbeat(100*time.Millisecond)) if !errors.Is(err, nats.ErrInvalidArg) { t.Fatalf("Expected invalid arg error; got: %v", err) } // overwrite default timeout with context timeout, fetch available messages ctx, cancel := context.WithTimeout(context.Background(), 300*time.Millisecond) defer 
cancel() msgs, err = sub1.FetchBatch(10, nats.PullHeartbeat(100*time.Millisecond), nats.Context(ctx)) if err != nil { t.Fatalf("Unexpected error: %s", err) } for msg := range msgs.Messages() { msg.Ack() } // overwrite default timeout with max wait, should time out because no messages are available msgs, err = sub1.FetchBatch(5, nats.PullHeartbeat(100*time.Millisecond), nats.MaxWait(300*time.Millisecond)) if err != nil { t.Fatalf("Unexpected error: %s", err) } <-msgs.Done() if msgs.Error() != nil { t.Fatalf("Unexpected error: %s", msgs.Error()) } } func TestPullSubscribeFetchBatch(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() _, err := js.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"foo"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } t.Run("basic fetch", func(t *testing.T) { defer js.PurgeStream("TEST") sub, err := js.PullSubscribe("foo", "") if err != nil { t.Fatalf("Unexpected error: %s", err) } for i := 0; i < 5; i++ { if _, err := js.Publish("foo", []byte("msg")); err != nil { t.Fatalf("Unexpected error: %s", err) } } res, err := sub.FetchBatch(10) if err != nil { t.Fatalf("Unexpected error: %s", err) } go func() { time.Sleep(10 * time.Millisecond) for i := 0; i < 5; i++ { js.Publish("foo", []byte("msg")) } }() msgs := make([]*nats.Msg, 0) for msg := range res.Messages() { msgs = append(msgs, msg) } if res.Error() != nil { t.Fatalf("Unexpected error: %s", res.Error()) } if len(msgs) != 10 { t.Fatalf("Expected %d messages; got: %d", 10, len(msgs)) } }) t.Run("multiple concurrent fetches", func(t *testing.T) { defer js.PurgeStream("TEST") sub, err := js.PullSubscribe("foo", "") if err != nil { t.Fatalf("Unexpected error: %s", err) } for i := 0; i < 50; i++ { if _, err := js.Publish("foo", []byte("msg")); err != nil { t.Fatalf("Unexpected error: %s", err) } } var r1, r2, r3 nats.MessageBatch started := &sync.WaitGroup{} started.Add(3) errs := 
make(chan error, 3) go func() { var err error r1, err = sub.FetchBatch(10) if err != nil { errs <- err } started.Done() }() go func() { var err error r2, err = sub.FetchBatch(10) if err != nil { errs <- err } started.Done() }() go func() { var err error r3, err = sub.FetchBatch(10) if err != nil { errs <- err } started.Done() }() // wait until batch results are available started.Wait() // check if any FetchBatch call returned an error select { case err := <-errs: t.Fatalf("Error initializing fetch: %s", err) default: } var msgsReceived int for msgsReceived < 30 { select { case <-r1.Messages(): msgsReceived++ case <-r2.Messages(): msgsReceived++ case <-r3.Messages(): msgsReceived++ case <-time.After(1 * time.Second): t.Fatalf("Timeout waiting for incoming messages") } } select { case <-r1.Done(): case <-time.After(1 * time.Second): t.Fatalf("FetchBatch result channel should be closed after receiving all messages on r1") } select { case <-r2.Done(): case <-time.After(1 * time.Second): t.Fatalf("FetchBatch result channel should be closed after receiving all messages on r2") } select { case <-r3.Done(): case <-time.After(1 * time.Second): t.Fatalf("FetchBatch result channel should be closed after receiving all messages on r3") } if r1.Error() != nil { t.Fatalf("Unexpected error: %s", r1.Error()) } if r2.Error() != nil { t.Fatalf("Unexpected error: %s", r2.Error()) } if r3.Error() != nil { t.Fatalf("Unexpected error: %s", r3.Error()) } if msgsReceived != 30 { t.Fatalf("Expected %d messages; got: %d", 30, msgsReceived) } }) t.Run("deliver all, then consume", func(t *testing.T) { defer js.PurgeStream("TEST") sub, err := js.PullSubscribe("foo", "") if err != nil { t.Fatalf("Unexpected error: %s", err) } for i := 0; i < 5; i++ { if _, err := js.Publish("foo", []byte("msg")); err != nil { t.Fatalf("Unexpected error: %s", err) } } res, err := sub.FetchBatch(5) if err != nil { t.Fatalf("Unexpected error: %s", err) } time.Sleep(10 * time.Millisecond) msgs := make([]*nats.Msg, 
0) for msg := range res.Messages() { msgs = append(msgs, msg) } if res.Error() != nil { t.Fatalf("Unexpected error: %s", res.Error()) } if len(msgs) != 5 { t.Fatalf("Expected %d messages; got: %d", 5, len(msgs)) } }) t.Run("fetch with context", func(t *testing.T) { defer js.PurgeStream("TEST") sub, err := js.PullSubscribe("foo", "") if err != nil { t.Fatalf("Unexpected error: %s", err) } for i := 0; i < 5; i++ { if _, err := js.Publish("foo", []byte("msg")); err != nil { t.Fatalf("Unexpected error: %s", err) } } ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() res, err := sub.FetchBatch(10, nats.Context(ctx)) if err != nil { t.Fatalf("Unexpected error: %s", err) } go func() { time.Sleep(10 * time.Millisecond) for i := 0; i < 5; i++ { js.Publish("foo", []byte("msg")) } }() msgs := make([]*nats.Msg, 0) for msg := range res.Messages() { msgs = append(msgs, msg) } if res.Error() != nil { t.Fatalf("Unexpected error: %s", res.Error()) } if len(msgs) != 10 { t.Fatalf("Expected %d messages; got: %d", 10, len(msgs)) } }) t.Run("fetch subset of messages", func(t *testing.T) { defer js.PurgeStream("TEST") sub, err := js.PullSubscribe("foo", "") if err != nil { t.Fatalf("Unexpected error: %s", err) } for i := 0; i < 10; i++ { js.Publish("foo", []byte("msg")) } res, err := sub.FetchBatch(5) if err != nil { t.Fatalf("Unexpected error: %s", err) } msgs := make([]*nats.Msg, 0) for msg := range res.Messages() { msgs = append(msgs, msg) } if res.Error() != nil { t.Fatalf("Unexpected error: %s", res.Error()) } if len(msgs) != 5 { t.Fatalf("Expected %d messages; got: %d", 10, len(msgs)) } }) t.Run("context timeout, no error", func(t *testing.T) { defer js.PurgeStream("TEST") sub, err := js.PullSubscribe("foo", "") if err != nil { t.Fatalf("Unexpected error: %s", err) } for i := 0; i < 5; i++ { if _, err := js.Publish("foo", []byte("msg")); err != nil { t.Fatalf("Unexpected error: %s", err) } } ctx, cancel := 
context.WithTimeout(context.Background(), 50*time.Millisecond) defer cancel() res, err := sub.FetchBatch(10, nats.Context(ctx)) if err != nil { t.Fatalf("Unexpected error: %s", err) } msgs := make([]*nats.Msg, 0) for msg := range res.Messages() { msgs = append(msgs, msg) } if res.Error() != nil { t.Fatalf("Unexpected error: %s", res.Error()) } if len(msgs) != 5 { t.Fatalf("Expected %d messages; got: %d", 5, len(msgs)) } }) t.Run("request expired", func(t *testing.T) { defer js.PurgeStream("TEST") sub, err := js.PullSubscribe("foo", "") if err != nil { t.Fatalf("Unexpected error: %s", err) } for i := 0; i < 5; i++ { if _, err := js.Publish("foo", []byte("msg")); err != nil { t.Fatalf("Unexpected error: %s", err) } } res, err := sub.FetchBatch(10, nats.MaxWait(50*time.Millisecond)) if err != nil { t.Fatalf("Unexpected error: %s", err) } msgs := make([]*nats.Msg, 0) for msg := range res.Messages() { msgs = append(msgs, msg) } if res.Error() != nil { t.Fatalf("Unexpected error: %s", res.Error()) } if len(msgs) != 5 { t.Fatalf("Expected %d messages; got: %d", 5, len(msgs)) } }) t.Run("cancel context during fetch", func(t *testing.T) { defer js.PurgeStream("TEST") sub, err := js.PullSubscribe("foo", "") if err != nil { t.Fatalf("Unexpected error: %s", err) } for i := 0; i < 5; i++ { if _, err := js.Publish("foo", []byte("msg")); err != nil { t.Fatalf("Unexpected error: %s", err) } } ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) res, err := sub.FetchBatch(10, nats.Context(ctx)) if err != nil { t.Fatalf("Unexpected error: %s", err) } go func() { time.Sleep(200 * time.Millisecond) cancel() }() msgs := make([]*nats.Msg, 0) for msg := range res.Messages() { msgs = append(msgs, msg) } if res.Error() == nil || !errors.Is(res.Error(), context.Canceled) { t.Fatalf("Expected error: %s; got: %s", nats.ErrConsumerDeleted, res.Error()) } if len(msgs) != 5 { t.Fatalf("Expected %d messages; got: %d", 5, len(msgs)) } }) t.Run("remove durable consumer during 
fetch", func(t *testing.T) { defer js.PurgeStream("TEST") sub, err := js.PullSubscribe("foo", "cons") if err != nil { t.Fatalf("Unexpected error: %s", err) } for i := 0; i < 5; i++ { if _, err := js.Publish("foo", []byte("msg")); err != nil { t.Fatalf("Unexpected error: %s", err) } } res, err := sub.FetchBatch(10) if err != nil { t.Fatalf("Unexpected error: %s", err) } go func() { time.Sleep(10 * time.Millisecond) js.DeleteConsumer("TEST", "cons") }() msgs := make([]*nats.Msg, 0) for msg := range res.Messages() { msgs = append(msgs, msg) } if res.Error() == nil || !errors.Is(res.Error(), nats.ErrConsumerDeleted) { t.Fatalf("Expected error: %s; got: %s", nats.ErrConsumerDeleted, err) } if len(msgs) != 5 { t.Fatalf("Expected %d messages; got: %d", 5, len(msgs)) } }) t.Run("validation errors", func(t *testing.T) { defer js.PurgeStream("TEST") sub, err := js.PullSubscribe("foo", "") if err != nil { t.Fatalf("Unexpected error: %s", err) } // negative batch size _, err = sub.FetchBatch(-1) if !errors.Is(err, nats.ErrInvalidArg) { t.Errorf("Expected error: %s; got: %s", nats.ErrInvalidArg, err) } syncSub, err := js.SubscribeSync("foo") if err != nil { t.Fatalf("Unexpected error: %s", err) } // invalid subscription type _, err = syncSub.FetchBatch(10) if !errors.Is(err, nats.ErrTypeSubscription) { t.Errorf("Expected error: %s; got: %s", nats.ErrTypeSubscription, err) } // both context and max wait set ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() _, err = sub.FetchBatch(10, nats.Context(ctx), nats.MaxWait(2*time.Second)) if !errors.Is(err, nats.ErrContextAndTimeout) { t.Errorf("Expected error: %s; got: %s", nats.ErrContextAndTimeout, err) } // passing context.Background() to fetch _, err = sub.FetchBatch(10, nats.Context(context.Background())) if !errors.Is(err, nats.ErrNoDeadlineContext) { t.Errorf("Expected error: %s; got: %s", nats.ErrNoDeadlineContext, err) } }) t.Run("close subscription", func(t *testing.T) { defer 
js.PurgeStream("TEST")
		sub, err := js.PullSubscribe("foo", "")
		if err != nil {
			t.Fatalf("Unexpected error: %s", err)
		}
		for i := 0; i < 100; i++ {
			if _, err := js.Publish("foo", []byte("msg")); err != nil {
				t.Fatalf("Unexpected error: %s", err)
			}
		}
		// fill buffer with messages
		cinfo, err := sub.ConsumerInfo()
		if err != nil {
			t.Fatalf("Unexpected error: %s", err)
		}
		nextSubject := fmt.Sprintf("$JS.API.CONSUMER.MSG.NEXT.TEST.%s", cinfo.Name)
		replySubject := strings.Replace(sub.Subject, "*", "abc", 1)
		payload := `{"batch":10,"no_wait":true}`
		if err := nc.PublishRequest(nextSubject, replySubject, []byte(payload)); err != nil {
			t.Fatalf("Unexpected error: %s", err)
		}
		time.Sleep(100 * time.Millisecond)
		// now drain the subscription, messages should be in the buffer
		sub.Drain()
		res, err := sub.FetchBatch(100)
		if err != nil {
			t.Fatalf("Unexpected error: %s", err)
		}
		msgs := make([]*nats.Msg, 0)
		for msg := range res.Messages() {
			msgs = append(msgs, msg)
			msg.Ack()
		}
		if res.Error() != nil {
			t.Fatalf("Unexpected error: %s", res.Error())
		}
		if len(msgs) != 10 {
			t.Fatalf("Expected %d messages; got: %d", 10, len(msgs))
		}
		// subsequent fetch should return error, subscription is already drained
		_, err = sub.FetchBatch(10, nats.MaxWait(100*time.Millisecond))
		if !errors.Is(err, nats.ErrSubscriptionClosed) {
			t.Fatalf("Expected error: %s; got: %s", nats.ErrSubscriptionClosed, err)
		}
	})
}

// TestPullSubscribeConsumerDeleted verifies that an in-flight Fetch reports
// ErrConsumerDeleted when the consumer (or its whole stream) is deleted while
// the request is pending.
func TestPullSubscribeConsumerDeleted(t *testing.T) {
	s := RunBasicJetStreamServer()
	defer shutdownJSServerAndRemoveStorage(t, s)

	nc, js := jsClient(t, s)
	defer nc.Close()

	_, err := js.AddStream(&nats.StreamConfig{
		Name:     "TEST",
		Subjects: []string{"foo"},
	})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if _, err := js.Publish("foo", []byte("msg")); err != nil {
		t.Fatal(err)
	}

	t.Run("delete consumer", func(t *testing.T) {
		sub, err := js.PullSubscribe("foo", "cons")
		if err != nil {
			t.Fatal(err)
		}
		defer sub.Unsubscribe()
		// FIX: removed a duplicated, unreachable `if err != nil` check that
		// followed the defer (err had already been checked above).
		if _, err = sub.Fetch(1, nats.MaxWait(10*time.Millisecond)); err != nil {
			t.Fatalf("Expected error: %v; got: %v", nats.ErrTimeout, err)
		}
		time.AfterFunc(50*time.Millisecond, func() {
			js.DeleteConsumer("TEST", "cons")
		})
		if _, err = sub.Fetch(1, nats.MaxWait(100*time.Millisecond)); !errors.Is(err, nats.ErrConsumerDeleted) {
			t.Fatalf("Expected error: %v; got: %v", nats.ErrConsumerDeleted, err)
		}
	})
	t.Run("delete stream", func(t *testing.T) {
		sub, err := js.PullSubscribe("foo", "cons")
		if err != nil {
			t.Fatal(err)
		}
		defer sub.Unsubscribe()
		// FIX: removed the same duplicated `if err != nil` check here as well.
		if _, err = sub.Fetch(1, nats.MaxWait(10*time.Millisecond)); err != nil {
			t.Fatalf("Expected error: %v; got: %v", nats.ErrTimeout, err)
		}
		time.AfterFunc(50*time.Millisecond, func() {
			js.DeleteStream("TEST")
		})
		if _, err = sub.Fetch(1, nats.MaxWait(100*time.Millisecond)); !errors.Is(err, nats.ErrConsumerDeleted) {
			t.Fatalf("Expected error: %v; got: %v", nats.ErrConsumerDeleted, err)
		}
	})
}

// TestJetStreamAckPending_Pull verifies that a pull consumer stops delivering
// once MaxAckPending unacked messages are outstanding.
func TestJetStreamAckPending_Pull(t *testing.T) {
	s := RunBasicJetStreamServer()
	defer shutdownJSServerAndRemoveStorage(t, s)

	nc, js := jsClient(t, s)
	defer nc.Close()

	var err error
	_, err = js.AddStream(&nats.StreamConfig{
		Name:     "TEST",
		Subjects: []string{"foo"},
	})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	const totalMsgs = 4
	for i := 0; i < totalMsgs; i++ {
		if _, err := js.Publish("foo", []byte(fmt.Sprintf("msg %d", i))); err != nil {
			t.Fatal(err)
		}
	}

	ackPendingLimit := 3
	sub, err := js.PullSubscribe("foo", "dname-pull-ack-wait", nats.MaxAckPending(ackPendingLimit))
	if err != nil {
		t.Fatal(err)
	}
	defer sub.Unsubscribe()

	var msgs []*nats.Msg
	for i := 0; i < ackPendingLimit; i++ {
		ms, err := sub.Fetch(1)
		if err != nil {
			t.Fatalf("Error on fetch: %v", err)
		}
		msgs = append(msgs, ms...)
	}

	// Since we don't ack, the next fetch should time out because the server
	// won't send new ones until we ack some.
if _, err := sub.Fetch(1, nats.MaxWait(250*time.Millisecond)); err != nats.ErrTimeout { t.Fatalf("Expected timeout, got: %v", err) } // Ack one message, then we should be able to get the next msgs[0].Ack() if _, err := sub.Fetch(1); err != nil { t.Fatalf("Unexpected error: %v", err) } } func TestJetStreamAckPending_Push(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() var err error _, err = js.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"foo"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } const totalMsgs = 3 for i := 0; i < totalMsgs; i++ { if _, err := js.Publish("foo", []byte(fmt.Sprintf("msg %d", i))); err != nil { t.Fatal(err) } } sub, err := js.SubscribeSync("foo", nats.Durable("dname-wait"), nats.AckWait(100*time.Millisecond), nats.MaxDeliver(5), nats.MaxAckPending(3), ) if err != nil { t.Fatal(err) } defer sub.Unsubscribe() // 3 messages delivered 5 times. expected := 15 timeout := time.Now().Add(2 * time.Second) pending := 0 for time.Now().Before(timeout) { if pending, _, _ = sub.Pending(); pending >= expected { break } time.Sleep(10 * time.Millisecond) } if pending < expected { t.Errorf("Expected %v, got %v", expected, pending) } info, err := sub.ConsumerInfo() if err != nil { t.Fatal(err) } got := info.NumRedelivered expected = 3 if got < expected { t.Errorf("Expected %v, got: %v", expected, got) } got = info.NumAckPending expected = 3 if got < expected { t.Errorf("Expected %v, got: %v", expected, got) } got = info.NumWaiting expected = 0 if got != expected { t.Errorf("Expected %v, got: %v", expected, got) } got = int(info.NumPending) expected = 0 if got != expected { t.Errorf("Expected %v, got: %v", expected, got) } got = info.Config.MaxAckPending expected = 3 if got != expected { t.Errorf("Expected %v, got %v", expected, pending) } got = info.Config.MaxDeliver expected = 5 if got != expected { t.Errorf("Expected %v, got %v", expected, 
pending) } acks := map[int]int{} ackPending := 3 timeout = time.Now().Add(2 * time.Second) for time.Now().Before(timeout) { info, err := sub.ConsumerInfo() if err != nil { t.Fatal(err) } if got, want := info.NumAckPending, ackPending; got > 0 && got != want { t.Fatalf("unexpected num ack pending: got=%d, want=%d", got, want) } // Continue to ack all messages until no more pending. pending, _, _ = sub.Pending() if pending == 0 { break } m, err := sub.NextMsg(100 * time.Millisecond) if err != nil { t.Fatalf("Error getting next message: %v", err) } if err := m.AckSync(); err != nil { t.Fatalf("Error on ack message: %v", err) } meta, err := m.Metadata() if err != nil { t.Errorf("Unexpected error: %v", err) } acks[int(meta.Sequence.Stream)]++ if ackPending != 0 { ackPending-- } if int(meta.NumPending) != ackPending { t.Errorf("Expected %v, got %v", ackPending, meta.NumPending) } } got = len(acks) expected = 3 if got != expected { t.Errorf("Expected %v, got %v", expected, got) } expected = 5 for _, got := range acks { if got != expected { t.Errorf("Expected %v, got %v", expected, got) } } _, err = sub.NextMsg(100 * time.Millisecond) if err != nats.ErrTimeout { t.Errorf("Expected timeout, got: %v", err) } } func TestJetStream_Drain(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) ctx, done := context.WithTimeout(context.Background(), 10*time.Second) nc, err := nats.Connect(s.ClientURL(), nats.ClosedHandler(func(_ *nats.Conn) { done() })) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() js, err := nc.JetStream(nats.MaxWait(250 * time.Millisecond)) if err != nil { t.Fatalf("Unexpected error: %v", err) } _, err = js.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"drain"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } total := 500 for i := 0; i < total; i++ { _, err := js.Publish("drain", []byte(fmt.Sprintf("i:%d", i))) if err != nil { t.Error(err) } } // Create some consumers and 
ensure that there are no timeouts. errCh := make(chan error, 2048) createSub := func(name string) (*nats.Subscription, error) { return js.Subscribe("drain", func(m *nats.Msg) { err := m.AckSync() if err != nil { errCh <- err } }, nats.Durable(name), nats.ManualAck()) } subA, err := createSub("A") if err != nil { t.Fatalf("Unexpected error: %v", err) } subB, err := createSub("B") if err != nil { t.Fatalf("Unexpected error: %v", err) } subC, err := createSub("C") if err != nil { t.Fatalf("Unexpected error: %v", err) } subD, err := createSub("D") if err != nil { t.Fatalf("Unexpected error: %v", err) } waitForDelivered := func(t *testing.T, sub *nats.Subscription) { t.Helper() timeout := time.Now().Add(2 * time.Second) for time.Now().Before(timeout) { if msgs, _ := sub.Delivered(); msgs != 0 { return } time.Sleep(10 * time.Millisecond) } } waitForDelivered(t, subA) waitForDelivered(t, subB) waitForDelivered(t, subC) waitForDelivered(t, subD) nc.Drain() select { case err := <-errCh: t.Fatalf("Error during drain: %+v", err) case <-ctx.Done(): // OK! } } func TestAckForNonJetStream(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() sub, _ := nc.SubscribeSync("foo") nc.PublishRequest("foo", "_INBOX_", []byte("OK")) m, err := sub.NextMsg(time.Second) if err != nil { t.Fatalf("Unexpected error: %v", err) } if err := m.Ack(); err != nil { t.Fatalf("Expected no errors, got '%v'", err) } } func TestJetStreamManagement(t *testing.T) { conf := createConfFile(t, []byte(` listen: 127.0.0.1:-1 jetstream: enabled accounts: { A { users: [{ user: "foo" }] jetstream: { max_mem: 64MB, max_file: 64MB } } } `)) defer os.Remove(conf) s, _ := RunServerWithConfig(conf) defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s, nats.UserInfo("foo", "")) defer nc.Close() // Create the stream using our client API. 
var si *nats.StreamInfo t.Run("create stream", func(t *testing.T) { consLimits := nats.StreamConsumerLimits{ MaxAckPending: 100, InactiveThreshold: 10 * time.Second, } cfg := &nats.StreamConfig{ Name: "foo", Subjects: []string{"foo", "bar", "baz"}, Compression: nats.S2Compression, ConsumerLimits: nats.StreamConsumerLimits{ MaxAckPending: 100, InactiveThreshold: 10 * time.Second, }, FirstSeq: 22, Metadata: map[string]string{ "foo": "bar", "baz": "quux", }, } si, err := js.AddStream(cfg) if err != nil { t.Fatalf("Unexpected error: %v", err) } if si == nil || si.Config.Name != "foo" { t.Fatalf("StreamInfo is not correct %+v", si) } if v1, v2 := si.Config.Metadata["foo"], si.Config.Metadata["baz"]; v1 != "bar" || v2 != "quux" { t.Fatalf("Metadata is not correct %+v", si.Config.Metadata) } if si.Config.Compression != nats.S2Compression { t.Fatalf("Compression is not correct %+v", si.Config.Compression) } if si.Config.FirstSeq != 22 { t.Fatalf("FirstSeq is not correct %+v", si.Config.FirstSeq) } if si.Config.ConsumerLimits != consLimits { t.Fatalf("ConsumerLimits is not correct %+v", si.Config.ConsumerLimits) } }) t.Run("stream with given name already exists", func(t *testing.T) { if _, err := js.AddStream(&nats.StreamConfig{Name: "foo", Description: "desc"}); !errors.Is(err, nats.ErrStreamNameAlreadyInUse) { t.Fatalf("Expected error: %v; got: %v", nats.ErrStreamNameAlreadyInUse, err) } }) for i := 0; i < 25; i++ { js.Publish("foo", []byte("hi")) } var err error t.Run("stream not found", func(t *testing.T) { si, err = js.StreamInfo("bar") if !errors.Is(err, nats.ErrStreamNotFound) { t.Fatalf("Expected error: %v, got: %v", nats.ErrStreamNotFound, err) } if si != nil { t.Fatalf("StreamInfo should be nil %+v", si) } }) t.Run("stream info", func(t *testing.T) { si, err = js.StreamInfo("foo") if err != nil { t.Fatalf("Unexpected error: %v", err) } if si == nil || si.Config.Name != "foo" { t.Fatalf("StreamInfo is not correct %+v", si) } }) t.Run("create bad stream", func(t 
*testing.T) { if _, err := js.AddStream(nil); err != nats.ErrStreamConfigRequired { t.Fatalf("Expected %v, got %v", nats.ErrStreamConfigRequired, err) } if _, err := js.AddStream(&nats.StreamConfig{Name: ""}); err != nats.ErrStreamNameRequired { t.Fatalf("Expected %v, got %v", nats.ErrStreamNameRequired, err) } if _, err := js.AddStream(&nats.StreamConfig{Name: "bad.stream.name"}); err != nats.ErrInvalidStreamName { t.Fatalf("Expected %v, got %v", nats.ErrInvalidStreamName, err) } if _, err := js.AddStream(&nats.StreamConfig{Name: "bad stream name"}); err != nats.ErrInvalidStreamName { t.Fatalf("Expected %v, got %v", nats.ErrInvalidStreamName, err) } }) t.Run("bad stream info", func(t *testing.T) { if _, err := js.StreamInfo(""); err != nats.ErrStreamNameRequired { t.Fatalf("Expected %v, got %v", nats.ErrStreamNameRequired, err) } if _, err := js.StreamInfo("bad.stream.name"); err != nats.ErrInvalidStreamName { t.Fatalf("Expected %v, got %v", nats.ErrInvalidStreamName, err) } }) t.Run("stream update", func(t *testing.T) { if _, err := js.UpdateStream(nil); err != nats.ErrStreamConfigRequired { t.Fatalf("Expected %v, got %v", nats.ErrStreamConfigRequired, err) } if _, err := js.UpdateStream(&nats.StreamConfig{Name: ""}); err != nats.ErrStreamNameRequired { t.Fatalf("Expected %v, got %v", nats.ErrStreamNameRequired, err) } if _, err := js.UpdateStream(&nats.StreamConfig{Name: "bad.stream.name"}); err != nats.ErrInvalidStreamName { t.Fatalf("Expected %v, got %v", nats.ErrInvalidStreamName, err) } prevMaxMsgs := si.Config.MaxMsgs si, err = js.UpdateStream(&nats.StreamConfig{Name: "foo", MaxMsgs: prevMaxMsgs + 100}) if err != nil { t.Fatalf("Unexpected error: %v", err) } if si == nil || si.Config.Name != "foo" || si.Config.MaxMsgs == prevMaxMsgs { t.Fatalf("StreamInfo is not correct %+v", si) } }) t.Run("create consumer", func(t *testing.T) { t.Run("with durable set", func(t *testing.T) { sub, err := nc.SubscribeSync("$JS.API.CONSUMER.CREATE.foo.dlc") if err != nil { 
t.Fatalf("Unexpected error: %v", err) } defer sub.Unsubscribe() ci, err := js.AddConsumer("foo", &nats.ConsumerConfig{ Durable: "dlc", AckPolicy: nats.AckExplicitPolicy, Metadata: map[string]string{ "foo": "bar", "baz": "quux", }, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } msg, err := sub.NextMsg(1 * time.Second) if err != nil { t.Fatalf("Unexpected error: %v", err) } if !strings.Contains(string(msg.Data), `"durable_name":"dlc"`) { t.Fatalf("create consumer message is not correct: %q", string(msg.Data)) } if ci == nil || ci.Name != "dlc" || ci.Stream != "foo" { t.Fatalf("ConsumerInfo is not correct %+v", ci) } if v1, v2 := ci.Config.Metadata["foo"], ci.Config.Metadata["baz"]; v1 != "bar" || v2 != "quux" { t.Fatalf("Metadata is not correct %+v", ci.Config.Metadata) } }) t.Run("with name set", func(t *testing.T) { sub, err := nc.SubscribeSync("$JS.API.CONSUMER.CREATE.foo.dlc-1") if err != nil { t.Fatalf("Unexpected error: %v", err) } defer sub.Unsubscribe() ci, err := js.AddConsumer("foo", &nats.ConsumerConfig{Durable: "dlc-1", AckPolicy: nats.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msg, err := sub.NextMsg(1 * time.Second) if err != nil { t.Fatalf("Unexpected error: %v", err) } if !strings.Contains(string(msg.Data), `"durable_name":"dlc-1"`) { t.Fatalf("create consumer message is not correct: %q", string(msg.Data)) } if ci == nil || ci.Name != "dlc-1" || ci.Stream != "foo" { t.Fatalf("ConsumerInfo is not correct %+v", ci) } }) t.Run("with same Durable and Name set", func(t *testing.T) { sub, err := nc.SubscribeSync("$JS.API.CONSUMER.CREATE.foo.dlc-2") if err != nil { t.Fatalf("Unexpected error: %v", err) } defer sub.Unsubscribe() ci, err := js.AddConsumer("foo", &nats.ConsumerConfig{Durable: "dlc-2", Name: "dlc-2", AckPolicy: nats.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msg, err := sub.NextMsg(1 * time.Second) if err != nil { t.Fatalf("Unexpected error: %v", err) } if 
!strings.Contains(string(msg.Data), `"durable_name":"dlc-2"`) { t.Fatalf("create consumer message is not correct: %q", string(msg.Data)) } if ci == nil || ci.Name != "dlc-2" || ci.Stream != "foo" { t.Fatalf("ConsumerInfo is not correct %+v", ci) } }) t.Run("with name and filter subject", func(t *testing.T) { sub, err := nc.SubscribeSync("$JS.API.CONSUMER.CREATE.foo.dlc-3.foo") if err != nil { t.Fatalf("Unexpected error: %v", err) } defer sub.Unsubscribe() ci, err := js.AddConsumer("foo", &nats.ConsumerConfig{ Durable: "dlc-3", AckPolicy: nats.AckExplicitPolicy, FilterSubject: "foo", }) if err != nil { t.Fatalf("Unexpected error: %v", err) } msg, err := sub.NextMsg(1 * time.Second) if err != nil { t.Fatalf("Unexpected error: %v", err) } if !strings.Contains(string(msg.Data), `"durable_name":"dlc-3"`) { t.Fatalf("create consumer message is not correct: %q", string(msg.Data)) } if ci == nil || ci.Name != "dlc-3" || ci.Stream != "foo" || ci.Config.FilterSubject != "foo" { t.Fatalf("ConsumerInfo is not correct %+v", ci) } }) t.Run("legacy ephemeral consumer without name", func(t *testing.T) { sub, err := nc.SubscribeSync("$JS.API.CONSUMER.CREATE.foo") if err != nil { t.Fatalf("Unexpected error: %v", err) } defer sub.Unsubscribe() ci, err := js.AddConsumer("foo", &nats.ConsumerConfig{AckPolicy: nats.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msg, err := sub.NextMsg(1 * time.Second) if err != nil { t.Fatalf("Unexpected error: %v", err) } if !strings.Contains(string(msg.Data), `"stream_name":"foo"`) { t.Fatalf("create consumer message is not correct: %q", string(msg.Data)) } if ci == nil || ci.Config.Durable != "" || ci.Stream != "foo" { t.Fatalf("ConsumerInfo is not correct %+v", ci) } }) t.Run("legacy durable with jetstream context option", func(t *testing.T) { jsLegacy, err := nc.JetStream(nats.UseLegacyDurableConsumers()) if err != nil { t.Fatalf("Unexpected error: %v", err) } sub, err := 
nc.SubscribeSync("$JS.API.CONSUMER.DURABLE.CREATE.foo.dlc-4") if err != nil { t.Fatalf("Unexpected error: %v", err) } defer sub.Unsubscribe() ci, err := jsLegacy.AddConsumer("foo", &nats.ConsumerConfig{Durable: "dlc-4", AckPolicy: nats.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } msg, err := sub.NextMsg(1 * time.Second) if err != nil { t.Fatalf("Unexpected error: %v", err) } if !strings.Contains(string(msg.Data), `"durable_name":"dlc-4"`) { t.Fatalf("create consumer message is not correct: %q", string(msg.Data)) } if ci == nil || ci.Config.Durable != "dlc-4" || ci.Stream != "foo" { t.Fatalf("ConsumerInfo is not correct %+v", ci) } }) t.Run("durable consumer with multiple filter subjects", func(t *testing.T) { sub, err := nc.SubscribeSync("$JS.API.CONSUMER.CREATE.foo.dlc-5") if err != nil { t.Fatalf("Unexpected error: %v", err) } defer sub.Unsubscribe() ci, err := js.AddConsumer("foo", &nats.ConsumerConfig{ Durable: "dlc-5", AckPolicy: nats.AckExplicitPolicy, FilterSubjects: []string{"foo", "bar"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } msg, err := sub.NextMsg(1 * time.Second) if err != nil { t.Fatalf("Unexpected error: %v", err) } if !strings.Contains(string(msg.Data), `"durable_name":"dlc-5"`) { t.Fatalf("create consumer message is not correct: %q", string(msg.Data)) } if ci == nil || ci.Config.Durable != "dlc-5" || !reflect.DeepEqual(ci.Config.FilterSubjects, []string{"foo", "bar"}) { t.Fatalf("ConsumerInfo is not correct %+v", ci) } }) t.Run("ephemeral consumer with multiple filter subjects", func(t *testing.T) { sub, err := nc.SubscribeSync("$JS.API.CONSUMER.CREATE.foo") if err != nil { t.Fatalf("Unexpected error: %v", err) } defer sub.Unsubscribe() ci, err := js.AddConsumer("foo", &nats.ConsumerConfig{ AckPolicy: nats.AckExplicitPolicy, FilterSubjects: []string{"foo", "bar"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } _, err = sub.NextMsg(1 * time.Second) if err != nil { t.Fatalf("Unexpected 
error: %v", err) } if ci == nil || !reflect.DeepEqual(ci.Config.FilterSubjects, []string{"foo", "bar"}) { t.Fatalf("ConsumerInfo is not correct %+v", ci) } }) t.Run("multiple filter subjects errors", func(t *testing.T) { // both filter subject and filter subjects provided _, err := js.AddConsumer("foo", &nats.ConsumerConfig{ AckPolicy: nats.AckExplicitPolicy, FilterSubjects: []string{"foo", "bar"}, FilterSubject: "baz", }) if !errors.Is(err, nats.ErrDuplicateFilterSubjects) { t.Fatalf("Expected: %v; got: %v", nats.ErrDuplicateFilterSubjects, err) } // overlapping filter subjects _, err = js.AddConsumer("foo", &nats.ConsumerConfig{ AckPolicy: nats.AckExplicitPolicy, FilterSubjects: []string{"foo.*", "foo.A"}, }) if !errors.Is(err, nats.ErrOverlappingFilterSubjects) { t.Fatalf("Expected: %v; got: %v", nats.ErrOverlappingFilterSubjects, err) } // empty filter subject in filter subjects _, err = js.AddConsumer("foo", &nats.ConsumerConfig{ AckPolicy: nats.AckExplicitPolicy, FilterSubjects: []string{"foo", ""}, }) if !errors.Is(err, nats.ErrEmptyFilter) { t.Fatalf("Expected: %v; got: %v", nats.ErrEmptyFilter, err) } }) t.Run("with invalid filter subject", func(t *testing.T) { if _, err = js.AddConsumer("foo", &nats.ConsumerConfig{Name: "tc", FilterSubject: ".foo"}); !errors.Is(err, nats.ErrInvalidFilterSubject) { t.Fatalf("Expected: %v; got: %v", nats.ErrInvalidFilterSubject, err) } if _, err = js.AddConsumer("foo", &nats.ConsumerConfig{Name: "tc", FilterSubject: "foo."}); !errors.Is(err, nats.ErrInvalidFilterSubject) { t.Fatalf("Expected: %v; got: %v", nats.ErrInvalidFilterSubject, err) } }) t.Run("with invalid consumer name", func(t *testing.T) { if _, err = js.AddConsumer("foo", &nats.ConsumerConfig{Durable: "test.durable"}); err != nats.ErrInvalidConsumerName { t.Fatalf("Expected: %v; got: %v", nats.ErrInvalidConsumerName, err) } if _, err = js.AddConsumer("foo", &nats.ConsumerConfig{Durable: "test durable"}); err != nats.ErrInvalidConsumerName { t.Fatalf("Expected: 
%v; got: %v", nats.ErrInvalidConsumerName, err) } }) t.Run("consumer with given name already exists, configs do not match", func(t *testing.T) { // configs do not match if _, err = js.AddConsumer("foo", &nats.ConsumerConfig{Durable: "dlc", AckPolicy: nats.AckAllPolicy}); !errors.Is(err, nats.ErrConsumerNameAlreadyInUse) { t.Fatalf("Expected error: %v; got: %v", nats.ErrConsumerNameAlreadyInUse, err) } }) t.Run("consumer with given name already exists, configs are the same", func(t *testing.T) { // configs are the same if _, err = js.AddConsumer("foo", &nats.ConsumerConfig{Durable: "dlc", AckPolicy: nats.AckExplicitPolicy}); err != nil { t.Fatalf("Expected no error; got: %v", err) } }) t.Run("stream does not exist", func(t *testing.T) { _, err = js.AddConsumer("missing", &nats.ConsumerConfig{Durable: "dlc", AckPolicy: nats.AckExplicitPolicy}) if err != nats.ErrStreamNotFound { t.Fatalf("Expected stream not found error, got: %v", err) } }) t.Run("params validation error", func(t *testing.T) { _, err = js.AddConsumer("", &nats.ConsumerConfig{Durable: "dlc", AckPolicy: nats.AckExplicitPolicy}) if err != nats.ErrStreamNameRequired { t.Fatalf("Expected %v, got: %v", nats.ErrStreamNameRequired, err) } _, err = js.AddConsumer("bad.stream.name", &nats.ConsumerConfig{Durable: "dlc", AckPolicy: nats.AckExplicitPolicy}) if err != nats.ErrInvalidStreamName { t.Fatalf("Expected %v, got: %v", nats.ErrInvalidStreamName, err) } _, err = js.AddConsumer("foo", &nats.ConsumerConfig{Durable: "bad.consumer.name", AckPolicy: nats.AckExplicitPolicy}) if err != nats.ErrInvalidConsumerName { t.Fatalf("Expected %v, got: %v", nats.ErrInvalidConsumerName, err) } }) }) t.Run("consumer info", func(t *testing.T) { if _, err := js.ConsumerInfo("", "dlc"); err != nats.ErrStreamNameRequired { t.Fatalf("Expected %v, got %v", nats.ErrStreamNameRequired, err) } if _, err := js.ConsumerInfo("bad.stream.name", "dlc"); err != nats.ErrInvalidStreamName { t.Fatalf("Expected %v, got %v", 
nats.ErrInvalidStreamName, err)
		}
		if _, err := js.ConsumerInfo("foo", ""); err != nats.ErrConsumerNameRequired {
			t.Fatalf("Expected %v, got %v", nats.ErrConsumerNameRequired, err)
		}
		if _, err := js.ConsumerInfo("foo", "bad.consumer.name"); err != nats.ErrInvalidConsumerName {
			t.Fatalf("Expected %v, got %v", nats.ErrInvalidConsumerName, err)
		}
		ci, err := js.ConsumerInfo("foo", "dlc")
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		if ci == nil || ci.Config.Durable != "dlc" {
			// FIX: previously printed the outer StreamInfo (si) instead of the
			// ConsumerInfo being checked.
			t.Fatalf("ConsumerInfo is not correct %+v", ci)
		}
	})
	t.Run("consumer not found", func(t *testing.T) {
		ci, err := js.ConsumerInfo("foo", "cld")
		if !errors.Is(err, nats.ErrConsumerNotFound) {
			t.Fatalf("Expected error: %v, got: %v", nats.ErrConsumerNotFound, err)
		}
		if ci != nil {
			t.Fatalf("ConsumerInfo should be nil %+v", ci)
		}
	})
	t.Run("list streams", func(t *testing.T) {
		var infos []*nats.StreamInfo
		for info := range js.Streams() {
			infos = append(infos, info)
		}
		if len(infos) != 1 || infos[0].Config.Name != "foo" {
			t.Fatalf("StreamInfo is not correct %+v", infos)
		}
	})
	t.Run("list consumers", func(t *testing.T) {
		var infos []*nats.ConsumerInfo
		// empty stream name yields no consumers
		for info := range js.Consumers("") {
			infos = append(infos, info)
		}
		if len(infos) != 0 {
			t.Fatalf("ConsumerInfo is not correct %+v", infos)
		}
		// invalid stream name yields no consumers
		for info := range js.Consumers("bad.stream.name") {
			infos = append(infos, info)
		}
		if len(infos) != 0 {
			t.Fatalf("ConsumerInfo is not correct %+v", infos)
		}
		infos = infos[:0]
		for info := range js.Consumers("foo") {
			infos = append(infos, info)
		}
		if len(infos) != 8 || infos[0].Stream != "foo" {
			t.Fatalf("ConsumerInfo is not correct %+v", infos)
		}
	})
	t.Run("list consumer names", func(t *testing.T) {
		var names []string
		ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
		defer cancel()
		for name := range js.ConsumerNames("foo", nats.Context(ctx)) {
			names = append(names, name)
		}
		if got, want := len(names), 8; got != want {
			t.Fatalf("Unexpected names, got=%d, want=%d", got, want)
		}
	})
	t.Run("delete consumers", func(t *testing.T) {
		if err := js.DeleteConsumer("", "dlc"); err != nats.ErrStreamNameRequired {
			t.Fatalf("Expected %v, got %v", nats.ErrStreamNameRequired, err)
		}
		if err := js.DeleteConsumer("bad.stream.name", "dlc"); err != nats.ErrInvalidStreamName {
			t.Fatalf("Expected %v, got %v", nats.ErrInvalidStreamName, err)
		}
		if err := js.DeleteConsumer("foo", ""); err != nats.ErrConsumerNameRequired {
			t.Fatalf("Expected %v, got %v", nats.ErrConsumerNameRequired, err)
		}
		if err := js.DeleteConsumer("foo", "bad.consumer.name"); err != nats.ErrInvalidConsumerName {
			t.Fatalf("Expected %v, got %v", nats.ErrInvalidConsumerName, err)
		}
		if err := js.DeleteConsumer("foo", "dlc"); err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
	})
	t.Run("update consumer", func(t *testing.T) {
		ci, err := js.AddConsumer("foo", &nats.ConsumerConfig{
			Durable:        "update_push_consumer",
			DeliverSubject: "bar",
			AckPolicy:      nats.AckExplicitPolicy,
		})
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}

		// Currently, server supports these fields:
		// description, ack_wait, max_deliver, sample_freq, max_ack_pending, max_waiting and headers_only
		expected := ci.Config
		expected.Description = "my description"
		expected.AckWait = 2 * time.Second
		expected.MaxDeliver = 1
		expected.SampleFrequency = "30"
		expected.MaxAckPending = 10
		expected.HeadersOnly = true

		// Check that stream name is required
		_, err = js.UpdateConsumer("", &expected)
		if err != nats.ErrStreamNameRequired {
			t.Fatalf("Expected stream name required error, got %v", err)
		}

		// Check that stream name is valid
		_, err = js.UpdateConsumer("bad.stream.name", &expected)
		if err != nats.ErrInvalidStreamName {
			t.Fatalf("Expected stream name required error, got %v", err)
		}

		// Check that consumer name is required
		expected.Durable = ""
		expected.Name = ""
		_, err = js.UpdateConsumer("foo", &expected)
		if err != nats.ErrConsumerNameRequired {
			t.Fatalf("Expected consumer name required error, got %v", err)
		}

		// Check that durable name is valid
expected.Durable = "bad.consumer.name" _, err = js.UpdateConsumer("foo", &expected) if err != nats.ErrInvalidConsumerName { t.Fatalf("Expected invalid consumer name error, got %v", err) } expected.Durable = "update_push_consumer" // Check that configuration is required _, err = js.UpdateConsumer("foo", nil) if err != nats.ErrConsumerConfigRequired { t.Fatalf("Expected consumer configuration required error, got %v", err) } // Now check that update works and expected fields have been updated ci, err = js.UpdateConsumer("foo", &expected) if err != nil { t.Fatalf("Error on update: %v", err) } expected.Name = "update_push_consumer" if !reflect.DeepEqual(ci.Config, expected) { t.Fatalf("Expected config to be %+v, got %+v", expected, ci.Config) } // Now check with pull consumer ci, err = js.AddConsumer("foo", &nats.ConsumerConfig{ Durable: "update_pull_consumer", AckPolicy: nats.AckExplicitPolicy, MaxWaiting: 1, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } // Currently, server supports these fields: // description, ack_wait, max_deliver, sample_freq, max_ack_pending, max_waiting and headers_only expected = ci.Config expected.Description = "my description" expected.AckWait = 2 * time.Second expected.MaxDeliver = 1 expected.SampleFrequency = "30" expected.MaxAckPending = 10 expected.HeadersOnly = true expected.MaxRequestBatch = 10 expected.MaxRequestExpires = 2 * time.Second expected.MaxRequestMaxBytes = 1024 ci, err = js.UpdateConsumer("foo", &expected) if err != nil { t.Fatalf("Error on update: %v", err) } if !reflect.DeepEqual(ci.Config, expected) { t.Fatalf("Expected config to be %+v, got %+v", expected, ci.Config) } }) t.Run("purge stream", func(t *testing.T) { if err := js.PurgeStream(""); err != nats.ErrStreamNameRequired { t.Fatalf("Expected %v, got %v", nats.ErrStreamNameRequired, err) } if err := js.PurgeStream("bad.stream.name"); err != nats.ErrInvalidStreamName { t.Fatalf("Expected %v, got %v", nats.ErrInvalidStreamName, err) } if err := 
js.PurgeStream("foo"); err != nil { t.Fatalf("Unexpected error: %v", err) } if si, err := js.StreamInfo("foo"); err != nil { t.Fatalf("Unexpected error: %v", err) } else if si.State.Msgs != 0 { t.Fatalf("StreamInfo.Msgs is not correct") } }) t.Run("list stream names", func(t *testing.T) { var names []string ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) defer cancel() for name := range js.StreamNames(nats.Context(ctx)) { names = append(names, name) } if got, want := len(names), 1; got != want { t.Fatalf("Unexpected names, got=%d, want=%d", got, want) } }) t.Run("delete stream", func(t *testing.T) { if err := js.DeleteStream(""); err != nats.ErrStreamNameRequired { t.Fatalf("Expected %v, got %v", nats.ErrStreamNameRequired, err) } if err := js.DeleteStream("bad.stream.name"); err != nats.ErrInvalidStreamName { t.Fatalf("Expected %v, got %v", nats.ErrInvalidStreamName, err) } if err := js.DeleteStream("foo"); err != nil { t.Fatalf("Unexpected error: %v", err) } if _, err := js.StreamInfo("foo"); err == nil { t.Fatalf("Unexpected success") } }) t.Run("fetch account info", func(t *testing.T) { info, err := js.AccountInfo() if err != nil { t.Fatal(err) } if info.Limits.MaxMemory != 67108864 { t.Errorf("Expected to have memory limits, got: %v", info.Limits.MaxMemory) } if info.Limits.MaxStore != 67108864 { t.Errorf("Expected to have disk limits, got: %v", info.Limits.MaxMemory) } if info.Limits.MaxStreams != -1 { t.Errorf("Expected to not have stream limits, got: %v", info.Limits.MaxStreams) } if info.Limits.MaxConsumers != -1 { t.Errorf("Expected to not have consumer limits, got: %v", info.Limits.MaxConsumers) } if info.API.Total == 0 { t.Errorf("Expected some API calls, got: %v", info.API.Total) } if info.API.Errors == 0 { t.Errorf("Expected some API error, got: %v", info.API.Errors) } }) } func TestStreamConfigMatches(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, js := jsClient(t, srv) 
defer nc.Close() cfg := nats.StreamConfig{ Name: "stream", Description: "desc", Subjects: []string{"foo.*"}, Retention: nats.WorkQueuePolicy, MaxConsumers: 10, MaxMsgs: 100, MaxBytes: 1000, Discard: nats.DiscardNew, DiscardNewPerSubject: true, MaxAge: 100 * time.Second, MaxMsgsPerSubject: 1000, MaxMsgSize: 10000, Storage: nats.MemoryStorage, Replicas: 1, NoAck: true, Duplicates: 10 * time.Second, Sealed: false, DenyDelete: true, DenyPurge: false, AllowRollup: true, Compression: nats.S2Compression, FirstSeq: 5, SubjectTransform: &nats.SubjectTransformConfig{Source: ">", Destination: "transformed.>"}, RePublish: &nats.RePublish{ Source: ">", Destination: "RP.>", HeadersOnly: true, }, AllowDirect: true, ConsumerLimits: nats.StreamConsumerLimits{ InactiveThreshold: 10 * time.Second, MaxAckPending: 500, }, } s, err := js.AddStream(&cfg) if err != nil { t.Fatalf("Unexpected error: %v", err) } // server will set metadata values, so we need to clear them s.Config.Metadata = nil if !reflect.DeepEqual(s.Config, cfg) { t.Fatalf("StreamConfig doesn't match: %#v", s.Config) } cfgMirror := nats.StreamConfig{ Name: "mirror", MaxConsumers: 10, MaxMsgs: 100, MaxBytes: 1000, MaxAge: 100 * time.Second, MaxMsgsPerSubject: 1000, MaxMsgSize: 10000, Replicas: 1, Duplicates: 10 * time.Second, Mirror: &nats.StreamSource{ Name: "stream", OptStartSeq: 10, SubjectTransforms: []nats.SubjectTransformConfig{ {Source: ">", Destination: "transformed.>"}, }, }, MirrorDirect: true, SubjectTransform: &nats.SubjectTransformConfig{Source: ">", Destination: "transformed.>"}, } s, err = js.AddStream(&cfgMirror) if err != nil { t.Fatalf("Unexpected error: %v", err) } // server will set metadata values, so we need to clear them s.Config.Metadata = nil if !reflect.DeepEqual(s.Config, cfgMirror) { t.Fatalf("StreamConfig doesn't match: %#v", s.Config) } cfgSourcing := nats.StreamConfig{ Name: "sourcing", Subjects: []string{"BAR"}, MaxConsumers: 10, MaxMsgs: 100, MaxBytes: 1000, MaxAge: 100 * time.Second, 
MaxMsgsPerSubject: 1000, MaxMsgSize: 10000, Replicas: 1, Duplicates: 10 * time.Second, Sources: []*nats.StreamSource{ { Name: "stream", OptStartSeq: 10, SubjectTransforms: []nats.SubjectTransformConfig{ {Source: ">", Destination: "transformed.>"}, }, }, }, SubjectTransform: &nats.SubjectTransformConfig{Source: ">", Destination: "transformed.>"}, } s, err = js.AddStream(&cfgSourcing) if err != nil { t.Fatalf("Unexpected error: %v", err) } // server will set metadata values, so we need to clear them s.Config.Metadata = nil if !reflect.DeepEqual(s.Config, cfgSourcing) { t.Fatalf("StreamConfig doesn't match: %#v", s.Config) } } func TestStreamLister(t *testing.T) { tests := []struct { name string streamsNum int }{ { name: "single page", streamsNum: 5, }, { name: "multi page", streamsNum: 1025, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() for i := 0; i < test.streamsNum; i++ { if _, err := js.AddStream(&nats.StreamConfig{Name: fmt.Sprintf("stream_%d", i)}); err != nil { t.Fatalf("Unexpected error: %v", err) } } names := make([]string, 0) for name := range js.StreamNames() { names = append(names, name) } if len(names) != test.streamsNum { t.Fatalf("Invalid number of stream names; want: %d; got: %d", test.streamsNum, len(names)) } infos := make([]*nats.StreamInfo, 0) for info := range js.Streams() { infos = append(infos, info) } if len(infos) != test.streamsNum { t.Fatalf("Invalid number of streams; want: %d; got: %d", test.streamsNum, len(infos)) } // test the deprecated StreamsInfo() infos = make([]*nats.StreamInfo, 0) for info := range js.StreamsInfo() { infos = append(infos, info) } if len(infos) != test.streamsNum { t.Fatalf("Invalid number of streams; want: %d; got: %d", test.streamsNum, len(infos)) } }) } } func TestStreamLister_FilterSubject(t *testing.T) { streams := map[string][]string{ "s1": {"foo"}, "s2": {"bar"}, 
"s3": {"foo.*", "bar.*"}, "s4": {"foo-1.A"}, "s5": {"foo.A.bar.B"}, "s6": {"foo.C.bar.D.E"}, } tests := []struct { filter string expected []string }{ { filter: "foo", expected: []string{"s1"}, }, { filter: "bar", expected: []string{"s2"}, }, { filter: "*", expected: []string{"s1", "s2"}, }, { filter: ">", expected: []string{"s1", "s2", "s3", "s4", "s5", "s6"}, }, { filter: "*.A", expected: []string{"s3", "s4"}, }, } s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() for name, subjects := range streams { if _, err := js.AddStream(&nats.StreamConfig{Name: name, Subjects: subjects}); err != nil { t.Fatalf("Unexpected error: %v", err) } } for _, test := range tests { t.Run(test.filter, func(t *testing.T) { names := make([]string, 0) // list stream names for name := range js.StreamNames(nats.StreamListFilter(test.filter)) { names = append(names, name) } if !reflect.DeepEqual(names, test.expected) { t.Fatalf("Invalid result; want: %v; got: %v", test.expected, names) } // list streams names = make([]string, 0) for info := range js.Streams(nats.StreamListFilter(test.filter)) { names = append(names, info.Config.Name) } if !reflect.DeepEqual(names, test.expected) { t.Fatalf("Invalid result; want: %v; got: %v", test.expected, names) } }) } } func TestConsumersLister(t *testing.T) { tests := []struct { name string consumersNum int }{ { name: "single page", consumersNum: 5, }, { name: "multi page", consumersNum: 1025, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() js.AddStream(&nats.StreamConfig{Name: "foo"}) for i := 0; i < test.consumersNum; i++ { if _, err := js.AddConsumer("foo", &nats.ConsumerConfig{Durable: fmt.Sprintf("cons_%d", i), AckPolicy: nats.AckExplicitPolicy}); err != nil { t.Fatalf("Unexpected error: %v", err) } } names := make([]string, 0) for name := 
range js.ConsumerNames("foo") { names = append(names, name) } if len(names) != test.consumersNum { t.Fatalf("Invalid number of consumer names; want: %d; got: %d", test.consumersNum, len(names)) } infos := make([]*nats.ConsumerInfo, 0) for info := range js.Consumers("foo") { infos = append(infos, info) } if len(infos) != test.consumersNum { t.Fatalf("Invalid number of consumers; want: %d; got: %d", test.consumersNum, len(infos)) } // test the deprecated ConsumersInfo() infos = make([]*nats.ConsumerInfo, 0) for info := range js.ConsumersInfo("foo") { infos = append(infos, info) } if len(infos) != test.consumersNum { t.Fatalf("Invalid number of consumers; want: %d; got: %d", test.consumersNum, len(infos)) } }) } } func TestAccountInfo(t *testing.T) { tests := []struct { name string cfg string expected *nats.AccountInfo withError error }{ { name: "server with default values", cfg: ` listen: 127.0.0.1:-1 jetstream: enabled `, expected: &nats.AccountInfo{ Tier: nats.Tier{ Memory: 0, Store: 0, Streams: 0, Consumers: 0, ReservedMemory: 0, ReservedStore: 0, Limits: nats.AccountLimits{ MaxMemory: -1, MaxStore: -1, MaxStreams: -1, MaxConsumers: -1, MaxAckPending: -1, MemoryMaxStreamBytes: -1, StoreMaxStreamBytes: -1, MaxBytesRequired: false, }, }, API: nats.APIStats{ Total: 0, Errors: 0, }, }, }, { name: "server with limits set", cfg: ` listen: 127.0.0.1:-1 jetstream: {domain: "test-domain"} accounts: { A { users: [{ user: "foo" }] jetstream: { max_mem: 64MB, max_file: 32MB, max_streams: 10, max_consumers: 20, max_ack_pending: 100, memory_max_stream_bytes: 2048, store_max_stream_bytes: 4096, max_stream_bytes: true } } } `, expected: &nats.AccountInfo{ Tier: nats.Tier{ Memory: 0, Store: 0, Streams: 0, Consumers: 0, ReservedMemory: 0, ReservedStore: 0, Limits: nats.AccountLimits{ MaxMemory: 67108864, MaxStore: 33554432, MaxStreams: 10, MaxConsumers: 20, MaxAckPending: 100, MemoryMaxStreamBytes: 2048, StoreMaxStreamBytes: 4096, MaxBytesRequired: true, }, }, Domain: 
"test-domain", API: nats.APIStats{ Total: 0, Errors: 0, }, }, }, { name: "jetstream not enabled", cfg: ` listen: 127.0.0.1:-1 `, withError: nats.ErrJetStreamNotEnabled, }, { name: "jetstream not enabled for account", cfg: ` listen: 127.0.0.1:-1 no_auth_user: foo jetstream: enabled accounts: { A: { users: [ {user: foo} ] }, } `, withError: nats.ErrJetStreamNotEnabledForAccount, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { conf := createConfFile(t, []byte(test.cfg)) defer os.Remove(conf) s, _ := RunServerWithConfig(conf) defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s, nats.UserInfo("foo", "")) defer nc.Close() info, err := js.AccountInfo() if test.withError != nil { if err == nil || !errors.Is(err, test.withError) { t.Fatalf("Expected error: '%s'; got '%s'", test.withError, err) } return } if err != nil { t.Fatalf("Unexpected error: %v", err) } if !reflect.DeepEqual(test.expected, info) { t.Fatalf("Account info does not match; expected: %+v; got: %+v", test.expected, info) } _, err = js.AddStream(&nats.StreamConfig{Name: "FOO", MaxBytes: 1024}) if err != nil { t.Fatalf("Unexpected error: %v", err) } _, err = js.AddConsumer("FOO", &nats.ConsumerConfig{AckPolicy: nats.AckExplicitPolicy}) if err != nil { t.Fatalf("Unexpected error: %v", err) } // a total of 3 API calls is expected - get account info, create stream, create consumer test.expected.API.Total = 3 test.expected.Streams = 1 test.expected.Consumers = 1 info, err = js.AccountInfo() if err != nil { t.Fatalf("Unexpected error: %v", err) } // ignore reserved store in comparison since this is dynamically // assigned by the server info.ReservedStore = test.expected.ReservedStore if !reflect.DeepEqual(test.expected, info) { t.Fatalf("Account info does not match; expected: %+v; got: %+v", test.expected, info) } }) } } func TestPurgeStream(t *testing.T) { testData := []nats.Msg{ { Subject: "foo.A", Data: []byte("first on A"), }, { Subject: "foo.C", Data: []byte("first on 
C"), }, { Subject: "foo.B", Data: []byte("first on B"), }, { Subject: "foo.C", Data: []byte("second on C"), }, } tests := []struct { name string stream string req *nats.StreamPurgeRequest withError error expected []nats.Msg }{ { name: "purge all messages", stream: "foo", expected: []nats.Msg{}, }, { name: "with filter subject", stream: "foo", req: &nats.StreamPurgeRequest{ Subject: "foo.C", }, expected: []nats.Msg{ { Subject: "foo.A", Data: []byte("first on A"), }, { Subject: "foo.B", Data: []byte("first on B"), }, }, }, { name: "with sequence", stream: "foo", req: &nats.StreamPurgeRequest{ Sequence: 3, }, expected: []nats.Msg{ { Subject: "foo.B", Data: []byte("first on B"), }, { Subject: "foo.C", Data: []byte("second on C"), }, }, }, { name: "with keep", stream: "foo", req: &nats.StreamPurgeRequest{ Keep: 1, }, expected: []nats.Msg{ { Subject: "foo.C", Data: []byte("second on C"), }, }, }, { name: "with filter and sequence", stream: "foo", req: &nats.StreamPurgeRequest{ Subject: "foo.C", Sequence: 3, }, expected: []nats.Msg{ { Subject: "foo.A", Data: []byte("first on A"), }, { Subject: "foo.B", Data: []byte("first on B"), }, { Subject: "foo.C", Data: []byte("second on C"), }, }, }, { name: "with filter and keep", stream: "foo", req: &nats.StreamPurgeRequest{ Subject: "foo.C", Keep: 1, }, expected: []nats.Msg{ { Subject: "foo.A", Data: []byte("first on A"), }, { Subject: "foo.B", Data: []byte("first on B"), }, { Subject: "foo.C", Data: []byte("second on C"), }, }, }, { name: "empty stream name", stream: "", req: &nats.StreamPurgeRequest{ Subject: "foo.C", Keep: 1, }, withError: nats.ErrStreamNameRequired, }, { name: "invalid stream name", stream: "bad.stream.name", req: &nats.StreamPurgeRequest{ Subject: "foo.C", Keep: 1, }, withError: nats.ErrInvalidStreamName, }, { name: "invalid request - both sequence and keep provided", stream: "foo", req: &nats.StreamPurgeRequest{ Sequence: 3, Keep: 1, }, withError: nats.ErrBadRequest, }, } for _, test := range tests { 
t.Run(test.name, func(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() _, err := js.AddStream(&nats.StreamConfig{ Name: "foo", Subjects: []string{"foo.A", "foo.B", "foo.C"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } for _, msg := range testData { if _, err := js.PublishMsg(&msg); err != nil { t.Fatalf("Unexpected error during publish: %v", err) } } err = js.PurgeStream(test.stream, test.req) if test.withError != nil { if err == nil { t.Fatal("Expected error, got nil") } if !errors.Is(err, test.withError) { t.Fatalf("Expected error: '%s'; got '%s'", test.withError, err) } return } streamInfo, err := js.StreamInfo("foo", test.req) if err != nil { t.Fatalf("Unexpected error: %v", err) } if streamInfo.State.Msgs != uint64(len(test.expected)) { t.Fatalf("Unexpected message count: expected %d; got: %d", len(test.expected), streamInfo.State.Msgs) } sub, err := js.SubscribeSync("foo.*", nats.BindStream("foo")) if err != nil { t.Fatalf("Unexpected error: %s", err) } for i := 0; i < int(streamInfo.State.Msgs); i++ { msg, err := sub.NextMsg(1 * time.Second) if err != nil { t.Fatalf("Unexpected error: %s", err) } if msg.Subject != test.expected[i].Subject { t.Fatalf("Unexpected message; subject is different than expected: want %s; got: %s", test.expected[i].Subject, msg.Subject) } if string(msg.Data) != string(test.expected[i].Data) { t.Fatalf("Unexpected message; data is different than expected: want %s; got: %s", test.expected[i].Data, msg.Data) } } }) } } func TestStreamInfoSubjectInfo(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() _, err := js.AddStream(&nats.StreamConfig{ Name: "foo", Subjects: []string{"foo.*"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } if _, err := js.Publish("foo.A", []byte("")); err != nil { t.Fatalf("Unexpected error during publish: %v", err) } if 
_, err := js.Publish("foo.B", []byte("")); err != nil { t.Fatalf("Unexpected error during publish: %v", err) } si, err := js.StreamInfo("foo", &nats.StreamInfoRequest{ SubjectsFilter: "foo.A", }) if err != nil { t.Fatalf("Unexpected error: %v", err) } if si.State.NumSubjects != 2 { t.Fatal("Expected NumSubjects to be 1") } if len(si.State.Subjects) != 1 { t.Fatal("Expected Subjects len to be 1") } if si.State.Subjects["foo.A"] != 1 { t.Fatal("Expected Subjects to have an entry for foo.A with a count of 1") } } func TestStreamInfoDeletedDetails(t *testing.T) { testData := []string{"one", "two", "three", "four"} tests := []struct { name string stream string req *nats.StreamInfoRequest withError error expectedDeletedDetails []uint64 }{ { name: "empty request body", stream: "foo", }, { name: "with deleted details", stream: "foo", req: &nats.StreamInfoRequest{ DeletedDetails: true, }, expectedDeletedDetails: []uint64{2, 4}, }, { name: "with deleted details set to false", stream: "foo", req: &nats.StreamInfoRequest{ DeletedDetails: false, }, }, { name: "empty stream name", stream: "", withError: nats.ErrStreamNameRequired, }, { name: "invalid stream name", stream: "bad.stream.name", withError: nats.ErrInvalidStreamName, }, { name: "stream not found", stream: "bar", withError: nats.ErrStreamNotFound, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() _, err := js.AddStream(&nats.StreamConfig{ Name: "foo", Subjects: []string{"foo.A"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } for _, msg := range testData { if _, err := js.Publish("foo.A", []byte(msg)); err != nil { t.Fatalf("Unexpected error during publish: %v", err) } } if err := js.DeleteMsg("foo", 2); err != nil { t.Fatalf("Unexpected error while deleting message from stream: %v", err) } if err := js.DeleteMsg("foo", 4); err != nil { t.Fatalf("Unexpected error while 
deleting message from stream: %v", err) } var streamInfo *nats.StreamInfo if test.req != nil { streamInfo, err = js.StreamInfo(test.stream, test.req) } else { streamInfo, err = js.StreamInfo(test.stream) } if test.withError != nil { if err == nil { t.Fatal("Expected error, got nil") } if !errors.Is(err, test.withError) { t.Fatalf("Expected error: '%s'; got '%s'", test.withError, err) } return } if err != nil { t.Fatalf("Unexpected error: %v", err) } if streamInfo.Config.Name != "foo" { t.Fatalf("Invalid stream name in StreamInfo response: want: 'foo'; got: '%s'", streamInfo.Config.Name) } if streamInfo.State.NumDeleted != 2 { t.Fatalf("Invalid value for num_deleted in state: want: 2; got: %d", streamInfo.State.NumDeleted) } if !reflect.DeepEqual(test.expectedDeletedDetails, streamInfo.State.Deleted) { t.Fatalf("Invalid value for deleted msgs in state: want: %v; got: %v", test.expectedDeletedDetails, streamInfo.State.Deleted) } }) } } func TestJetStreamManagement_GetMsg(t *testing.T) { t.Run("1-node", func(t *testing.T) { withJSServer(t, testJetStreamManagement_GetMsg) }) t.Run("3-node", func(t *testing.T) { withJSCluster(t, "GET", 3, testJetStreamManagement_GetMsg) }) } func testJetStreamManagement_GetMsg(t *testing.T, srvs ...*jsServer) { s := srvs[0] nc, js := jsClient(t, s.Server) defer nc.Close() var err error _, err = js.AddStream(&nats.StreamConfig{ Name: "foo", Subjects: []string{"foo.A", "foo.B", "foo.C"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } for i := 0; i < 5; i++ { msg := nats.NewMsg("foo.A") data := fmt.Sprintf("A:%d", i) msg.Data = []byte(data) msg.Header = nats.Header{ "X-NATS-Key": []string{"123"}, } msg.Header.Add("X-Nats-Test-Data", data) js.PublishMsg(msg) js.Publish("foo.B", []byte(fmt.Sprintf("B:%d", i))) js.Publish("foo.C", []byte(fmt.Sprintf("C:%d", i))) } var originalSeq uint64 t.Run("get message", func(t *testing.T) { expected := 5 msgs := make([]*nats.Msg, 0) ctx, cancel := context.WithTimeout(context.Background(), 
2*time.Second) defer cancel() sub, err := js.Subscribe("foo.C", func(msg *nats.Msg) { msgs = append(msgs, msg) if len(msgs) == expected { cancel() } }) if err != nil { t.Fatal(err) } <-ctx.Done() sub.Unsubscribe() got := len(msgs) if got != expected { t.Fatalf("Expected: %d, got: %d", expected, got) } msg := msgs[3] meta, err := msg.Metadata() if err != nil { t.Fatal(err) } originalSeq = meta.Sequence.Stream // Get the same message using JSM. fetchedMsg, err := js.GetMsg("foo", originalSeq) if err != nil { t.Fatal(err) } expectedData := "C:3" if string(fetchedMsg.Data) != expectedData { t.Errorf("Expected: %v, got: %v", expectedData, string(fetchedMsg.Data)) } }) t.Run("get deleted message", func(t *testing.T) { err := js.DeleteMsg("foo", originalSeq) if err != nil { t.Fatal(err) } si, err := js.StreamInfo("foo") if err != nil { t.Fatal(err) } expected := 14 if int(si.State.Msgs) != expected { t.Errorf("Expected %d msgs, got: %d", expected, si.State.Msgs) } // There should be only 4 messages since one deleted. expected = 4 msgs := make([]*nats.Msg, 0) ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() sub, err := js.Subscribe("foo.C", func(msg *nats.Msg) { msgs = append(msgs, msg) if len(msgs) == expected { cancel() } }) if err != nil { t.Fatal(err) } <-ctx.Done() sub.Unsubscribe() msg := msgs[3] meta, err := msg.Metadata() if err != nil { t.Fatal(err) } newSeq := meta.Sequence.Stream // First message removed if newSeq <= originalSeq { t.Errorf("Expected %d to be higher sequence than %d", newSeq, originalSeq) } // Try to fetch the same message which should be gone. 
_, err = js.GetMsg("foo", originalSeq) if err == nil || err != nats.ErrMsgNotFound { t.Errorf("Expected no message found error, got: %v", err) } }) t.Run("get message with headers", func(t *testing.T) { streamMsg, err := js.GetMsg("foo", 4) if err != nil { t.Fatal(err) } if streamMsg.Sequence != 4 { t.Errorf("Expected %v, got: %v", 4, streamMsg.Sequence) } expectedMap := map[string][]string{ "X-Nats-Test-Data": {"A:1"}, "X-NATS-Key": {"123"}, } if !reflect.DeepEqual(streamMsg.Header, nats.Header(expectedMap)) { t.Errorf("Expected %v, got: %v", expectedMap, streamMsg.Header) } sub, err := js.SubscribeSync("foo.A", nats.StartSequence(4)) if err != nil { t.Fatal(err) } msg, err := sub.NextMsg(2 * time.Second) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(msg.Header, nats.Header(expectedMap)) { t.Errorf("Expected %v, got: %v", expectedMap, msg.Header) } }) } func TestJetStreamManagement_DeleteMsg(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() var err error _, err = js.AddStream(&nats.StreamConfig{ Name: "foo", Subjects: []string{"foo.A", "foo.B", "foo.C"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } for i := 0; i < 5; i++ { js.Publish("foo.A", []byte("A")) js.Publish("foo.B", []byte("B")) js.Publish("foo.C", []byte("C")) } si, err := js.StreamInfo("foo") if err != nil { t.Fatal(err) } var total uint64 = 15 if si.State.Msgs != total { t.Errorf("Expected %d msgs, got: %d", total, si.State.Msgs) } expected := 5 msgs := make([]*nats.Msg, 0) ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() sub, err := js.Subscribe("foo.C", func(msg *nats.Msg) { msgs = append(msgs, msg) if len(msgs) == expected { cancel() } }) if err != nil { t.Fatal(err) } <-ctx.Done() sub.Unsubscribe() got := len(msgs) if got != expected { t.Fatalf("Expected %d, got %d", expected, got) } msg := msgs[0] meta, err := msg.Metadata() if err != nil { t.Fatal(err) } 
originalSeq := meta.Sequence.Stream // create a subscription on delete message API subject to verify the content of delete operation apiSub, err := nc.SubscribeSync("$JS.API.STREAM.MSG.DELETE.foo") if err != nil { t.Fatalf("Error on subscribe: %v", err) } err = js.DeleteMsg("foo", originalSeq) if err != nil { t.Fatal(err) } msg, err = apiSub.NextMsg(1 * time.Second) if err != nil { t.Fatalf("Unexpected error: %v", err) } if str := string(msg.Data); !strings.Contains(str, "no_erase\":true") { t.Fatalf("Request should not have no_erase field set: %s", str) } si, err = js.StreamInfo("foo") if err != nil { t.Fatal(err) } total = 14 if si.State.Msgs != total { t.Errorf("Expected %d msgs, got: %d", total, si.State.Msgs) } // There should be only 4 messages since one deleted. expected = 4 msgs = make([]*nats.Msg, 0) ctx, cancel = context.WithTimeout(context.Background(), 2*time.Second) defer cancel() sub, err = js.Subscribe("foo.C", func(msg *nats.Msg) { msgs = append(msgs, msg) if len(msgs) == expected { cancel() } }) if err != nil { t.Fatal(err) } <-ctx.Done() sub.Unsubscribe() msg = msgs[0] meta, err = msg.Metadata() if err != nil { t.Fatal(err) } newSeq := meta.Sequence.Stream // First message removed if newSeq <= originalSeq { t.Errorf("Expected %d to be higher sequence than %d", newSeq, originalSeq) } } func TestJetStreamManagement_SecureDeleteMsg(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() var err error _, err = js.AddStream(&nats.StreamConfig{ Name: "foo", Subjects: []string{"foo.A", "foo.B", "foo.C"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } for i := 0; i < 5; i++ { js.Publish("foo.A", []byte("A")) js.Publish("foo.B", []byte("B")) js.Publish("foo.C", []byte("C")) } si, err := js.StreamInfo("foo") if err != nil { t.Fatal(err) } var total uint64 = 15 if si.State.Msgs != total { t.Errorf("Expected %d msgs, got: %d", total, si.State.Msgs) } expected := 5 msgs 
:= make([]*nats.Msg, 0) ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() sub, err := js.Subscribe("foo.C", func(msg *nats.Msg) { msgs = append(msgs, msg) if len(msgs) == expected { cancel() } }) if err != nil { t.Fatal(err) } <-ctx.Done() sub.Unsubscribe() got := len(msgs) if got != expected { t.Fatalf("Expected %d, got %d", expected, got) } msg := msgs[0] meta, err := msg.Metadata() if err != nil { t.Fatal(err) } originalSeq := meta.Sequence.Stream // create a subscription on delete message API subject to verify the content of delete operation apiSub, err := nc.SubscribeSync("$JS.API.STREAM.MSG.DELETE.foo") if err != nil { t.Fatalf("Error on subscribe: %v", err) } err = js.SecureDeleteMsg("foo", originalSeq) if err != nil { t.Fatal(err) } msg, err = apiSub.NextMsg(1 * time.Second) if err != nil { t.Fatalf("Unexpected error: %v", err) } if str := string(msg.Data); strings.Contains(str, "no_erase\":true") { t.Fatalf("Request should not have no_erase field set: %s", str) } si, err = js.StreamInfo("foo") if err != nil { t.Fatal(err) } total = 14 if si.State.Msgs != total { t.Errorf("Expected %d msgs, got: %d", total, si.State.Msgs) } // There should be only 4 messages since one deleted. 
expected = 4 msgs = make([]*nats.Msg, 0) ctx, cancel = context.WithTimeout(context.Background(), 2*time.Second) defer cancel() sub, err = js.Subscribe("foo.C", func(msg *nats.Msg) { msgs = append(msgs, msg) if len(msgs) == expected { cancel() } }) if err != nil { t.Fatal(err) } <-ctx.Done() sub.Unsubscribe() msg = msgs[0] meta, err = msg.Metadata() if err != nil { t.Fatal(err) } newSeq := meta.Sequence.Stream // First message removed if newSeq <= originalSeq { t.Errorf("Expected %d to be higher sequence than %d", newSeq, originalSeq) } } func TestJetStreamImport(t *testing.T) { conf := createConfFile(t, []byte(` listen: 127.0.0.1:-1 no_auth_user: rip jetstream: {max_mem_store: 64GB, max_file_store: 10TB} accounts: { JS: { jetstream: enabled users: [ {user: dlc, password: foo} ] exports [ { service: "$JS.API.>" }, { service: "foo" }] }, U: { users: [ {user: rip, password: bar} ] imports [ { service: { subject: "$JS.API.>", account: JS } , to: "dlc.>" } { service: { subject: "foo", account: JS } } ] }, } `)) defer os.Remove(conf) s, _ := RunServerWithConfig(conf) defer shutdownJSServerAndRemoveStorage(t, s) // Create a stream using JSM. ncm, jsm := jsClient(t, s, nats.UserInfo("dlc", "foo")) defer ncm.Close() var err error _, err = jsm.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"foo", "bar"}, }) if err != nil { t.Fatalf("stream create failed: %v", err) } // Client with the imports. nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() // Since we import with a prefix from above we can use that when creating our JS context. 
js, err := nc.JetStream(nats.APIPrefix("dlc")) if err != nil { t.Fatalf("Unexpected error: %v", err) } msg := []byte("Hello JS Import!") if _, err = js.Publish("foo", msg); err != nil { t.Fatalf("Unexpected publish error: %v", err) } } func TestJetStreamImportDirectOnly(t *testing.T) { conf := createConfFile(t, []byte(` listen: 127.0.0.1:-1 no_auth_user: rip jetstream: {max_mem_store: 64GB, max_file_store: 10TB} accounts: { JS: { jetstream: enabled users: [ {user: dlc, password: foo} ] exports [ # For now have to expose the API to enable JS context across account. { service: "$JS.API.INFO" } # For the stream publish. { service: "ORDERS" } # For the pull based consumer. Response type needed for batchsize > 1 { service: "$JS.API.CONSUMER.INFO.ORDERS.d1", response: stream } { service: "$JS.API.CONSUMER.MSG.NEXT.ORDERS.d1", response: stream } # For the push based consumer delivery and ack. { stream: "p.d" } { stream: "p.d3" } # For the acks. Service in case we want an ack to our ack. { service: "$JS.ACK.ORDERS.*.>" } # Allow lookup of stream to be able to bind from another account. 
{ service: "$JS.API.CONSUMER.INFO.ORDERS.d4", response: stream } { stream: "p.d4" } ] }, U: { users: [ { user: rip, password: bar } ] imports [ { service: { subject: "$JS.API.INFO", account: JS } } { service: { subject: "ORDERS", account: JS } , to: "orders" } # { service: { subject: "$JS.API.CONSUMER.INFO.ORDERS.d1", account: JS } } { service: { subject: "$JS.API.CONSUMER.INFO.ORDERS.d4", account: JS } } { service: { subject: "$JS.API.CONSUMER.MSG.NEXT.ORDERS.d1", account: JS } } { stream: { subject: "p.d", account: JS } } { stream: { subject: "p.d3", account: JS } } { stream: { subject: "p.d4", account: JS } } { service: { subject: "$JS.ACK.ORDERS.*.>", account: JS } } ] }, V: { users: [ { user: v, password: quux, permissions: { publish: {deny: ["$JS.API.CONSUMER.INFO.ORDERS.d1"]} } } ] imports [ { service: { subject: "$JS.API.INFO", account: JS } } { service: { subject: "ORDERS", account: JS } , to: "orders" } { service: { subject: "$JS.API.CONSUMER.INFO.ORDERS.d1", account: JS } } { service: { subject: "$JS.API.CONSUMER.INFO.ORDERS.d4", account: JS } } { service: { subject: "$JS.API.CONSUMER.MSG.NEXT.ORDERS.d1", account: JS } } { stream: { subject: "p.d", account: JS } } { stream: { subject: "p.d3", account: JS } } { stream: { subject: "p.d4", account: JS } } { service: { subject: "$JS.ACK.ORDERS.*.>", account: JS } } ] }, } `)) defer os.Remove(conf) s, _ := RunServerWithConfig(conf) defer shutdownJSServerAndRemoveStorage(t, s) // Create a stream using JSM. ncm, jsm := jsClient(t, s, nats.UserInfo("dlc", "foo")) defer ncm.Close() var err error // Create a stream using the server directly. _, err = jsm.AddStream(&nats.StreamConfig{Name: "ORDERS"}) if err != nil { t.Fatalf("stream create failed: %v", err) } // Create a pull based consumer. _, err = jsm.AddConsumer("ORDERS", &nats.ConsumerConfig{Durable: "d1", AckPolicy: nats.AckExplicitPolicy}) if err != nil { t.Fatalf("pull consumer create failed: %v", err) } // Create a push based consumers. 
_, err = jsm.AddConsumer("ORDERS", &nats.ConsumerConfig{ Durable: "d2", AckPolicy: nats.AckExplicitPolicy, DeliverSubject: "p.d", }) if err != nil { t.Fatalf("push consumer create failed: %v", err) } _, err = jsm.AddConsumer("ORDERS", &nats.ConsumerConfig{ Durable: "d3", AckPolicy: nats.AckExplicitPolicy, DeliverSubject: "p.d3", }) if err != nil { t.Fatalf("push consumer create failed: %v", err) } _, err = jsm.AddConsumer("ORDERS", &nats.ConsumerConfig{ Durable: "d4", AckPolicy: nats.AckExplicitPolicy, DeliverSubject: "p.d4", }) if err != nil { t.Fatalf("push consumer create failed: %v", err) } nc, js := jsClient(t, s) defer nc.Close() // Now make sure we can send to the stream from another account. toSend := 100 for i := 0; i < toSend; i++ { if _, err := js.Publish("orders", []byte(fmt.Sprintf("ORDER-%d", i+1))); err != nil { t.Fatalf("Unexpected error publishing message %d: %v", i+1, err) } } var sub *nats.Subscription waitForPending := func(t *testing.T, n int) { t.Helper() timeout := time.Now().Add(2 * time.Second) for time.Now().Before(timeout) { if msgs, _, _ := sub.Pending(); msgs == n { return } time.Sleep(10 * time.Millisecond) } msgs, _, _ := sub.Pending() t.Fatalf("Expected to receive %d messages, but got %d", n, msgs) } // Do push based consumer using a regular NATS subscription on the import subject. sub, err = nc.SubscribeSync("p.d3") if err != nil { t.Fatalf("Unexpected error: %v", err) } waitForPending(t, toSend) // Can also ack from the regular NATS subscription via the imported subject. for i := 0; i < toSend; i++ { m, err := sub.NextMsg(100 * time.Millisecond) if err != nil { t.Fatalf("Unexpected error: %v", err) } // Test that can expect an ack of the ack. err = m.AckSync() if err != nil { t.Errorf("Unexpected error: %v", err) } } // Can attach to the consumer from another JS account if there is a durable name. 
sub, err = js.SubscribeSync("ORDERS", nats.Durable("d4"), nats.BindStream("ORDERS")) if err != nil { t.Fatalf("Unexpected error: %v", err) } waitForPending(t, toSend) // Even if there are no permissions or import to check that a consumer exists, // it is still possible to bind subscription to it. sub, err = js.PullSubscribe("ORDERS", "d1", nats.Bind("ORDERS", "d1")) if err != nil { t.Fatal(err) } expected := 10 msgs, err := sub.Fetch(expected) if err != nil { t.Fatal(err) } got := len(msgs) if got != expected { t.Fatalf("Expected %d, got %d", expected, got) } // Account without permissions to lookup should be able to bind as well. eh := func(_ *nats.Conn, _ *nats.Subscription, err error) {} nc, err = nats.Connect(s.ClientURL(), nats.UserInfo("v", "quux"), nats.ErrorHandler(eh)) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() // Since we know that the lookup will fail, we use a smaller timeout than the 5s default. js, err = nc.JetStream(nats.MaxWait(500 * time.Millisecond)) if err != nil { t.Fatal(err) } sub, err = js.PullSubscribe("ORDERS", "d1", nats.Bind("ORDERS", "d1")) if err != nil { t.Fatal(err) } expected = 10 msgs, err = sub.Fetch(expected) if err != nil { t.Fatal(err) } got = len(msgs) if got != expected { t.Fatalf("Expected %d, got %d", expected, got) } } func TestJetStreamCrossAccountMirrorsAndSources(t *testing.T) { conf := createConfFile(t, []byte(` listen: 127.0.0.1:-1 no_auth_user: rip jetstream: {max_mem_store: 64GB, max_file_store: 10TB} accounts { JS { jetstream: enabled users = [ { user: "rip", pass: "pass" } ] exports [ { service: "$JS.API.CONSUMER.>" } # To create internal consumers to mirror/source. { stream: "RI.DELIVER.SYNC.>" } # For the mirror/source consumers sending to IA via delivery subject. 
] } IA { jetstream: enabled users = [ { user: "dlc", pass: "pass" } ] imports [ { service: { account: JS, subject: "$JS.API.CONSUMER.>"}, to: "RI.JS.API.CONSUMER.>" } { stream: { account: JS, subject: "RI.DELIVER.SYNC.>"} } ] } $SYS { users = [ { user: "admin", pass: "s3cr3t!" } ] } } `)) defer os.Remove(conf) s, _ := RunServerWithConfig(conf) defer shutdownJSServerAndRemoveStorage(t, s) nc1, js1 := jsClient(t, s, nats.UserInfo("rip", "pass")) defer nc1.Close() var err error _, err = js1.AddStream(&nats.StreamConfig{ Name: "TEST", Replicas: 1, }) if err != nil { t.Fatal(err) } const ( toSend = 100 publishSubj = "TEST" sourceName = "MY_SOURCE_TEST" mirrorName = "MY_MIRROR_TEST" ) for i := 0; i < toSend; i++ { data := []byte(fmt.Sprintf("OK %d", i)) if _, err := js1.Publish(publishSubj, data); err != nil { t.Fatalf("Unexpected publish error: %v", err) } } nc2, js2 := jsClient(t, s, nats.UserInfo("dlc", "pass")) defer nc2.Close() checkMsgCount := func(t *testing.T, stream string) { t.Helper() checkFor(t, 20*time.Second, 100*time.Millisecond, func() error { si, err := js2.StreamInfo(stream) if err != nil { return err } if si.State.Msgs != uint64(toSend) { return fmt.Errorf("Expected %d msgs, got state: %+v", toSend, si.State) } return nil }) } checkConsume := func(t *testing.T, js nats.JetStream, subject, stream string, want int) { t.Helper() sub, err := js.SubscribeSync(subject, nats.BindStream(stream)) if err != nil { t.Fatal(err) } defer sub.Unsubscribe() checkSubsPending(t, sub, want) for i := 0; i < want; i++ { msg, err := sub.NextMsg(time.Second) if err != nil { t.Fatal(err) } meta, err := msg.Metadata() if err != nil { t.Fatal(err) } if got, want := meta.Stream, stream; got != want { t.Fatalf("unexpected stream name, got=%q, want=%q", got, want) } } } _, err = js2.AddStream(&nats.StreamConfig{ Name: mirrorName, Storage: nats.FileStorage, Mirror: &nats.StreamSource{ Name: publishSubj, External: &nats.ExternalStream{ APIPrefix: "RI.JS.API", DeliverPrefix: 
"RI.DELIVER.SYNC.MIRRORS", }, }, }) if err != nil { t.Fatal(err) } checkMsgCount(t, mirrorName) checkConsume(t, js2, publishSubj, mirrorName, toSend) _, err = js2.AddStream(&nats.StreamConfig{ Name: sourceName, Storage: nats.FileStorage, Sources: []*nats.StreamSource{ { Name: publishSubj, External: &nats.ExternalStream{ APIPrefix: "RI.JS.API", DeliverPrefix: "RI.DELIVER.SYNC.SOURCES", }, }, }, }) if err != nil { t.Fatal(err) } checkMsgCount(t, sourceName) checkConsume(t, js2, publishSubj, sourceName, toSend) } func TestJetStreamAutoMaxAckPending(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s, nats.SyncQueueLen(500)) defer nc.Close() var err error _, err = js.AddStream(&nats.StreamConfig{Name: "foo"}) if err != nil { t.Fatalf("Unexpected error: %v", err) } toSend := 10_000 msg := []byte("Hello") for i := 0; i < toSend; i++ { // Use plain NATS here for speed. nc.Publish("foo", msg) } nc.Flush() // Create a consumer. sub, err := js.SubscribeSync("foo") if err != nil { t.Fatalf("Unexpected error: %v", err) } defer sub.Unsubscribe() expectedMaxAck, _, _ := sub.PendingLimits() ci, err := sub.ConsumerInfo() if err != nil { t.Fatalf("Unexpected error: %v", err) } if ci.Config.MaxAckPending != expectedMaxAck { t.Fatalf("Expected MaxAckPending to be set to %d, got %d", expectedMaxAck, ci.Config.MaxAckPending) } waitForPending := func(n int) { timeout := time.Now().Add(2 * time.Second) for time.Now().Before(timeout) { if msgs, _, _ := sub.Pending(); msgs == n { return } time.Sleep(10 * time.Millisecond) } msgs, _, _ := sub.Pending() t.Fatalf("Expected to receive %d messages, but got %d", n, msgs) } waitForPending(expectedMaxAck) // We do it twice to make sure it does not go over. waitForPending(expectedMaxAck) // Now make sure we can consume them all with no slow consumers etc. 
for i := 0; i < toSend; i++ { m, err := sub.NextMsg(time.Second) if err != nil { t.Fatalf("Unexpected error receiving %d: %v", i+1, err) } m.Ack() } } func TestJetStreamInterfaces(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) var jsm nats.JetStreamManager var jsctx nats.JetStreamContext // JetStream that can publish/subscribe but cannot manage streams. nc, js := jsClient(t, s) defer nc.Close() var err error js.Publish("foo", []byte("hello")) // JetStream context that can manage streams/consumers but cannot produce messages. jsm, err = nc.JetStream() if err != nil { t.Fatalf("Unexpected error: %v", err) } jsm.AddStream(&nats.StreamConfig{Name: "FOO"}) // JetStream context that can both manage streams/consumers // as well as publish/subscribe. jsctx, err = nc.JetStream() if err != nil { t.Fatalf("Unexpected error: %v", err) } jsctx.AddStream(&nats.StreamConfig{Name: "BAR"}) jsctx.Publish("bar", []byte("hello world")) publishMsg := func(js nats.JetStream, payload []byte) { js.Publish("foo", payload) } publishMsg(js, []byte("hello world")) } func TestJetStreamSubscribe_DeliverPolicy(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() var err error // Create the stream using our client API. 
_, err = js.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"foo", "bar"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } var publishTime time.Time for i := 0; i < 10; i++ { payload := fmt.Sprintf("i:%d", i) if i == 5 { publishTime = time.Now() } js.Publish("foo", []byte(payload)) time.Sleep(15 * time.Millisecond) } for _, test := range []struct { name string subopt nats.SubOpt expected int }{ { "deliver.all", nats.DeliverAll(), 10, }, { "deliver.last", nats.DeliverLast(), 1, }, { "deliver.new", nats.DeliverNew(), 0, }, { "deliver.starttime", nats.StartTime(publishTime), 5, }, { "deliver.startseq", nats.StartSequence(6), 5, }, } { test := test t.Run(test.name, func(t *testing.T) { timeout := 2 * time.Second if test.expected == 0 { timeout = 250 * time.Millisecond } ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() got := 0 sub, err := js.Subscribe("foo", func(m *nats.Msg) { got++ if got == test.expected { cancel() } }, test.subopt) if err != nil { t.Fatalf("Unexpected error: %v", err) } <-ctx.Done() sub.Drain() if got != test.expected { t.Fatalf("Expected %d, got %d", test.expected, got) } }) } js.Publish("bar", []byte("bar msg 1")) js.Publish("bar", []byte("bar msg 2")) sub, err := js.SubscribeSync("bar", nats.BindStream("TEST"), nats.DeliverLastPerSubject()) if err != nil { t.Fatalf("Error on subscribe: %v", err) } msg, err := sub.NextMsg(time.Second) if err != nil { t.Fatalf("Error on next msg: %v", err) } if string(msg.Data) != "bar msg 2" { t.Fatalf("Unexpected last message: %q", msg.Data) } } func TestJetStreamSubscribe_AckPolicy(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() var err error // Create the stream using our client API. 
_, err = js.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"foo", "bar"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } for i := 0; i < 10; i++ { payload := fmt.Sprintf("i:%d", i) js.Publish("foo", []byte(payload)) } for _, test := range []struct { name string subopt nats.SubOpt expected nats.AckPolicy }{ { "ack-none", nats.AckNone(), nats.AckNonePolicy, }, { "ack-all", nats.AckAll(), nats.AckAllPolicy, }, { "ack-explicit", nats.AckExplicit(), nats.AckExplicitPolicy, }, } { test := test t.Run(test.name, func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() got := 0 totalMsgs := 10 sub, err := js.Subscribe("foo", func(m *nats.Msg) { got++ if got == totalMsgs { cancel() } }, test.subopt, nats.Durable(test.name)) if err != nil { t.Fatalf("Unexpected error: %v", err) } <-ctx.Done() if got != totalMsgs { t.Fatalf("Expected %d, got %d", totalMsgs, got) } // check if consumer is configured properly ci, err := js.ConsumerInfo("TEST", test.name) if err != nil { t.Fatalf("Unexpected error: %v", err) } if ci.Config.AckPolicy != test.expected { t.Fatalf("Expected %v, got %v", test.expected, ci.Config.AckPolicy) } // drain the subscription. 
This will remove the consumer sub.Drain() }) } checkAcks := func(t *testing.T, sub *nats.Subscription) { // Normal Ack msg, err := sub.NextMsg(2 * time.Second) if err != nil { t.Fatalf("Unexpected error: %v", err) } meta, err := msg.Metadata() if err != nil { t.Errorf("Unexpected error: %v", err) } if meta.Sequence.Consumer != 1 || meta.Sequence.Stream != 1 || meta.NumDelivered != 1 { t.Errorf("Unexpected metadata: %v", meta) } got := string(msg.Data) expected := "i:0" if got != expected { t.Errorf("Expected %v, got %v", expected, got) } err = msg.Ack() if err != nil { t.Errorf("Unexpected error: %v", err) } // AckSync msg, err = sub.NextMsg(2 * time.Second) if err != nil { t.Errorf("Unexpected error: %v", err) } got = string(msg.Data) expected = "i:1" if got != expected { t.Errorf("Expected %v, got %v", expected, got) } // Give an already canceled context. ctx, cancel := context.WithCancel(context.Background()) cancel() err = msg.AckSync(nats.Context(ctx)) if err != context.Canceled { t.Errorf("Unexpected error: %v", err) } // Context that not yet canceled. ctx, cancel = context.WithTimeout(context.Background(), 2*time.Second) defer cancel() // Prevent double context and ack wait options. 
err = msg.AckSync(nats.Context(ctx), nats.AckWait(1*time.Second)) if err != nats.ErrContextAndTimeout { t.Errorf("Unexpected error: %v", err) } err = msg.AckSync(nats.Context(ctx)) if err != nil { t.Errorf("Unexpected error: %v", err) } err = msg.AckSync(nats.AckWait(2 * time.Second)) if err != nats.ErrMsgAlreadyAckd { t.Errorf("Unexpected error: %v", err) } // AckSync default msg, err = sub.NextMsg(2 * time.Second) if err != nil { t.Errorf("Unexpected error: %v", err) } got = string(msg.Data) expected = "i:2" if got != expected { t.Errorf("Expected %v, got %v", expected, got) } err = msg.AckSync() if err != nil { t.Errorf("Unexpected error: %v", err) } // Nak msg, err = sub.NextMsg(2 * time.Second) if err != nil { t.Errorf("Unexpected error: %v", err) } got = string(msg.Data) expected = "i:3" if got != expected { t.Errorf("Expected %v, got %v", expected, got) } // Prevent double context and ack wait options. err = msg.Nak(nats.Context(ctx), nats.AckWait(1*time.Second)) if err != nats.ErrContextAndTimeout { t.Errorf("Unexpected error: %v", err) } // Skip the message. err = msg.Nak() if err != nil { t.Errorf("Unexpected error: %v", err) } msg, err = sub.NextMsg(2 * time.Second) if err != nil { t.Errorf("Unexpected error: %v", err) } got = string(msg.Data) expected = "i:4" if got != expected { t.Errorf("Expected %v, got %v", expected, got) } err = msg.Nak(nats.AckWait(2 * time.Second)) if err != nil { t.Errorf("Unexpected error: %v", err) } msg, err = sub.NextMsg(2 * time.Second) if err != nil { t.Errorf("Unexpected error: %v", err) } got = string(msg.Data) expected = "i:5" if got != expected { t.Errorf("Expected %v, got %v", expected, got) } // Prevent double context and ack wait options. 
err = msg.Term(nats.Context(ctx), nats.AckWait(1*time.Second)) if err != nats.ErrContextAndTimeout { t.Errorf("Unexpected error: %v", err) } err = msg.Term() if err != nil { t.Errorf("Unexpected error: %v", err) } ctx, done := context.WithTimeout(context.Background(), 2*time.Second) defer done() // Convert context into nats option. nctx := nats.Context(ctx) msg, err = sub.NextMsgWithContext(nctx) if err != nil { t.Errorf("Unexpected error: %v", err) } got = string(msg.Data) expected = "i:6" if got != expected { t.Errorf("Expected %v, got %v", expected, got) } err = msg.Term(nctx) if err != nil { t.Errorf("Unexpected error: %v", err) } msg, err = sub.NextMsgWithContext(nctx) if err != nil { t.Errorf("Unexpected error: %v", err) } got = string(msg.Data) expected = "i:7" if got != expected { t.Errorf("Expected %v, got %v", expected, got) } // Prevent double context and ack wait options. err = msg.InProgress(nats.Context(ctx), nats.AckWait(1*time.Second)) if err != nats.ErrContextAndTimeout { t.Errorf("Unexpected error: %v", err) } err = msg.InProgress(nctx) if err != nil { t.Errorf("Unexpected error: %v", err) } err = msg.InProgress(nctx) if err != nil { t.Errorf("Unexpected error: %v", err) } err = msg.Ack(nctx) if err != nil { t.Errorf("Unexpected error: %v", err) } } t.Run("js sub ack", func(t *testing.T) { sub, err := js.SubscribeSync("foo", nats.Durable("wq2")) if err != nil { t.Fatalf("Unexpected error: %v", err) } checkAcks(t, sub) }) t.Run("non js sub ack", func(t *testing.T) { inbox := nats.NewInbox() _, err = js.AddConsumer("TEST", &nats.ConsumerConfig{ Durable: "wq", AckPolicy: nats.AckExplicitPolicy, DeliverPolicy: nats.DeliverAllPolicy, DeliverSubject: inbox, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } sub, err := nc.SubscribeSync(inbox) if err != nil { t.Fatalf("Unexpected error: %v", err) } checkAcks(t, sub) }) t.Run("Nak with delay", func(t *testing.T) { js.Publish("bar", []byte("msg")) sub, err := js.SubscribeSync("bar", 
nats.Durable("nak_dur")) if err != nil { t.Fatalf("Error on subscribe: %v", err) } msg, err := sub.NextMsg(time.Second) if err != nil { t.Fatalf("Error on NextMsg: %v", err) } if err := msg.NakWithDelay(500 * time.Millisecond); err != nil { t.Fatalf("Error on Nak: %v", err) } // We should not get redelivery before 500ms+ if _, err = sub.NextMsg(250 * time.Millisecond); err != nats.ErrTimeout { t.Fatalf("Expected timeout, got %v", err) } msg, err = sub.NextMsg(time.Second) if err != nil { t.Fatalf("Error on NextMsg: %v", err) } if err := msg.NakWithDelay(0); err != nil { t.Fatalf("Error on Nak: %v", err) } msg, err = sub.NextMsg(250 * time.Millisecond) if err != nil { t.Fatalf("Expected timeout, got %v", err) } msg.Ack() }) t.Run("BackOff redeliveries", func(t *testing.T) { inbox := nats.NewInbox() sub, err := nc.SubscribeSync(inbox) if err != nil { t.Fatalf("Error on subscribe: %v", err) } defer sub.Unsubscribe() cc := nats.ConsumerConfig{ Durable: "backoff", AckPolicy: nats.AckExplicitPolicy, DeliverPolicy: nats.DeliverAllPolicy, FilterSubject: "bar", DeliverSubject: inbox, BackOff: []time.Duration{50 * time.Millisecond, 250 * time.Millisecond}, } // First, try with a MaxDeliver that is < len(BackOff), which the // server should reject. 
cc.MaxDeliver = 1 _, err = js.AddConsumer("TEST", &cc) if err == nil || !strings.Contains(err.Error(), "max deliver is required to be > length of backoff values") { t.Fatalf("Expected backoff/max deliver error, got %v", err) } // Now put a valid value cc.MaxDeliver = 4 ci, err := js.AddConsumer("TEST", &cc) if err != nil { t.Fatalf("Error on add consumer: %v", err) } if !reflect.DeepEqual(ci.Config.BackOff, cc.BackOff) { t.Fatalf("Expected backoff to be %v, got %v", cc.BackOff, ci.Config.BackOff) } // Consume the first delivery _, err = sub.NextMsg(time.Second) if err != nil { t.Fatalf("Error on nextMsg: %v", err) } // We should get a redelivery at around 50ms start := time.Now() _, err = sub.NextMsg(time.Second) if err != nil { t.Fatalf("Error on nextMsg: %v", err) } if dur := time.Since(start); dur < 25*time.Millisecond || dur > 100*time.Millisecond { t.Fatalf("Expected to be redelivered at around 50ms, took %v", dur) } // Now it should be every 250ms or so for i := 0; i < 2; i++ { start = time.Now() _, err = sub.NextMsg(time.Second) if err != nil { t.Fatalf("Error on nextMsg for iter=%v: %v", i+1, err) } if dur := time.Since(start); dur < 200*time.Millisecond || dur > 300*time.Millisecond { t.Fatalf("Expected to be redelivered at around 250ms, took %v", dur) } } // At this point, we should have go reach MaxDeliver _, err = sub.NextMsg(300 * time.Millisecond) if err != nats.ErrTimeout { t.Fatalf("Expected timeout, got %v", err) } }) } func TestJetStreamPullSubscribe_AckPending(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() var err error // Create the stream using our client API. 
_, err = js.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"foo"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } const totalMsgs = 10 for i := 0; i < totalMsgs; i++ { payload := fmt.Sprintf("i:%d", i) js.Publish("foo", []byte(payload)) } sub, err := js.PullSubscribe("foo", "wq", nats.AckWait(200*time.Millisecond), nats.MaxAckPending(5), ) if err != nil { t.Fatalf("Unexpected error: %v", err) } nextMsg := func() *nats.Msg { t.Helper() msgs, err := sub.Fetch(1) if err != nil { t.Fatal(err) } return msgs[0] } getPending := func() (int, int) { t.Helper() info, err := sub.ConsumerInfo() if err != nil { t.Fatal(err) } return info.NumAckPending, int(info.NumPending) } getMetadata := func(msg *nats.Msg) *nats.MsgMetadata { t.Helper() meta, err := msg.Metadata() if err != nil { t.Fatalf("Unexpected error: %v", err) } return meta } expectedPending := func(inflight int, pending int) { t.Helper() checkFor(t, 2*time.Second, 15*time.Millisecond, func() error { i, p := getPending() if i != inflight || p != pending { return fmt.Errorf("Unexpected inflight/pending msgs: %v/%v", i, p) } return nil }) } checkFor(t, 2*time.Second, 15*time.Millisecond, func() error { inflight, pending := getPending() if inflight != 0 || pending != totalMsgs { return fmt.Errorf("Unexpected inflight/pending msgs: %v/%v", inflight, pending) } return nil }) // Normal Ack should decrease pending msg := nextMsg() err = msg.Ack() if err != nil { t.Fatal(err) } expectedPending(0, 9) meta := getMetadata(msg) if meta.Sequence.Consumer != 1 || meta.Sequence.Stream != 1 || meta.NumDelivered != 1 { t.Errorf("Unexpected metadata: %+v", meta) } // AckSync msg = nextMsg() err = msg.AckSync() if err != nil { t.Fatal(err) } expectedPending(0, 8) meta = getMetadata(msg) if meta.Sequence.Consumer != 2 || meta.Sequence.Stream != 2 || meta.NumDelivered != 1 { t.Errorf("Unexpected metadata: %+v", meta) } // Nak the message so that it is redelivered. 
msg = nextMsg() err = msg.Nak() if err != nil { t.Fatal(err) } expectedPending(1, 7) meta = getMetadata(msg) if meta.Sequence.Consumer != 3 || meta.Sequence.Stream != 3 || meta.NumDelivered != 1 { t.Errorf("Unexpected metadata: %+v", meta) } prevSeq := meta.Sequence.Stream prevPayload := string(msg.Data) // Nak same sequence again, sequence number should not change. msg = nextMsg() err = msg.Nak() if err != nil { t.Fatal(err) } expectedPending(1, 7) meta = getMetadata(msg) if meta.Sequence.Stream != prevSeq { t.Errorf("Expected to get message at seq=%v, got seq=%v", prevSeq, meta.Sequence.Stream) } if string(msg.Data) != prevPayload { t.Errorf("Expected: %q, got: %q", string(prevPayload), string(msg.Data)) } if meta.Sequence.Consumer != 4 || meta.NumDelivered != 2 { t.Errorf("Unexpected metadata: %+v", meta) } // Terminate message so it is no longer pending. msg = nextMsg() err = msg.Term() if err != nil { t.Fatal(err) } expectedPending(0, 7) meta = getMetadata(msg) if meta.Sequence.Stream != prevSeq { t.Errorf("Expected to get message at seq=%v, got seq=%v", prevSeq, meta.Sequence.Stream) } if string(msg.Data) != prevPayload { t.Errorf("Expected: %q, got: %q", string(prevPayload), string(msg.Data)) } if meta.Sequence.Consumer != 5 || meta.Sequence.Stream != 3 || meta.NumDelivered != 3 { t.Errorf("Unexpected metadata: %+v", meta) } // Get next message and ack in progress a few times msg = nextMsg() expected := "i:3" if string(msg.Data) != expected { t.Errorf("Expected: %q, got: %q", string(msg.Data), expected) } err = msg.InProgress() if err != nil { t.Fatal(err) } err = msg.InProgress() if err != nil { t.Fatal(err) } expectedPending(1, 6) meta = getMetadata(msg) if meta.Sequence.Consumer != 6 || meta.Sequence.Stream != 4 || meta.NumDelivered != 1 { t.Errorf("Unexpected metadata: %+v", meta) } // Now ack the message to mark it as done. 
err = msg.AckSync() if err != nil { t.Fatal(err) } expectedPending(0, 6) // Fetch next message, but do not ack and wait for redelivery. msg = nextMsg() expectedPending(1, 5) meta = getMetadata(msg) if meta.Sequence.Consumer != 7 || meta.Sequence.Stream != 5 || meta.NumDelivered != 1 { t.Errorf("Unexpected metadata: %+v", meta) } prevSeq = meta.Sequence.Stream time.Sleep(500 * time.Millisecond) expectedPending(1, 5) // Next message should be a redelivery. msg = nextMsg() expectedPending(1, 5) meta = getMetadata(msg) if meta.Sequence.Consumer != 8 || meta.Sequence.Stream != prevSeq || meta.NumDelivered != 2 { t.Errorf("Unexpected metadata: %+v", meta) } err = msg.AckSync() if err != nil { t.Fatal(err) } // Get rest of messages. count := 5 for count > 0 { msgs, err := sub.Fetch(count) if err != nil { t.Fatal(err) } for _, msg := range msgs { count-- getMetadata(msg) msg.Ack() } } expectedPending(0, 0) } func TestJetStreamSubscribe_AckDup(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() var err error // Create the stream using our client API. _, err = js.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"foo"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } js.Publish("foo", []byte("hello")) ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond) defer cancel() pings := make(chan struct{}, 6) nc.Subscribe("$JS.ACK.TEST.>", func(msg *nats.Msg) { pings <- struct{}{} }) nc.Flush() ch := make(chan error, 6) _, err = js.Subscribe("foo", func(m *nats.Msg) { // Only first ack will be sent, auto ack that will occur after // this won't be sent either. ch <- m.Ack() // Any following acks should fail. 
ch <- m.Ack() ch <- m.Nak() ch <- m.AckSync() ch <- m.Term() ch <- m.InProgress() }) if err != nil { t.Fatalf("Unexpected error: %v", err) } <-ctx.Done() ackErr1 := <-ch if ackErr1 != nil { t.Errorf("Unexpected error: %v", ackErr1) } for i := 0; i < 5; i++ { e := <-ch if e != nats.ErrMsgAlreadyAckd { t.Errorf("Expected error: %v", e) } } if len(pings) != 1 { t.Logf("Expected to receive a single ack, got: %v", len(pings)) } } func TestJetStreamSubscribe_AutoAck(t *testing.T) { tests := []struct { name string opt nats.SubOpt expectedAck bool }{ { name: "with ack explicit", opt: nats.AckExplicit(), expectedAck: true, }, { name: "with ack all", opt: nats.AckAll(), expectedAck: true, }, { name: "with ack none", opt: nats.AckNone(), expectedAck: false, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() var err error // Create the stream using our client API. _, err = js.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"foo"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } js.Publish("foo", []byte("hello")) ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond) defer cancel() acks := make(chan struct{}, 2) nc.Subscribe("$JS.ACK.TEST.>", func(msg *nats.Msg) { acks <- struct{}{} }) nc.Flush() _, err = js.Subscribe("foo", func(m *nats.Msg) { }, test.opt) if err != nil { t.Fatalf("Unexpected error: %v", err) } <-ctx.Done() if test.expectedAck { if len(acks) != 1 { t.Fatalf("Expected to receive a single ack, got: %v", len(acks)) } return } if len(acks) != 0 { t.Fatalf("Expected no acks, got: %v", len(acks)) } }) } } func TestJetStreamSubscribe_AckDupInProgress(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() var err error // Create the stream using our client API. 
_, err = js.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"foo"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } js.Publish("foo", []byte("hello")) ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond) defer cancel() pings := make(chan struct{}, 3) nc.Subscribe("$JS.ACK.TEST.>", func(msg *nats.Msg) { pings <- struct{}{} }) nc.Flush() ch := make(chan error, 3) _, err = js.Subscribe("foo", func(m *nats.Msg) { // InProgress ACK can be sent any number of times. ch <- m.InProgress() ch <- m.InProgress() ch <- m.Ack() }, nats.Durable("WQ"), nats.ManualAck()) if err != nil { t.Fatalf("Unexpected error: %v", err) } <-ctx.Done() ackErr1 := <-ch ackErr2 := <-ch ackErr3 := <-ch if ackErr1 != nil { t.Errorf("Unexpected error: %v", ackErr1) } if ackErr2 != nil { t.Errorf("Unexpected error: %v", ackErr2) } if ackErr3 != nil { t.Errorf("Unexpected error: %v", ackErr3) } if len(pings) != 3 { t.Logf("Expected to receive multiple acks, got: %v", len(pings)) } } func TestJetStream_Unsubscribe(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() var err error _, err = js.AddStream(&nats.StreamConfig{ Name: "foo", Subjects: []string{"foo.A", "foo.B", "foo.C"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } fetchConsumers := func(t *testing.T, expected int) { t.Helper() checkFor(t, time.Second, 15*time.Millisecond, func() error { var infos []*nats.ConsumerInfo for info := range js.Consumers("foo") { infos = append(infos, info) } if len(infos) != expected { return fmt.Errorf("Expected %d consumers, got: %d", expected, len(infos)) } return nil }) } deleteAllConsumers := func(t *testing.T) { t.Helper() for cn := range js.ConsumerNames("foo") { js.DeleteConsumer("foo", cn) } } js.Publish("foo.A", []byte("A")) js.Publish("foo.B", []byte("B")) js.Publish("foo.C", []byte("C")) t.Run("consumers deleted on unsubscribe", func(t *testing.T) { sub, err 
:= js.SubscribeSync("foo.A") if err != nil { t.Fatal(err) } if err := sub.Unsubscribe(); err != nil { t.Errorf("Unexpected error: %v", err) } sub, err = js.SubscribeSync("foo.B", nats.Durable("B")) if err != nil { t.Fatal(err) } if err := sub.Unsubscribe(); err != nil { t.Errorf("Unexpected error: %v", err) } sub, err = js.Subscribe("foo.C", func(_ *nats.Msg) {}) if err != nil { t.Fatal(err) } if err := sub.Unsubscribe(); err != nil { t.Errorf("Unexpected error: %v", err) } sub, err = js.Subscribe("foo.C", func(_ *nats.Msg) {}, nats.Durable("C")) if err != nil { t.Fatal(err) } if err := sub.Unsubscribe(); err != nil { t.Errorf("Unexpected error: %v", err) } fetchConsumers(t, 0) }) t.Run("not deleted on unsubscribe if consumer created externally", func(t *testing.T) { // Created by JetStreamManagement if _, err = js.AddConsumer("foo", &nats.ConsumerConfig{ Durable: "wq", AckPolicy: nats.AckExplicitPolicy, // Need to specify filter subject here otherwise // would get messages from foo.A as well. 
FilterSubject: "foo.C", }); err != nil { t.Fatalf("Unexpected error: %v", err) } subC, err := js.PullSubscribe("foo.C", "wq") if err != nil { t.Fatalf("Unexpected error: %v", err) } fetchConsumers(t, 1) msgs, err := subC.Fetch(1, nats.MaxWait(2*time.Second)) if err != nil { t.Errorf("Unexpected error getting message: %v", err) } msg := msgs[0] got := string(msg.Data) expected := "C" if got != expected { t.Errorf("Expected %v, got %v", expected, got) } subC.Unsubscribe() fetchConsumers(t, 1) deleteAllConsumers(t) }) t.Run("consumers deleted on drain", func(t *testing.T) { subA, err := js.Subscribe("foo.A", func(_ *nats.Msg) {}) if err != nil { t.Fatal(err) } fetchConsumers(t, 1) err = subA.Drain() if err != nil { t.Errorf("Unexpected error: %v", err) } fetchConsumers(t, 0) deleteAllConsumers(t) }) t.Run("durable consumers deleted on drain", func(t *testing.T) { subB, err := js.Subscribe("foo.B", func(_ *nats.Msg) {}, nats.Durable("B")) if err != nil { t.Fatal(err) } fetchConsumers(t, 1) err = subB.Drain() if err != nil { t.Errorf("Unexpected error: %v", err) } fetchConsumers(t, 0) }) } func TestJetStream_UnsubscribeCloseDrain(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) serverURL := s.ClientURL() mc, jsm := jsClient(t, s) defer mc.Close() var err error _, err = jsm.AddStream(&nats.StreamConfig{ Name: "foo", Subjects: []string{"foo.A", "foo.B", "foo.C"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } fetchConsumers := func(t *testing.T, expected int) []*nats.ConsumerInfo { t.Helper() var infos []*nats.ConsumerInfo for info := range jsm.Consumers("foo") { infos = append(infos, info) } if len(infos) != expected { t.Fatalf("Expected %d consumers, got: %d", expected, len(infos)) } return infos } t.Run("conn drain deletes ephemeral consumers", func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) nc, err := nats.Connect(serverURL, nats.ClosedHandler(func(_ *nats.Conn) { 
cancel() })) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := nc.JetStream() if err != nil { t.Fatalf("Unexpected error: %v", err) } _, err = js.SubscribeSync("foo.C") if err != nil { t.Fatal(err) } // sub.Drain() or nc.Drain() delete JS consumer, same than Unsubscribe() nc.Drain() <-ctx.Done() fetchConsumers(t, 0) }) jsm.Publish("foo.A", []byte("A.1")) jsm.Publish("foo.B", []byte("B.1")) jsm.Publish("foo.C", []byte("C.1")) t.Run("conn close does not delete any consumer", func(t *testing.T) { nc, js := jsClient(t, s) defer nc.Close() if _, err := js.SubscribeSync("foo.A"); err != nil { t.Fatalf("Unexpected error: %v", err) } subB, err := js.SubscribeSync("foo.B", nats.Durable("B")) if err != nil { t.Fatalf("Unexpected error: %v", err) } resp, err := subB.NextMsg(2 * time.Second) if err != nil { t.Fatalf("Unexpected error: %v", err) } got := string(resp.Data) expected := "B.1" if got != expected { t.Errorf("Expected %v, got: %v", expected, got) } fetchConsumers(t, 2) // There will be still all consumers since nc.Close // does not delete ephemeral consumers. nc.Close() fetchConsumers(t, 2) }) jsm.Publish("foo.A", []byte("A.2")) jsm.Publish("foo.B", []byte("B.2")) jsm.Publish("foo.C", []byte("C.2")) t.Run("reattached durables consumers cannot be deleted with unsubscribe", func(t *testing.T) { nc, js := jsClient(t, s) defer nc.Close() fetchConsumers(t, 2) // The durable interest remains so have to attach now, // otherwise would get a stream already used error. subB, err := js.SubscribeSync("foo.B", nats.Durable("B")) if err != nil { t.Fatal(err) } // No new consumers created since reattached to the same one. fetchConsumers(t, 2) resp, err := subB.NextMsg(2 * time.Second) if err != nil { t.Fatalf("Unexpected error: %v", err) } got := string(resp.Data) expected := "B.2" if got != expected { t.Errorf("Expected %v, got: %v", expected, got) } jsm.Publish("foo.B", []byte("B.3")) // Sub can still receive the same message. 
resp, err = subB.NextMsg(1 * time.Second) if err != nil { t.Fatalf("Unexpected error: %v", err) } got = string(resp.Data) expected = "B.3" if got != expected { t.Errorf("Expected %v, got: %v", expected, got) } // Delete durable consumer. err = subB.Unsubscribe() if err != nil { t.Errorf("Unexpected error: %v", err) } // Since library did not create, the JS consumers remain. fetchConsumers(t, 2) }) } func TestJetStream_UnsubscribeDeleteNoPermissions(t *testing.T) { conf := createConfFile(t, []byte(` listen: 127.0.0.1:-1 jetstream: {max_mem_store: 64GB, max_file_store: 10TB} no_auth_user: guest accounts: { JS: { # User should not be able to delete consumer. jetstream: enabled users: [ {user: guest, password: "", permissions: { publish: { deny: "$JS.API.CONSUMER.DELETE.>" } }}] } } `)) defer os.Remove(conf) s, _ := RunServerWithConfig(conf) defer shutdownJSServerAndRemoveStorage(t, s) errCh := make(chan error, 2) nc, err := nats.Connect(s.ClientURL(), nats.ErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, err error) { errCh <- err })) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() js, err := nc.JetStream(nats.MaxWait(time.Second)) if err != nil { t.Fatal(err) } js.AddStream(&nats.StreamConfig{ Name: "foo", }) js.Publish("foo", []byte("test")) sub, err := js.SubscribeSync("foo") if err != nil { t.Fatal(err) } _, err = sub.NextMsg(2 * time.Second) if err != nil { t.Fatal(err) } // Should fail due to lack of permissions. 
err = sub.Unsubscribe() if err == nil { t.Errorf("Unexpected success attempting to delete consumer without permissions") } select { case <-time.After(2 * time.Second): t.Error("Timeout waiting for permissions error") case err = <-errCh: if !strings.Contains(err.Error(), `Permissions Violation for Publish to "$JS.API.CONSUMER.DELETE`) { t.Error("Expected permissions violation error") } } } func TestJetStreamSubscribe_ReplayPolicy(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() var err error // Create the stream using our client API. _, err = js.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"foo"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } i := 0 totalMsgs := 10 for range time.NewTicker(100 * time.Millisecond).C { payload := fmt.Sprintf("i:%d", i) js.Publish("foo", []byte(payload)) i++ if i == totalMsgs { break } } // By default it is ReplayInstant playback policy. isub, err := js.SubscribeSync("foo") if err != nil { t.Fatalf("Unexpected error: %v", err) } ci, err := isub.ConsumerInfo() if err != nil { t.Fatalf("Unexpected error: %v", err) } if ci.Config.ReplayPolicy != nats.ReplayInstantPolicy { t.Fatalf("Expected original replay policy, got: %v", ci.Config.ReplayPolicy) } // Change into original playback. sub, err := js.SubscribeSync("foo", nats.ReplayOriginal()) if err != nil { t.Fatalf("Unexpected error: %v", err) } ci, err = sub.ConsumerInfo() if err != nil { t.Fatalf("Unexpected error: %v", err) } if ci.Config.ReplayPolicy != nats.ReplayOriginalPolicy { t.Fatalf("Expected original replay policy, got: %v", ci.Config.ReplayPolicy) } // There should already be a message delivered. _, err = sub.NextMsg(10 * time.Millisecond) if err != nil { t.Fatalf("Unexpected error: %v", err) } // We should timeout faster since too soon for the original playback. 
_, err = sub.NextMsg(10 * time.Millisecond) if err != nats.ErrTimeout { t.Fatalf("Expected timeout error replaying the stream, got: %v", err) } // Enough time to get the next message according to the original playback. _, err = sub.NextMsg(110 * time.Millisecond) if err != nil { t.Fatalf("Unexpected error: %v", err) } } func TestJetStreamSubscribe_RateLimit(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() var err error // Create the stream using our client API. _, err = js.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"foo"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } totalMsgs := 2048 for i := 0; i < totalMsgs; i++ { payload := strings.Repeat("A", 1024) js.Publish("foo", []byte(payload)) } // By default there is no RateLimit isub, err := js.SubscribeSync("foo") if err != nil { t.Fatalf("Unexpected error: %v", err) } ci, err := isub.ConsumerInfo() if err != nil { t.Fatalf("Unexpected error: %v", err) } if ci.Config.RateLimit != 0 { t.Fatalf("Expected no rate limit, got: %v", ci.Config.RateLimit) } // Change rate limit. // Make the receive channel able to possibly hold ALL messages, but // we expect it to hold less due to rate limiting. 
recvd := make(chan *nats.Msg, totalMsgs) duration := 2 * time.Second ctx, cancel := context.WithTimeout(context.Background(), duration) defer cancel() var rl uint64 = 1024 sub, err := js.Subscribe("foo", func(m *nats.Msg) { recvd <- m if len(recvd) == totalMsgs { cancel() } }, nats.RateLimit(rl)) if err != nil { t.Fatalf("Unexpected error: %v", err) } ci, err = sub.ConsumerInfo() if err != nil { t.Fatalf("Unexpected error: %v", err) } if ci.Config.RateLimit != rl { t.Fatalf("Expected %v, got: %v", rl, ci.Config.RateLimit) } <-ctx.Done() if len(recvd) >= int(rl) { t.Errorf("Expected applied rate limit to push consumer, got %v msgs in %v", recvd, duration) } } func TestJetStreamSubscribe_FilterSubjects(t *testing.T) { tests := []struct { name string durable string }{ { name: "ephemeral consumer", }, { name: "durable consumer", durable: "cons", }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() var err error _, err = js.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"foo", "bar", "baz"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } for i := 0; i < 5; i++ { js.Publish("foo", []byte("msg")) } for i := 0; i < 5; i++ { js.Publish("bar", []byte("msg")) } for i := 0; i < 5; i++ { js.Publish("baz", []byte("msg")) } opts := []nats.SubOpt{nats.BindStream("TEST"), nats.ConsumerFilterSubjects("foo", "baz")} if test.durable != "" { opts = append(opts, nats.Durable(test.durable)) } sub, err := js.SubscribeSync("", opts...) 
if err != nil { t.Fatalf("Unexpected error: %s", err) } for i := 0; i < 10; i++ { msg, err := sub.NextMsg(500 * time.Millisecond) if err != nil { t.Fatalf("Unexpected error: %s", err) } if msg.Subject != "foo" && msg.Subject != "baz" { t.Fatalf("Unexpected message subject: %s", msg.Subject) } } }) } } func TestJetStreamSubscribe_ConfigCantChange(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() var err error // Create the stream using our client API. _, err = js.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"foo"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } for _, test := range []struct { name string first nats.SubOpt second nats.SubOpt }{ {"description", nats.Description("a"), nats.Description("b")}, {"deliver policy", nats.DeliverAll(), nats.DeliverLast()}, {"optional start sequence", nats.StartSequence(1), nats.StartSequence(10)}, {"optional start time", nats.StartTime(time.Now()), nats.StartTime(time.Now().Add(-2 * time.Hour))}, {"ack wait", nats.AckWait(10 * time.Second), nats.AckWait(15 * time.Second)}, {"max deliver", nats.MaxDeliver(3), nats.MaxDeliver(5)}, {"replay policy", nats.ReplayOriginal(), nats.ReplayInstant()}, {"max waiting", nats.PullMaxWaiting(10), nats.PullMaxWaiting(20)}, {"max ack pending", nats.MaxAckPending(10), nats.MaxAckPending(20)}, } { t.Run(test.name, func(t *testing.T) { durName := nuid.Next() sub, err := js.PullSubscribe("foo", durName, test.first) if err != nil { t.Fatalf("Error on subscribe: %v", err) } // Once it is created, options can't be changed. 
_, err = js.PullSubscribe("foo", durName, test.second) if err == nil || !strings.Contains(err.Error(), test.name) { t.Fatalf("Unexpected error: %v", err) } sub.Unsubscribe() }) } for _, test := range []struct { name string cc *nats.ConsumerConfig opt nats.SubOpt }{ {"ack policy", &nats.ConsumerConfig{AckPolicy: nats.AckAllPolicy}, nats.AckNone()}, {"rate limit", &nats.ConsumerConfig{RateLimit: 10}, nats.RateLimit(100)}, {"flow control", &nats.ConsumerConfig{FlowControl: false}, nats.EnableFlowControl()}, {"heartbeat", &nats.ConsumerConfig{Heartbeat: 10 * time.Second}, nats.IdleHeartbeat(20 * time.Second)}, } { t.Run(test.name, func(t *testing.T) { durName := nuid.Next() cc := test.cc cc.Durable = durName cc.DeliverSubject = nuid.Next() if _, err := js.AddConsumer("TEST", cc); err != nil { t.Fatalf("Error creating consumer: %v", err) } sub, err := js.SubscribeSync("foo", nats.Durable(durName), test.opt) if err == nil || !strings.Contains(err.Error(), test.name) { t.Fatalf("Unexpected error: %v", err) } sub.Unsubscribe() }) } // Verify that we don't fail if user did not set it. 
for _, test := range []struct { name string opt nats.SubOpt }{ {"description", nats.Description("a")}, {"deliver policy", nats.DeliverAll()}, {"optional start sequence", nats.StartSequence(10)}, {"optional start time", nats.StartTime(time.Now())}, {"ack wait", nats.AckWait(10 * time.Second)}, {"max deliver", nats.MaxDeliver(3)}, {"replay policy", nats.ReplayOriginal()}, {"max waiting", nats.PullMaxWaiting(10)}, {"max ack pending", nats.MaxAckPending(10)}, } { t.Run(test.name+" not set", func(t *testing.T) { durName := nuid.Next() sub, err := js.PullSubscribe("foo", durName, test.opt) if err != nil { t.Fatalf("Error on subscribe: %v", err) } // If not explicitly asked by the user, we are ok _, err = js.PullSubscribe("foo", durName) if err != nil { t.Fatalf("Unexpected error: %v", err) } sub.Unsubscribe() }) } for _, test := range []struct { name string opt nats.SubOpt }{ {"default deliver policy", nats.DeliverAll()}, {"default ack wait", nats.AckWait(30 * time.Second)}, {"default replay policy", nats.ReplayInstant()}, {"default max waiting", nats.PullMaxWaiting(512)}, {"default ack pending", nats.MaxAckPending(65536)}, } { t.Run(test.name, func(t *testing.T) { durName := nuid.Next() sub, err := js.PullSubscribe("foo", durName) if err != nil { t.Fatalf("Error on subscribe: %v", err) } // If the option is the same as the server default, it is not an error either. 
_, err = js.PullSubscribe("foo", durName, test.opt) if err != nil { t.Fatalf("Unexpected error: %v", err) } sub.Unsubscribe() }) } for _, test := range []struct { name string opt nats.SubOpt }{ {"policy", nats.DeliverNew()}, {"ack wait", nats.AckWait(31 * time.Second)}, {"replay policy", nats.ReplayOriginal()}, {"max waiting", nats.PullMaxWaiting(513)}, {"ack pending", nats.MaxAckPending(2)}, } { t.Run(test.name+" changed from default", func(t *testing.T) { durName := nuid.Next() sub, err := js.PullSubscribe("foo", durName) if err != nil { t.Fatalf("Error on subscribe: %v", err) } // First time it was created with defaults and the // second time a change is attempted, so it is an error. _, err = js.PullSubscribe("foo", durName, test.opt) if err == nil || !strings.Contains(err.Error(), test.name) { t.Fatalf("Unexpected error: %v", err) } sub.Unsubscribe() }) } // Check that binding to a durable (without specifying durable option) works if _, err := js.AddConsumer("TEST", &nats.ConsumerConfig{ Durable: "BindDurable", DeliverSubject: "bar", }); err != nil { t.Fatalf("Failed to create consumer: %v", err) } if _, err := js.SubscribeSync("foo", nats.Bind("TEST", "BindDurable")); err != nil { t.Fatalf("Error on subscribe: %v", err) } } type jsServer struct { *server.Server myopts *server.Options restart sync.Mutex } // Restart can be used to start again a server // using the same listen address as before. 
func (srv *jsServer) Restart() { srv.restart.Lock() defer srv.restart.Unlock() srv.Server = RunServerWithOptions(srv.myopts) } func setupJSClusterWithSize(t *testing.T, clusterName string, size int) []*jsServer { t.Helper() nodes := make([]*jsServer, size) opts := make([]*server.Options, 0) var activeListeners []net.Listener getAddr := func(t *testing.T) (string, string, int) { l, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { t.Fatalf("Unexpected error: %v", err) } addr := l.Addr() host := addr.(*net.TCPAddr).IP.String() port := addr.(*net.TCPAddr).Port time.Sleep(100 * time.Millisecond) // we cannot close the listener immediately to avoid duplicate port binding // the returned net.Listener has to be closed after all ports are drawn activeListeners = append(activeListeners, l) return addr.String(), host, port } routes := []string{} for i := 0; i < size; i++ { o := natsserver.DefaultTestOptions o.JetStream = true o.ServerName = fmt.Sprintf("NODE_%d", i) tdir, err := os.MkdirTemp(os.TempDir(), fmt.Sprintf("%s_%s-", o.ServerName, clusterName)) if err != nil { t.Fatal(err) } o.StoreDir = tdir if size > 1 { o.Cluster.Name = clusterName _, host1, port1 := getAddr(t) o.Host = host1 o.Port = port1 addr2, host2, port2 := getAddr(t) o.Cluster.Host = host2 o.Cluster.Port = port2 o.Tags = []string{o.ServerName} routes = append(routes, fmt.Sprintf("nats://%s", addr2)) } opts = append(opts, &o) } // close all connections used to randomize ports for _, l := range activeListeners { l.Close() } if size > 1 { routesStr := server.RoutesFromStr(strings.Join(routes, ",")) for i, o := range opts { o.Routes = routesStr nodes[i] = &jsServer{Server: RunServerWithOptions(o), myopts: o} } } else { o := opts[0] nodes[0] = &jsServer{Server: RunServerWithOptions(o), myopts: o} } // Wait until JS is ready. 
srvA := nodes[0] nc, err := nats.Connect(srvA.ClientURL()) if err != nil { t.Error(err) } waitForJSReady(t, nc) nc.Close() return nodes } func withJSServer(t *testing.T, tfn func(t *testing.T, srvs ...*jsServer)) { t.Helper() opts := natsserver.DefaultTestOptions opts.Port = -1 opts.JetStream = true opts.LameDuckDuration = 3 * time.Second opts.LameDuckGracePeriod = 2 * time.Second s := &jsServer{Server: RunServerWithOptions(&opts), myopts: &opts} defer shutdownJSServerAndRemoveStorage(t, s.Server) tfn(t, s) } func withJSCluster(t *testing.T, clusterName string, size int, tfn func(t *testing.T, srvs ...*jsServer)) { t.Helper() nodes := setupJSClusterWithSize(t, clusterName, size) defer func() { // Ensure that they get shutdown and remove their state. for _, node := range nodes { node.restart.Lock() shutdownJSServerAndRemoveStorage(t, node.Server) node.restart.Unlock() } }() tfn(t, nodes...) } func withJSClusterAndStream(t *testing.T, clusterName string, size int, stream *nats.StreamConfig, tfn func(t *testing.T, subject string, srvs ...*jsServer)) { t.Helper() withJSCluster(t, clusterName, size, func(t *testing.T, nodes ...*jsServer) { srvA := nodes[0] nc, err := nats.Connect(srvA.ClientURL()) if err != nil { t.Error(err) } defer nc.Close() timeout := time.Now().Add(10 * time.Second) for time.Now().Before(timeout) { jsm, err := nc.JetStream() if err != nil { t.Fatal(err) } _, err = jsm.AccountInfo() if err != nil { // Backoff for a bit until cluster and resources are ready. time.Sleep(500 * time.Millisecond) } _, err = jsm.AddStream(stream) if err != nil { time.Sleep(500 * time.Millisecond) continue } break } if err != nil { t.Fatalf("Unexpected error creating stream: %v", err) } tfn(t, stream.Name, nodes...) 
}) } func waitForJSReady(t *testing.T, nc *nats.Conn) { var err error timeout := time.Now().Add(10 * time.Second) for time.Now().Before(timeout) { // Use a smaller MaxWait here since if it fails, we don't want // to wait for too long since we are going to try again. js, err := nc.JetStream(nats.MaxWait(250 * time.Millisecond)) if err != nil { t.Fatal(err) } _, err = js.AccountInfo() if err != nil { continue } return } t.Fatalf("Timeout waiting for JS to be ready: %v", err) } func checkFor(t *testing.T, totalWait, sleepDur time.Duration, f func() error) { t.Helper() timeout := time.Now().Add(totalWait) var err error for time.Now().Before(timeout) { err = f() if err == nil { return } time.Sleep(sleepDur) } if err != nil { t.Fatal(err.Error()) } } func checkSubsPending(t *testing.T, sub *nats.Subscription, numExpected int) { t.Helper() checkFor(t, 4*time.Second, 20*time.Millisecond, func() error { if nmsgs, _, err := sub.Pending(); err != nil || nmsgs != numExpected { return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, numExpected) } return nil }) } func TestJetStreamStreamMirror(t *testing.T) { withJSServer(t, testJetStreamMirror_Source) } func testJetStreamMirror_Source(t *testing.T, nodes ...*jsServer) { srvA := nodes[0] nc, js := jsClient(t, srvA.Server) defer nc.Close() var err error _, err = js.AddStream(&nats.StreamConfig{ Name: "origin", Placement: &nats.Placement{ Tags: []string{"NODE_0"}, }, Storage: nats.MemoryStorage, Replicas: 1, }) if err != nil { t.Fatalf("Unexpected error creating stream: %v", err) } totalMsgs := 10 for i := 0; i < totalMsgs; i++ { payload := fmt.Sprintf("i:%d", i) js.Publish("origin", []byte(payload)) } t.Run("create mirrors", func(t *testing.T) { _, err = js.AddStream(&nats.StreamConfig{ Name: "m1", Mirror: &nats.StreamSource{Name: "origin"}, Storage: nats.FileStorage, Replicas: 1, }) if err != nil { t.Fatalf("Unexpected error creating stream: %v", err) } _, err = js.AddStream(&nats.StreamConfig{ Name: 
"m2", Mirror: &nats.StreamSource{Name: "origin"}, Storage: nats.MemoryStorage, Replicas: 1, }) if err != nil { t.Fatalf("Unexpected error creating stream: %v", err) } msgs := make([]*nats.RawStreamMsg, 0) // Stored message sequences start at 1 startSequence := 1 GetNextMsg: for i := startSequence; i < totalMsgs+1; i++ { var ( err error seq = uint64(i) msgA *nats.RawStreamMsg msgB *nats.RawStreamMsg sourceMsg *nats.RawStreamMsg timeout = time.Now().Add(2 * time.Second) ) for time.Now().Before(timeout) { sourceMsg, err = js.GetMsg("origin", seq) if err != nil { time.Sleep(100 * time.Millisecond) continue } msgA, err = js.GetMsg("m1", seq) if err != nil { time.Sleep(100 * time.Millisecond) continue } if !reflect.DeepEqual(sourceMsg, msgA) { t.Errorf("Expected %+v, got: %+v", sourceMsg, msgA) } msgB, err = js.GetMsg("m2", seq) if err != nil { time.Sleep(100 * time.Millisecond) continue } if !reflect.DeepEqual(sourceMsg, msgB) { t.Errorf("Expected %+v, got: %+v", sourceMsg, msgB) } msgs = append(msgs, msgA) continue GetNextMsg } if err != nil { t.Fatalf("Unexpected error: %v", err) } } got := len(msgs) if got < totalMsgs { t.Errorf("Expected %v, got: %v", totalMsgs, got) } t.Run("consume from mirror", func(t *testing.T) { sub, err := js.SubscribeSync("origin", nats.BindStream("m1")) if err != nil { t.Fatal(err) } mmsgs := make([]*nats.Msg, 0) for i := 0; i < totalMsgs; i++ { msg, err := sub.NextMsg(2 * time.Second) if err != nil { t.Error(err) } meta, err := msg.Metadata() if err != nil { t.Error(err) } if meta.Stream != "m1" { t.Errorf("Expected m1, got: %v", meta.Stream) } mmsgs = append(mmsgs, msg) } if len(mmsgs) != totalMsgs { t.Errorf("Expected to consume %v msgs, got: %v", totalMsgs, len(mmsgs)) } }) }) t.Run("consume from original source", func(t *testing.T) { sub, err := js.SubscribeSync("origin") defer sub.Unsubscribe() if err != nil { t.Fatal(err) } msg, err := sub.NextMsg(2 * time.Second) if err != nil { t.Error(err) } meta, err := msg.Metadata() if err != 
nil { t.Error(err) } if meta.Stream != "origin" { t.Errorf("Expected m1, got: %v", meta.Stream) } }) t.Run("bind to non existing stream fails", func(t *testing.T) { _, err := js.SubscribeSync("origin", nats.BindStream("foo")) if err == nil { t.Fatal("Unexpected success") } if !errors.Is(err, nats.ErrStreamNotFound) { t.Fatalf("Expected error: %v; got: %v", nats.ErrStreamNotFound, err) } }) t.Run("bind to origin stream", func(t *testing.T) { // This would only avoid the stream names lookup. sub, err := js.SubscribeSync("origin", nats.BindStream("origin")) if err != nil { t.Fatal(err) } msg, err := sub.NextMsg(2 * time.Second) if err != nil { t.Error(err) } meta, err := msg.Metadata() if err != nil { t.Error(err) } if meta.Stream != "origin" { t.Errorf("Expected m1, got: %v", meta.Stream) } }) t.Run("get mirror info", func(t *testing.T) { m1, err := js.StreamInfo("m1") if err != nil { t.Fatalf("Unexpected error: %v", err) } got := m1.Mirror.Name expected := "origin" if got != expected { t.Errorf("Expected %v, got: %v", expected, got) } m2, err := js.StreamInfo("m2") if err != nil { t.Fatalf("Unexpected error: %v", err) } got = m2.Mirror.Name expected = "origin" if got != expected { t.Errorf("Expected %v, got: %v", expected, got) } }) t.Run("create stream from sources", func(t *testing.T) { sources := make([]*nats.StreamSource, 0) sources = append(sources, &nats.StreamSource{Name: "m1"}) sources = append(sources, &nats.StreamSource{Name: "m2"}) _, err = js.AddStream(&nats.StreamConfig{ Name: "s1", Sources: sources, Storage: nats.FileStorage, Replicas: 1, }) if err != nil { t.Fatalf("Unexpected error creating stream: %v", err) } msgs := make([]*nats.RawStreamMsg, 0) // Stored message sequences start at 1 startSequence := 1 expectedTotal := totalMsgs * 2 GetNextMsg: for i := startSequence; i < expectedTotal+1; i++ { var ( err error seq = uint64(i) msg *nats.RawStreamMsg timeout = time.Now().Add(5 * time.Second) ) Retry: for time.Now().Before(timeout) { msg, err = 
js.GetMsg("s1", seq) if err != nil { time.Sleep(100 * time.Millisecond) continue Retry } msgs = append(msgs, msg) continue GetNextMsg } if err != nil { t.Fatalf("Unexpected error fetching seq=%v: %v", seq, err) } } got := len(msgs) if got < expectedTotal { t.Errorf("Expected %v, got: %v", expectedTotal, got) } si, err := js.StreamInfo("s1") if err != nil { t.Fatalf("Unexpected error: %v", err) } got = int(si.State.Msgs) if got != expectedTotal { t.Errorf("Expected %v, got: %v", expectedTotal, got) } got = len(si.Sources) expected := 2 if got != expected { t.Errorf("Expected %v, got: %v", expected, got) } t.Run("consume from sourced stream", func(t *testing.T) { sub, err := js.SubscribeSync("origin", nats.BindStream("s1")) if err != nil { t.Error(err) } _, err = sub.NextMsg(2 * time.Second) if err != nil { t.Error(err) } }) }) t.Run("update stream with sources", func(t *testing.T) { si, err := js.StreamInfo("s1") if err != nil { t.Fatalf("Unexpected error creating stream: %v", err) } got := len(si.Config.Sources) expected := 2 if got != expected { t.Errorf("Expected %v, got: %v", expected, got) } got = len(si.Sources) if got != expected { t.Errorf("Expected %v, got: %v", expected, got) } // Make an update config := si.Config config.MaxMsgs = 128 updated, err := js.UpdateStream(&config) if err != nil { t.Fatalf("Unexpected error creating stream: %v", err) } got = len(updated.Config.Sources) if got != expected { t.Errorf("Expected %v, got: %v", expected, got) } got = int(updated.Config.MaxMsgs) expected = int(config.MaxMsgs) if got != expected { t.Errorf("Expected %v, got: %v", expected, got) } }) t.Run("bind to stream with subject not in stream", func(t *testing.T) { // The Stream does not have a subject called 'nothing' but client is still able // to bind to the origin stream even though it cannot consume messages. // After updating the stream with the subject this consumer will be able to // match and receive messages. 
sub, err := js.SubscribeSync("nothing", nats.BindStream("origin")) if err != nil { t.Fatal(err) } _, err = sub.NextMsg(1 * time.Second) if !errors.Is(err, nats.ErrTimeout) { t.Fatal("Expected timeout error") } info, err := sub.ConsumerInfo() if err != nil { t.Fatal(err) } got := info.Stream expected := "origin" if got != expected { t.Fatalf("Expected %v, got %v", expected, got) } got = info.Config.FilterSubject expected = "nothing" if got != expected { t.Fatalf("Expected %v, got %v", expected, got) } t.Run("can consume after stream update", func(t *testing.T) { _, err = js.UpdateStream(&nats.StreamConfig{ Name: "origin", Placement: &nats.Placement{ Tags: []string{"NODE_0"}, }, Storage: nats.MemoryStorage, Replicas: 1, Subjects: []string{"origin", "nothing"}, }) js.Publish("nothing", []byte("hello world")) msg, err := sub.NextMsg(1 * time.Second) if err != nil { t.Error(err) } got = msg.Subject expected = "nothing" if got != expected { t.Fatalf("Expected %v, got %v", expected, got) } }) }) t.Run("create sourced stream with a cycle", func(t *testing.T) { // Since v2.8.0, this test would fail with a "detected cycle" error. 
sources := make([]*nats.StreamSource, 0) sources = append(sources, &nats.StreamSource{Name: "origin"}) sources = append(sources, &nats.StreamSource{Name: "m1"}) streamName := "s2" _, err = js.AddStream(&nats.StreamConfig{ Name: streamName, Sources: sources, Storage: nats.FileStorage, Replicas: 1, }) var aerr *nats.APIError if ok := errors.As(err, &aerr); !ok || aerr.ErrorCode != nats.JSStreamInvalidConfig { t.Fatalf("Expected nats.APIError, got %v", err) } }) } func TestJetStream_ClusterMultipleSubscribe(t *testing.T) { nodes := []int{1, 3} replicas := []int{1} for _, n := range nodes { for _, r := range replicas { if r > 1 && n == 1 { continue } t.Run(fmt.Sprintf("qsub n=%d r=%d", n, r), func(t *testing.T) { name := fmt.Sprintf("MSUB%d%d", n, r) stream := &nats.StreamConfig{ Name: name, Replicas: r, } withJSClusterAndStream(t, name, n, stream, testJetStream_ClusterMultipleQueueSubscribe) }) t.Run(fmt.Sprintf("psub n=%d r=%d", n, r), func(t *testing.T) { name := fmt.Sprintf("PSUBN%d%d", n, r) stream := &nats.StreamConfig{ Name: name, Replicas: n, } withJSClusterAndStream(t, name, n, stream, testJetStream_ClusterMultiplePullSubscribe) }) t.Run(fmt.Sprintf("psub n=%d r=%d multi fetch", n, r), func(t *testing.T) { name := fmt.Sprintf("PFSUBN%d%d", n, r) stream := &nats.StreamConfig{ Name: name, Replicas: n, } withJSClusterAndStream(t, name, n, stream, testJetStream_ClusterMultipleFetchPullSubscribe) }) } } } func testJetStream_ClusterMultipleQueueSubscribe(t *testing.T, subject string, srvs ...*jsServer) { srv := srvs[0] nc, err := nats.Connect(srv.ClientURL()) if err != nil { t.Fatal(err) } defer nc.Close() var wg sync.WaitGroup ctx, done := context.WithTimeout(context.Background(), 2*time.Second) defer done() js, err := nc.JetStream() if err != nil { t.Fatal(err) } size := 5 subs := make([]*nats.Subscription, size) errCh := make(chan error, size) // We are testing auto-bind here so create one first and expect others to bind to it. 
sub, err := js.QueueSubscribeSync(subject, "wq", nats.Durable("shared")) if err != nil { t.Fatalf("Unexpected error: %v", err) } subs[0] = sub for i := 1; i < size; i++ { wg.Add(1) go func(n int) { defer wg.Done() var sub *nats.Subscription var err error for attempt := 0; attempt < 5; attempt++ { sub, err = js.QueueSubscribeSync(subject, "wq", nats.Durable("shared")) if err != nil { time.Sleep(1 * time.Second) continue } break } if err != nil { errCh <- err } else { subs[n] = sub } }(i) } go func() { // Unblock the main context when done. wg.Wait() done() }() wg.Wait() for i := 0; i < size*2; i++ { js.Publish(subject, []byte("test")) } delivered := 0 for _, sub := range subs { if sub == nil { continue } if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs > 0 { delivered++ } } if delivered < 2 { t.Fatalf("Expected more than one subscriber to receive a message, got: %v", delivered) } select { case <-ctx.Done(): case err := <-errCh: if err != nil { t.Fatalf("Unexpected error with multiple queue subscribers: %v", err) } } } func testJetStream_ClusterMultiplePullSubscribe(t *testing.T, subject string, srvs ...*jsServer) { srv := srvs[0] nc, js := jsClient(t, srv.Server) defer nc.Close() var wg sync.WaitGroup ctx, done := context.WithTimeout(context.Background(), 2*time.Second) defer done() size := 5 subs := make([]*nats.Subscription, size) errCh := make(chan error, size) for i := 0; i < size; i++ { wg.Add(1) go func(n int) { defer wg.Done() var sub *nats.Subscription var err error for attempt := 0; attempt < 5; attempt++ { sub, err = js.PullSubscribe(subject, "shared") if err != nil { time.Sleep(1 * time.Second) continue } break } if err != nil { errCh <- err } else { subs[n] = sub } }(i) } go func() { // Unblock the main context when done. 
wg.Wait() done() }() wg.Wait() for i := 0; i < size*2; i++ { js.Publish(subject, []byte("test")) } delivered := 0 for i, sub := range subs { if sub == nil { continue } for attempt := 0; attempt < 4; attempt++ { _, err := sub.Fetch(1, nats.MaxWait(250*time.Millisecond)) if err != nil { t.Logf("%v WARN: Timeout waiting for next message: %v", i, err) continue } delivered++ break } } if delivered < 2 { t.Fatalf("Expected more than one subscriber to receive a message, got: %v", delivered) } select { case <-ctx.Done(): case err := <-errCh: if err != nil { t.Fatalf("Unexpected error with multiple pull subscribers: %v", err) } } } func testJetStream_ClusterMultipleFetchPullSubscribe(t *testing.T, subject string, srvs ...*jsServer) { srv := srvs[0] nc, js := jsClient(t, srv.Server) defer nc.Close() var wg sync.WaitGroup ctx, done := context.WithTimeout(context.Background(), 5*time.Second) defer done() // Setup a number of subscriptions with different inboxes that will be // fetching the messages in parallel. nsubs := 4 subs := make([]*nats.Subscription, nsubs) errCh := make(chan error, nsubs) var queues sync.Map for i := 0; i < nsubs; i++ { wg.Add(1) go func(n int) { defer wg.Done() var sub *nats.Subscription var err error sub, err = js.PullSubscribe(subject, "shared") if err != nil { errCh <- err } else { subs[n] = sub queues.Store(sub.Subject, make([]*nats.Msg, 0)) } }(i) } // Publishing of messages happens after the subscriptions are ready. // The subscribers will be fetching messages while these are being // produced so sometimes there are not going to be messages available. 
wg.Wait() var ( total uint64 = 100 delivered uint64 batchSize = 2 ) go func() { for i := 0; i < int(total); i++ { js.Publish(subject, []byte(fmt.Sprintf("n:%v", i))) time.Sleep(1 * time.Millisecond) } }() ctx2, done2 := context.WithTimeout(ctx, 3*time.Second) defer done2() for _, psub := range subs { if psub == nil { continue } sub := psub subject := sub.Subject v, _ := queues.Load(sub.Subject) queue := v.([]*nats.Msg) go func() { for { select { case <-ctx2.Done(): return default: } if current := atomic.LoadUint64(&delivered); current >= total { done2() return } // Wait until all messages have been consumed. for attempt := 0; attempt < 4; attempt++ { recvd, err := sub.Fetch(batchSize, nats.MaxWait(1*time.Second)) if err != nil { if err == nats.ErrConnectionClosed { return } current := atomic.LoadUint64(&delivered) if current >= total { done2() return } else { t.Logf("WARN: Timeout waiting for next message: %v", err) } continue } for _, msg := range recvd { queue = append(queue, msg) queues.Store(subject, queue) } atomic.AddUint64(&delivered, uint64(len(recvd))) break } } }() } // Wait until context is canceled after receiving all messages. 
<-ctx2.Done() if delivered < total { t.Fatalf("Expected %v, got: %v", total, delivered) } select { case <-ctx.Done(): case err := <-errCh: if err != nil { t.Fatalf("Unexpected error with multiple pull subscribers: %v", err) } } var ( gotNoMessages bool count = 0 ) queues.Range(func(k, v any) bool { msgs := v.([]*nats.Msg) count += len(msgs) if len(msgs) == 0 { gotNoMessages = true return false } return true }) if gotNoMessages { t.Error("Expected all pull subscribers to receive some messages") } } func TestJetStream_ClusterReconnect(t *testing.T) { t.Skip("This test need to be revisited") n := 3 replicas := []int{1, 3} t.Run("pull sub", func(t *testing.T) { for _, r := range replicas { t.Run(fmt.Sprintf("n=%d r=%d", n, r), func(t *testing.T) { stream := &nats.StreamConfig{ Name: fmt.Sprintf("foo-qr%d", r), Replicas: r, } withJSClusterAndStream(t, fmt.Sprintf("QPULLR%d", r), n, stream, testJetStream_ClusterReconnectPullQueueSubscriber) }) } }) t.Run("sub durable", func(t *testing.T) { for _, r := range replicas { t.Run(fmt.Sprintf("n=%d r=%d", n, r), func(t *testing.T) { stream := &nats.StreamConfig{ Name: fmt.Sprintf("quux-r%d", r), Replicas: r, } withJSClusterAndStream(t, fmt.Sprintf("SUBR%d", r), n, stream, testJetStream_ClusterReconnectDurablePushSubscriber) }) } }) t.Run("qsub durable", func(t *testing.T) { for _, r := range replicas { t.Run(fmt.Sprintf("n=%d r=%d", n, r), func(t *testing.T) { stream := &nats.StreamConfig{ Name: fmt.Sprintf("bar-r%d", r), Replicas: r, } withJSClusterAndStream(t, fmt.Sprintf("QSUBR%d", r), n, stream, testJetStream_ClusterReconnectDurableQueueSubscriber) }) } }) } func testJetStream_ClusterReconnectDurableQueueSubscriber(t *testing.T, subject string, srvs ...*jsServer) { var ( srvA = srvs[0] totalMsgs = 20 reconnected = make(chan struct{}) reconnectDone bool ) nc, err := nats.Connect(srvA.ClientURL(), nats.ReconnectHandler(func(nc *nats.Conn) { reconnected <- struct{}{} // Bring back the server after the reconnect event. 
if !reconnectDone { reconnectDone = true srvA.Restart() } }), nats.ErrorHandler(func(_ *nats.Conn, sub *nats.Subscription, err error) { t.Logf("WARN: Got error %v", err) if info, ok := err.(*nats.ErrConsumerSequenceMismatch); ok { t.Logf("WARN: %+v", info) } // Take out this QueueSubscriber from the group. sub.Drain() }), ) if err != nil { t.Error(err) } defer nc.Close() js, err := nc.JetStream() if err != nil { t.Error(err) } for i := 0; i < 10; i++ { payload := fmt.Sprintf("i:%d", i) js.Publish(subject, []byte(payload)) } ctx, done := context.WithTimeout(context.Background(), 10*time.Second) defer done() msgs := make(chan *nats.Msg, totalMsgs) // Create some queue subscribers. for i := 0; i < 5; i++ { expected := totalMsgs dname := "dur" _, err = js.QueueSubscribe(subject, "wg", func(m *nats.Msg) { msgs <- m count := len(msgs) switch { case count == 2: // Do not ack and wait for redelivery on reconnect. srvA.Shutdown() srvA.WaitForShutdown() return case count == expected: done() } err := m.AckSync() if err != nil { // During the reconnection, both of these errors can occur. if err == nats.ErrNoResponders || err == nats.ErrTimeout { // Wait for reconnection event to occur to continue. select { case <-reconnected: return case <-time.After(1 * time.Second): return case <-ctx.Done(): return } } } }, nats.Durable(dname), nats.AckWait(5*time.Second), nats.ManualAck(), nats.IdleHeartbeat(100*time.Millisecond)) if err != nil && (err != nats.ErrTimeout && err != context.DeadlineExceeded) { t.Error(err) } } // Check for persisted messages, this could fail a few times. 
var stream *nats.StreamInfo timeout := time.Now().Add(5 * time.Second) for time.Now().Before(timeout) { stream, err = js.StreamInfo(subject) if err == nats.ErrTimeout { time.Sleep(100 * time.Millisecond) continue } else if err != nil { t.Fatalf("Unexpected error: %v", err) } break } if stream == nil { t.Logf("WARN: Failed to get stream info: %v", err) } var failedPubs int for i := 10; i < totalMsgs; i++ { var published bool payload := fmt.Sprintf("i:%d", i) timeout = time.Now().Add(5 * time.Second) Retry: for time.Now().Before(timeout) { _, err = js.Publish(subject, []byte(payload)) // Skip temporary errors. if err != nil && (err == nats.ErrNoStreamResponse || err == nats.ErrTimeout || err.Error() == `raft: not leader`) { time.Sleep(100 * time.Millisecond) continue Retry } else if err != nil { t.Errorf("Unexpected error: %v", err) } published = true break Retry } if !published { failedPubs++ } } <-ctx.Done() // Wait a bit to get heartbeats. time.Sleep(2 * time.Second) // Drain to allow AckSync response to be received. nc.Drain() got := len(msgs) if got != totalMsgs { t.Logf("WARN: Expected %v, got: %v (failed publishes: %v)", totalMsgs, got, failedPubs) } if got < totalMsgs-failedPubs { t.Errorf("Expected %v, got: %v", totalMsgs-failedPubs, got) } } func testJetStream_ClusterReconnectDurablePushSubscriber(t *testing.T, subject string, srvs ...*jsServer) { var ( srvA = srvs[0] srvB = srvs[1] srvC = srvs[2] totalMsgs = 20 reconnected = make(chan struct{}) reconnectDone bool ) nc, err := nats.Connect(srvA.ClientURL(), nats.ReconnectHandler(func(nc *nats.Conn) { reconnected <- struct{}{} // Bring back the server after the reconnect event. if !reconnectDone { reconnectDone = true srvA.Restart() } }), ) if err != nil { t.Error(err) } // Drain to allow Ack responses to be published. defer nc.Drain() js, err := nc.JetStream() if err != nil { t.Error(err) } // Initial burst of messages. 
for i := 0; i < 10; i++ { payload := fmt.Sprintf("i:%d", i) js.Publish(subject, []byte(payload)) } // For now just confirm that do receive all messages across restarts. ctx, done := context.WithTimeout(context.Background(), 10*time.Second) defer done() recvd := make(chan *nats.Msg, totalMsgs) expected := totalMsgs _, err = js.Subscribe(subject, func(m *nats.Msg) { recvd <- m if len(recvd) == expected { done() } }, nats.Durable("sd1")) if err != nil { t.Errorf("Unexpected error: %v", err) } timeout := time.Now().Add(3 * time.Second) for time.Now().Before(timeout) { if len(recvd) >= 2 { // Restart the first server. srvA.Shutdown() break } } // Wait for reconnect or timeout. select { case <-reconnected: case <-time.After(2 * time.Second): t.Error("Timeout waiting for reconnect") } for i := 10; i < totalMsgs; i++ { payload := fmt.Sprintf("i:%d", i) timeout := time.Now().Add(5 * time.Second) Retry: for time.Now().Before(timeout) { _, err = js.Publish(subject, []byte(payload)) if err == nats.ErrNoStreamResponse || err == nats.ErrTimeout { // Temporary error. time.Sleep(100 * time.Millisecond) continue Retry } else if err != nil { t.Errorf("Unexpected error: %v", err) } break Retry } } srvBClientURL := srvB.ClientURL() srvCClientURL := srvC.ClientURL() timeout = time.Now().Add(3 * time.Second) for time.Now().Before(timeout) { if len(recvd) >= 5 { // Do another Shutdown of the server we are connected with. 
switch nc.ConnectedUrl() { case srvBClientURL: srvB.Shutdown() case srvCClientURL: srvC.Shutdown() default: } break } } <-ctx.Done() got := len(recvd) if got != totalMsgs { t.Logf("WARN: Expected %v, got: %v", totalMsgs, got) } } func testJetStream_ClusterReconnectPullQueueSubscriber(t *testing.T, subject string, srvs ...*jsServer) { var ( recvd = make(map[string]int) recvdQ = make(map[int][]*nats.Msg) srvA = srvs[0] totalMsgs = 20 reconnected = make(chan struct{}, 2) reconnectDone bool ) nc, err := nats.Connect(srvA.ClientURL(), nats.ReconnectHandler(func(nc *nats.Conn) { reconnected <- struct{}{} // Bring back the server after the reconnect event. if !reconnectDone { reconnectDone = true srvA.Restart() } }), ) if err != nil { t.Error(err) } defer nc.Close() js, err := nc.JetStream() if err != nil { t.Error(err) } for i := 0; i < 10; i++ { payload := fmt.Sprintf("i:%d", i) _, err := js.Publish(subject, []byte(payload)) if err != nil { t.Errorf("Unexpected error: %v", err) } } subs := make([]*nats.Subscription, 0) for i := 0; i < 5; i++ { sub, err := js.PullSubscribe(subject, "d1", nats.PullMaxWaiting(5)) if err != nil { t.Fatal(err) } subs = append(subs, sub) } for i := 10; i < totalMsgs; i++ { payload := fmt.Sprintf("i:%d", i) _, err := js.Publish(subject, []byte(payload)) if err != nil { t.Errorf("Unexpected error: %v", err) } } ctx, done := context.WithTimeout(context.Background(), 10*time.Second) defer done() NextMsg: for len(recvd) < totalMsgs { select { case <-ctx.Done(): t.Fatalf("Timeout waiting for messages, expected: %d, got: %d", totalMsgs, len(recvd)) default: } for qsub, sub := range subs { // Server will shutdown after a couple of messages which will result // in empty messages with an status unavailable error. msgs, err := sub.Fetch(1, nats.MaxWait(2*time.Second)) if err == nats.ErrNoResponders || err == nats.ErrTimeout { // Backoff before asking for more messages. 
time.Sleep(100 * time.Millisecond) continue NextMsg } else if err != nil { t.Errorf("Unexpected error: %v", err) continue NextMsg } msg := msgs[0] if len(msg.Data) == 0 && msg.Header.Get("Status") == "503" { t.Fatal("Got 503 JetStream API message!") } recvd[string(msg.Data)]++ recvdQ[qsub] = append(recvdQ[qsub], msg) // Add a few retries since there can be errors during the reconnect. timeout := time.Now().Add(5 * time.Second) RetryAck: for time.Now().Before(timeout) { err = msg.AckSync() if err != nil { // During the reconnection, both of these errors can occur. if err == nats.ErrNoResponders || err == nats.ErrTimeout { // Wait for reconnection event to occur to continue. select { case <-reconnected: continue RetryAck case <-time.After(100 * time.Millisecond): continue RetryAck case <-ctx.Done(): t.Fatal("Timed out waiting for reconnect") } } t.Errorf("Unexpected error: %v", err) continue RetryAck } break RetryAck } // Shutdown the server after a couple of messages. if len(recvd) == 2 { srvA.Shutdown() } } } // Confirm the number of messages. for i := 0; i < totalMsgs; i++ { msg := fmt.Sprintf("i:%d", i) count, ok := recvd[msg] if !ok { t.Errorf("Missing message %v", msg) } else if count != 1 { t.Logf("WARN: Expected to receive a single message, got: %v", count) } } // Expect all qsubs to receive at least a message. 
for _, msgs := range recvdQ { if len(msgs) < 1 { t.Errorf("Expected queue sub to receive at least one message") } } } func TestJetStreamPullSubscribeOptions(t *testing.T) { withJSCluster(t, "FOPTS", 3, testJetStreamFetchOptions) } func testJetStreamFetchOptions(t *testing.T, srvs ...*jsServer) { srv := srvs[0] nc, js := jsClient(t, srv.Server) defer nc.Close() var err error subject := "WQ" _, err = js.AddStream(&nats.StreamConfig{ Name: subject, Replicas: 1, }) if err != nil { t.Fatal(err) } sendMsgs := func(t *testing.T, totalMsgs int) { t.Helper() for i := 0; i < totalMsgs; i++ { payload := fmt.Sprintf("i:%d", i) _, err := js.Publish(subject, []byte(payload)) if err != nil { t.Errorf("Unexpected error: %v", err) } } } t.Run("max request batch", func(t *testing.T) { defer js.PurgeStream(subject) sub, err := js.PullSubscribe(subject, "max-request-batch", nats.MaxRequestBatch(2)) if err != nil { t.Fatal(err) } defer sub.Unsubscribe() if _, err := sub.Fetch(10); err == nil || !strings.Contains(err.Error(), "MaxRequestBatch of 2") { t.Fatalf("Expected error about max request batch size, got %v", err) } }) t.Run("max request max bytes", func(t *testing.T) { defer js.PurgeStream(subject) sub, err := js.PullSubscribe(subject, "max-request-max-bytes", nats.MaxRequestMaxBytes(100)) if err != nil { t.Fatal(err) } defer sub.Unsubscribe() if _, err := sub.Fetch(10, nats.PullMaxBytes(200)); err == nil || !strings.Contains(err.Error(), "MaxRequestMaxBytes of 100") { t.Fatalf("Expected error about max request max bytes, got %v", err) } }) t.Run("max request expires", func(t *testing.T) { defer js.PurgeStream(subject) sub, err := js.PullSubscribe(subject, "max-request-expires", nats.MaxRequestExpires(50*time.Millisecond)) if err != nil { t.Fatal(err) } defer sub.Unsubscribe() if _, err := sub.Fetch(10); err == nil || !strings.Contains(err.Error(), "MaxRequestExpires of 50ms") { t.Fatalf("Expected error about max request expiration, got %v", err) } }) t.Run("batch size", func(t 
*testing.T) { defer js.PurgeStream(subject) expected := 10 sendMsgs(t, expected) sub, err := js.PullSubscribe(subject, "batch-size") if err != nil { t.Fatal(err) } defer sub.Unsubscribe() msgs, err := sub.Fetch(expected, nats.MaxWait(2*time.Second)) if err != nil { t.Fatal(err) } for _, msg := range msgs { msg.AckSync() } got := len(msgs) if got != expected { t.Fatalf("Got %v messages, expected at least: %v", got, expected) } // Next fetch will timeout since no more messages. _, err = sub.Fetch(1, nats.MaxWait(250*time.Millisecond)) if err != nats.ErrTimeout { t.Errorf("Expected timeout fetching next message, got: %v", err) } expected = 5 sendMsgs(t, expected) msgs, err = sub.Fetch(expected, nats.MaxWait(1*time.Second)) if err != nil { t.Fatalf("Unexpected error: %v", err) } got = len(msgs) if got != expected { t.Fatalf("Got %v messages, expected at least: %v", got, expected) } for _, msg := range msgs { msg.Ack() } }) t.Run("sub drain is no op", func(t *testing.T) { defer js.PurgeStream(subject) expected := 10 sendMsgs(t, expected) sub, err := js.PullSubscribe(subject, "batch-ctx") if err != nil { t.Fatal(err) } defer sub.Unsubscribe() msgs, err := sub.Fetch(expected, nats.MaxWait(2*time.Second)) if err != nil { t.Fatalf("Unexpected error: %v", err) } got := len(msgs) if got != expected { t.Fatalf("Got %v messages, expected at least: %v", got, expected) } err = sub.Drain() if err != nil { t.Errorf("Unexpected error: %v", err) } }) t.Run("fetch after unsubscribe", func(t *testing.T) { defer js.PurgeStream(subject) expected := 10 sendMsgs(t, expected) sub, err := js.PullSubscribe(subject, "fetch-unsub") if err != nil { t.Fatal(err) } err = sub.Unsubscribe() if err != nil { t.Fatal(err) } _, err = sub.Fetch(1, nats.MaxWait(500*time.Millisecond)) if err == nil { t.Fatal("Unexpected success") } if !errors.Is(err, nats.ErrBadSubscription) { t.Fatalf("Unexpected error: %v", err) } }) t.Run("max waiting exceeded", func(t *testing.T) { defer js.PurgeStream(subject) _, err 
:= js.AddConsumer(subject, &nats.ConsumerConfig{ Durable: "max-waiting", MaxWaiting: 2, AckPolicy: nats.AckExplicitPolicy, }) if err != nil { t.Fatal(err) } var wg sync.WaitGroup wg.Add(2) for i := 0; i < 2; i++ { go func() { defer wg.Done() sub, err := js.PullSubscribe(subject, "max-waiting") if err != nil { return } sub.Fetch(1, nats.MaxWait(time.Second)) }() } // Give time to those 2 above to fill the MaxWaiting checkFor(t, time.Second, 15*time.Millisecond, func() error { ci, err := js.ConsumerInfo(subject, "max-waiting") if err != nil { return err } if n := ci.NumWaiting; n != 2 { return fmt.Errorf("NumWaiting should be 2, was %v", n) } return nil }) // Now this request should get a 409. Currently, we do not re-fetch // on that error, so would be visible in the error returned by Fetch() sub, err := js.PullSubscribe(subject, "max-waiting") if err != nil { t.Fatal(err) } _, err = sub.Fetch(1, nats.MaxWait(time.Second)) if err == nil || !strings.Contains(err.Error(), "MaxWaiting") { t.Fatalf("Unexpected error: %v", err) } wg.Wait() }) t.Run("no wait", func(t *testing.T) { defer js.PurgeStream(subject) expected := 10 sendMsgs(t, expected) sub, err := js.PullSubscribe(subject, "no-wait") if err != nil { t.Fatal(err) } defer sub.Unsubscribe() ctx, done := context.WithTimeout(context.Background(), 5*time.Second) defer done() recvd := make([]*nats.Msg, 0) Loop: for range time.NewTicker(100 * time.Millisecond).C { select { case <-ctx.Done(): break Loop default: } msgs, err := sub.Fetch(1, nats.MaxWait(250*time.Millisecond)) if err != nil { t.Fatalf("Unexpected error: %v", err) } recvd = append(recvd, msgs[0]) for _, msg := range msgs { err = msg.AckSync() if err != nil { t.Error(err) } } if len(recvd) == expected { done() break } } got := len(recvd) if got != expected { t.Fatalf("Got %v messages, expected at least: %v", got, expected) } // There should only be timeout errors since no more messages. 
msgs, err := sub.Fetch(expected, nats.MaxWait(2*time.Second)) if err == nil { t.Fatal("Unexpected success", len(msgs)) } if err != nats.ErrTimeout { t.Fatalf("Expected timeout error, got: %v", err) } }) } func TestJetStreamPublishAsync(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() // Make sure we get a proper failure when no stream is present. paf, err := js.PublishAsync("foo", []byte("Hello JS")) if err != nil { t.Fatalf("Unexpected error: %v", err) } select { case <-paf.Ok(): t.Fatalf("Did not expect to get PubAck") case err := <-paf.Err(): if err != nats.ErrNoResponders { t.Fatalf("Expected a ErrNoResponders error, got %v", err) } // Should be able to get the message back to resend, etc. m := paf.Msg() if m == nil { t.Fatalf("Expected to be able to retrieve the message") } if m.Subject != "foo" || string(m.Data) != "Hello JS" { t.Fatalf("Wrong message: %+v", m) } case <-time.After(time.Second): t.Fatalf("Did not receive an error in time") } // Now create a stream and expect a PubAck from <-OK(). if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST"}); err != nil { t.Fatalf("Unexpected error: %v", err) } paf, err = js.PublishAsync("TEST", []byte("Hello JS ASYNC PUB")) if err != nil { t.Fatalf("Unexpected error: %v", err) } select { case pa := <-paf.Ok(): if pa.Stream != "TEST" || pa.Sequence != 1 { t.Fatalf("Bad PubAck: %+v", pa) } case err := <-paf.Err(): t.Fatalf("Did not expect to get an error: %v", err) case <-time.After(time.Second): t.Fatalf("Did not receive a PubAck in time") } errCh := make(chan error, 1) // Make sure we can register an async err handler for these. 
errHandler := func(js nats.JetStream, originalMsg *nats.Msg, err error) { if originalMsg == nil { t.Fatalf("Expected non-nil original message") } errCh <- err } js, err = nc.JetStream(nats.PublishAsyncErrHandler(errHandler)) if err != nil { t.Fatalf("Unexpected error: %v", err) } if _, err = js.PublishAsync("bar", []byte("Hello JS ASYNC PUB")); err != nil { t.Fatalf("Unexpected error: %v", err) } select { case err := <-errCh: if err != nats.ErrNoResponders { t.Fatalf("Expected a ErrNoResponders error, got %v", err) } case <-time.After(time.Second): t.Fatalf("Did not receive an async err in time") } // Now test that we can set our window for the JetStream context to limit number of outstanding messages. js, err = nc.JetStream(nats.PublishAsyncMaxPending(10)) if err != nil { t.Fatalf("Unexpected error: %v", err) } for i := 0; i < 100; i++ { if _, err = js.PublishAsync("bar", []byte("Hello JS ASYNC PUB")); err != nil { t.Fatalf("Unexpected error: %v", err) } if np := js.PublishAsyncPending(); np > 10 { t.Fatalf("Expected num pending to not exceed 10, got %d", np) } } // Now test that we can wait on all prior outstanding if we want. 
js, err = nc.JetStream(nats.PublishAsyncMaxPending(10)) if err != nil { t.Fatalf("Unexpected error: %v", err) } for i := 0; i < 500; i++ { if _, err = js.PublishAsync("bar", []byte("Hello JS ASYNC PUB")); err != nil { t.Fatalf("Unexpected error: %v", err) } } select { case <-js.PublishAsyncComplete(): case <-time.After(5 * time.Second): t.Fatalf("Did not receive completion signal") } // Check invalid options _, err = js.PublishAsync("foo", []byte("Bad"), nats.StallWait(0)) expectedErr := "nats: stall wait should be more than 0" if err == nil || err.Error() != expectedErr { t.Errorf("Expected %v, got: %v", expectedErr, err) } _, err = js.Publish("foo", []byte("Also bad"), nats.StallWait(200*time.Millisecond)) expectedErr = "nats: stall wait cannot be set to sync publish" if err == nil || err.Error() != expectedErr { t.Errorf("Expected %v, got: %v", expectedErr, err) } nc, js = jsClient(t, s, nats.CustomInboxPrefix("_BOX")) defer nc.Close() paf, err = js.PublishAsync("foo", []byte("Hello JS with Custom Inbox")) if err != nil { t.Fatalf("Unexpected error: %v", err) } select { case <-paf.Ok(): t.Fatalf("Did not expect to get PubAck") case err := <-paf.Err(): if err != nats.ErrNoResponders { t.Fatalf("Expected a ErrNoResponders error, got %v", err) } m := paf.Msg() if m == nil { t.Fatalf("Expected to be able to retrieve the message") } if m.Subject != "foo" || string(m.Data) != "Hello JS with Custom Inbox" { t.Fatalf("Wrong message: %+v", m) } case <-time.After(time.Second): t.Fatalf("Did not receive an error in time") } } func TestPublishAsyncResetPendingOnReconnect(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() // Now create a stream and expect a PubAck from <-OK(). 
if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Subjects: []string{"FOO"}}); err != nil { t.Fatalf("Unexpected error: %v", err) } errs := make(chan error, 1) done := make(chan struct{}, 1) acks := make(chan nats.PubAckFuture, 100) go func() { for i := 0; i < 100; i++ { if ack, err := js.PublishAsync("FOO", []byte("hello")); err != nil { errs <- err return } else { acks <- ack } } close(acks) done <- struct{}{} }() select { case <-done: case err := <-errs: t.Fatalf("Unexpected error during publish: %v", err) case <-time.After(5 * time.Second): t.Fatalf("Did not receive completion signal") } s.Shutdown() time.Sleep(100 * time.Millisecond) if pending := js.PublishAsyncPending(); pending != 0 { t.Fatalf("Expected no pending messages after server shutdown; got: %d", pending) } s = RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) for ack := range acks { select { case <-ack.Ok(): case err := <-ack.Err(): if !errors.Is(err, nats.ErrDisconnected) && !errors.Is(err, nats.ErrNoResponders) { t.Fatalf("Expected error: %v or %v; got: %v", nats.ErrDisconnected, nats.ErrNoResponders, err) } case <-time.After(5 * time.Second): t.Fatalf("Did not receive completion signal") } } } func TestPublishAsyncAckTimeout(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } errs := make(chan error, 1) js, err := nc.JetStream( nats.PublishAsyncTimeout(50*time.Millisecond), nats.PublishAsyncErrHandler(func(js nats.JetStream, m *nats.Msg, e error) { errs <- e }), ) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() _, err = js.AddStream(&nats.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}, NoAck: true}) if err != nil { t.Fatalf("Unexpected error: %v", err) } ack, err := js.PublishAsync("FOO.A", []byte("hello")) if err != nil { t.Fatalf("Unexpected error: %v", err) } select { case <-ack.Ok(): 
t.Fatalf("Expected timeout") case err := <-ack.Err(): if !errors.Is(err, nats.ErrAsyncPublishTimeout) { t.Fatalf("Expected error: %v; got: %v", nats.ErrAsyncPublishTimeout, err) } case <-time.After(time.Second): t.Fatalf("Did not receive ack timeout") } // check if error callback is called select { case err := <-errs: if !errors.Is(err, nats.ErrAsyncPublishTimeout) { t.Fatalf("Expected error: %v; got: %v", nats.ErrAsyncPublishTimeout, err) } case <-time.After(time.Second): t.Fatalf("Did not receive error from error handler") } if js.PublishAsyncPending() != 0 { t.Fatalf("Expected no pending messages") } select { case <-js.PublishAsyncComplete(): case <-time.After(100 * time.Millisecond): t.Fatalf("Did not receive completion signal") } } func TestPublishAsyncClearStall(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } js, err := nc.JetStream( nats.PublishAsyncTimeout(500*time.Millisecond), nats.PublishAsyncMaxPending(100)) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() // use stream with no acks to test stalling _, err = js.AddStream(&nats.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}, NoAck: true}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for range 100 { _, err := js.PublishAsync("FOO.A", []byte("hello"), nats.StallWait(1*time.Nanosecond)) if err != nil { t.Fatalf("Unexpected error: %v", err) } } // after publishing 100 messages, next one should fail with ErrTooManyStalledMsgs _, err = js.PublishAsync("FOO.A", []byte("hello"), nats.StallWait(50*time.Millisecond)) if !errors.Is(err, nats.ErrTooManyStalledMsgs) { t.Fatalf("Expected error: %v; got: %v", nats.ErrTooManyStalledMsgs, err) } // after publish timeout all pending messages should be cleared // and we should be able to publish again select { case <-js.PublishAsyncComplete(): case <-time.After(2 * time.Second): t.Fatalf("Did 
not receive completion signal") } if _, err = js.PublishAsync("FOO.A", []byte("hello")); err != nil { t.Fatalf("Unexpected error: %v", err) } if js.PublishAsyncPending() != 1 { t.Fatalf("Expected 1 pending message; got: %d", js.PublishAsyncPending()) } } func TestPublishAsyncRetryInErrHandler(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } streamCreated := make(chan struct{}) errCB := func(js nats.JetStream, m *nats.Msg, e error) { <-streamCreated _, err := js.PublishMsgAsync(m) if err != nil { t.Fatalf("Unexpected error when republishing: %v", err) } } js, err := nc.JetStream(nats.PublishAsyncErrHandler(errCB)) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() errs := make(chan error, 1) done := make(chan struct{}, 1) go func() { for i := 0; i < 10; i++ { if _, err := js.PublishAsync("FOO.A", []byte("hello")); err != nil { errs <- err return } } done <- struct{}{} }() select { case <-done: case err := <-errs: t.Fatalf("Unexpected error during publish: %v", err) case <-time.After(5 * time.Second): t.Fatalf("Did not receive completion signal") } _, err = js.AddStream(&nats.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } close(streamCreated) select { case <-js.PublishAsyncComplete(): case <-time.After(5 * time.Second): t.Fatalf("Did not receive completion signal") } info, err := js.StreamInfo("foo") if err != nil { t.Fatalf("Unexpected error: %v", err) } if info.State.Msgs != 10 { t.Fatalf("Expected 10 messages in the stream; got: %d", info.State.Msgs) } } func TestJetStreamPublishAsyncPerf(t *testing.T) { // Comment out below to run this benchmark. 
t.SkipNow() s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() // 64 byte payload. msg := make([]byte, 64) rand.Read(msg) // Setup error handler. var errors uint32 errHandler := func(js nats.JetStream, originalMsg *nats.Msg, err error) { t.Logf("Got an async err: %v", err) atomic.AddUint32(&errors, 1) } js, err := nc.JetStream( nats.PublishAsyncErrHandler(errHandler), nats.PublishAsyncMaxPending(256), ) if err != nil { t.Fatalf("Unexpected error: %v", err) } if _, err := js.AddStream(&nats.StreamConfig{Name: "B"}); err != nil { t.Fatalf("Unexpected error: %v", err) } toSend := 1000000 start := time.Now() for i := 0; i < toSend; i++ { if _, err = js.PublishAsync("B", msg); err != nil { t.Fatalf("Unexpected error: %v", err) } } select { case <-js.PublishAsyncComplete(): if ne := atomic.LoadUint32(&errors); ne > 0 { t.Fatalf("Got unexpected errors publishing") } case <-time.After(5 * time.Second): t.Fatalf("Did not receive completion signal") } tt := time.Since(start) fmt.Printf("Took %v to send %d msgs\n", tt, toSend) fmt.Printf("%.0f msgs/sec\n\n", float64(toSend)/tt.Seconds()) } func TestPublishAsyncRetry(t *testing.T) { tests := []struct { name string pubOpts []nats.PubOpt ackError error pubErr error }{ { name: "retry until stream is ready", pubOpts: []nats.PubOpt{ nats.RetryAttempts(10), nats.RetryWait(100 * time.Millisecond), }, }, { name: "fail after max retries", pubOpts: []nats.PubOpt{ nats.RetryAttempts(2), nats.RetryWait(50 * time.Millisecond), }, ackError: nats.ErrNoResponders, }, { name: "no retries", pubOpts: nil, ackError: nats.ErrNoResponders, }, { name: "invalid retry attempts", pubOpts: []nats.PubOpt{ nats.RetryAttempts(-1), }, pubErr: nats.ErrInvalidArg, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, err := 
nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } // set max pending to 1 so that we can test if retries don't cause stall js, err := nc.JetStream(nats.PublishAsyncMaxPending(1)) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() test.pubOpts = append(test.pubOpts, nats.StallWait(1*time.Nanosecond)) ack, err := js.PublishAsync("foo", []byte("hello"), test.pubOpts...) if !errors.Is(err, test.pubErr) { t.Fatalf("Expected error: %v; got: %v", test.pubErr, err) } if err != nil { return } errs := make(chan error, 1) go func() { // create stream with delay so that publish will receive no responders time.Sleep(300 * time.Millisecond) if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Subjects: []string{"foo"}}); err != nil { errs <- err } }() select { case <-ack.Ok(): case err := <-ack.Err(): if test.ackError != nil { if !errors.Is(err, test.ackError) { t.Fatalf("Expected error: %v; got: %v", test.ackError, err) } } else { t.Fatalf("Unexpected ack error: %v", err) } case err := <-errs: t.Fatalf("Error creating stream: %v", err) case <-time.After(5 * time.Second): t.Fatalf("Timeout waiting for ack") } }) } } func TestJetStreamCleanupPublisher(t *testing.T) { t.Run("cleanup js publisher", func(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() // Create a stream. 
if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Subjects: []string{"FOO"}}); err != nil { t.Fatalf("Unexpected error: %v", err) } numSubs := nc.NumSubscriptions() if _, err := js.PublishAsync("FOO", []byte("hello")); err != nil { t.Fatalf("Unexpected error: %v", err) } select { case <-js.PublishAsyncComplete(): case <-time.After(5 * time.Second): t.Fatalf("Did not receive completion signal") } if numSubs+1 != nc.NumSubscriptions() { t.Fatalf("Expected an additional subscription after publish, got %d", nc.NumSubscriptions()) } js.CleanupPublisher() if numSubs != nc.NumSubscriptions() { t.Fatalf("Expected subscriptions to be back to original count") } }) t.Run("cleanup js publisher, cancel pending acks", func(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() cbErr := make(chan error, 10) js, err := nc.JetStream(nats.PublishAsyncErrHandler(func(js nats.JetStream, m *nats.Msg, err error) { cbErr <- err })) if err != nil { t.Fatalf("Unexpected error: %v", err) } // Create a stream with NoAck so that we can test that we cancel ack futures. 
		// Stream is created with NoAck so the publisher never receives pub acks,
		// letting us verify that CleanupPublisher cancels the pending futures.
		if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Subjects: []string{"FOO"}, NoAck: true}); err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		numSubs := nc.NumSubscriptions()
		var acks []nats.PubAckFuture
		for i := 0; i < 10; i++ {
			ack, err := js.PublishAsync("FOO", []byte("hello"))
			if err != nil {
				t.Fatalf("Unexpected error: %v", err)
			}
			acks = append(acks, ack)
		}
		asyncComplete := js.PublishAsyncComplete()
		select {
		case <-asyncComplete:
			t.Fatalf("Should not complete, NoAck is set")
		case <-time.After(200 * time.Millisecond):
		}
		if numSubs+1 != nc.NumSubscriptions() {
			t.Fatalf("Expected an additional subscription after publish, got %d", nc.NumSubscriptions())
		}
		js.CleanupPublisher()
		if numSubs != nc.NumSubscriptions() {
			t.Fatalf("Expected subscriptions to be back to original count")
		}

		// check that PublishAsyncComplete channel is closed
		select {
		case <-asyncComplete:
		case <-time.After(5 * time.Second):
			t.Fatalf("Did not receive completion signal")
		}

		// check that all ack futures are canceled
		for _, ack := range acks {
			select {
			case err := <-ack.Err():
				if !errors.Is(err, nats.ErrJetStreamPublisherClosed) {
					t.Fatalf("Expected JetStreamContextClosed error, got %v", err)
				}
			case <-ack.Ok():
				t.Fatalf("Expected error on the ack future")
			case <-time.After(200 * time.Millisecond):
				t.Fatalf("Expected an error on the ack future")
			}
		}

		// check that async error handler is called for each pending ack
		for i := 0; i < 10; i++ {
			select {
			case err := <-cbErr:
				if !errors.Is(err, nats.ErrJetStreamPublisherClosed) {
					t.Fatalf("Expected JetStreamContextClosed error, got %v", err)
				}
			case <-time.After(200 * time.Millisecond):
				t.Fatalf("Expected errors to be passed from the async handler")
			}
		}
	})
}

// TestJetStreamPublishExpectZero verifies that explicitly setting the
// expected-last-sequence options to zero results in the corresponding
// "Nats-Expected-Last-Sequence" / "Nats-Expected-Last-Subject-Sequence"
// headers being set to "0" on both the stored and delivered message.
func TestJetStreamPublishExpectZero(t *testing.T) {
	s := RunBasicJetStreamServer()
	defer shutdownJSServerAndRemoveStorage(t, s)

	nc, js := jsClient(t, s)
	defer nc.Close()

	var err error

	// Create the stream using our client API.
	_, err = js.AddStream(&nats.StreamConfig{
		Name:     "TEST",
		Subjects: []string{"test", "foo", "bar"},
	})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	sub, err := nc.SubscribeSync("foo")
	if err != nil {
		t.Errorf("Error: %s", err)
	}
	// Explicitly set the header to zero.
	_, err = js.Publish("foo", []byte("bar"),
		nats.ExpectLastSequence(0),
		nats.ExpectLastSequencePerSubject(0),
	)
	if err != nil {
		t.Errorf("Error: %v", err)
	}
	// Check the headers on the message as stored in the stream.
	rawMsg, err := js.GetMsg("TEST", 1)
	if err != nil {
		t.Fatalf("Error: %s", err)
	}
	hdr, ok := rawMsg.Header["Nats-Expected-Last-Sequence"]
	if !ok {
		t.Fatal("Missing header")
	}
	got := hdr[0]
	expected := "0"
	if got != expected {
		t.Fatalf("Expected %v, got: %v", expected, got)
	}
	hdr, ok = rawMsg.Header["Nats-Expected-Last-Subject-Sequence"]
	if !ok {
		t.Fatal("Missing header")
	}
	got = hdr[0]
	expected = "0"
	if got != expected {
		t.Fatalf("Expected %v, got: %v", expected, got)
	}
	// Check the headers on the message as received by a core NATS subscriber.
	msg, err := sub.NextMsg(1 * time.Second)
	if err != nil {
		t.Fatalf("Error: %s", err)
	}
	hdr, ok = msg.Header["Nats-Expected-Last-Sequence"]
	if !ok {
		t.Fatal("Missing header")
	}
	got = hdr[0]
	expected = "0"
	if got != expected {
		t.Fatalf("Expected %v, got: %v", expected, got)
	}
	hdr, ok = msg.Header["Nats-Expected-Last-Subject-Sequence"]
	if !ok {
		t.Fatal("Missing header")
	}
	got = hdr[0]
	expected = "0"
	if got != expected {
		t.Fatalf("Expected %v, got: %v", expected, got)
	}
}

// TestJetStreamBindConsumer exercises the nats.Bind / nats.BindStream
// subscribe options: argument validation, bind-only to push and pull
// consumers, durable exclusivity, and queue group binding rules.
func TestJetStreamBindConsumer(t *testing.T) {
	s := RunBasicJetStreamServer()
	defer shutdownJSServerAndRemoveStorage(t, s)

	nc, js := jsClient(t, s)
	defer nc.Close()

	if _, err := js.AddStream(nil); err == nil {
		t.Fatalf("Unexpected success")
	}
	si, err := js.AddStream(&nats.StreamConfig{Name: "foo"})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if si == nil || si.Config.Name != "foo" {
		t.Fatalf("StreamInfo is not correct %+v", si)
	}

	for i := 0; i < 25; i++ {
		js.Publish("foo", []byte("hi"))
	}

	// Both stream and consumer names are required for bind only.
	_, err = js.SubscribeSync("foo", nats.Bind("", ""))
	if err != nats.ErrStreamNameRequired {
		t.Fatalf("Unexpected error: %v", err)
	}
	_, err = js.SubscribeSync("foo", nats.Bind("foo", ""))
	if err != nats.ErrConsumerNameRequired {
		t.Fatalf("Unexpected error: %v", err)
	}
	// Binding to a consumer that does not exist yet must fail.
	_, err = js.SubscribeSync("foo", nats.Bind("foo", "push"))
	if err == nil || !errors.Is(err, nats.ErrConsumerNotFound) {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Pull consumer
	_, err = js.PullSubscribe("foo", "pull", nats.Bind("foo", "pull"))
	if err == nil || !errors.Is(err, nats.ErrConsumerNotFound) {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Push consumer
	_, err = js.AddConsumer("foo", &nats.ConsumerConfig{
		Durable:        "push",
		AckPolicy:      nats.AckExplicitPolicy,
		DeliverSubject: nats.NewInbox(),
	})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Push Consumer Bind Only
	sub, err := js.SubscribeSync("foo", nats.Bind("foo", "push"))
	if err != nil {
		t.Fatal(err)
	}

	// Ambiguous declaration should not be allowed.
	_, err = js.SubscribeSync("foo", nats.Durable("push2"), nats.Bind("foo", "push"))
	if err == nil || !strings.Contains(err.Error(), `nats: duplicate consumer names (push2 and push)`) {
		t.Fatalf("Unexpected error: %v", err)
	}
	_, err = js.SubscribeSync("foo", nats.BindStream("foo"), nats.Bind("foo2", "push"))
	if err == nil || !strings.Contains(err.Error(), `nats: duplicate stream name (foo and foo2)`) {
		t.Fatalf("Unexpected error: %v", err)
	}
	sub.Unsubscribe()

	// checkConsInactive polls until the server reports the "push" consumer
	// as no longer push-bound (i.e. the interest went away).
	checkConsInactive := func() {
		t.Helper()
		checkFor(t, time.Second, 15*time.Millisecond, func() error {
			ci, _ := js.ConsumerInfo("foo", "push")
			if ci != nil && !ci.PushBound {
				return nil
			}
			return fmt.Errorf("Consumer %q still active", "push")
		})
	}
	checkConsInactive()

	// Duplicate stream name is fine.
	sub, err = js.SubscribeSync("foo", nats.BindStream("foo"), nats.Bind("foo", "push"))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	// Cannot have 2 instances for same durable
	_, err = js.SubscribeSync("foo", nats.Durable("push"))
	if err == nil || !strings.Contains(err.Error(), "already bound") {
		t.Fatalf("Unexpected error: %v", err)
	}
	// Cannot start a queue sub since plain durable is active
	_, err = js.QueueSubscribeSync("foo", "wq", nats.Durable("push"))
	if err == nil || !strings.Contains(err.Error(), "without a deliver group") {
		t.Fatalf("Unexpected error: %v", err)
	}
	sub.Unsubscribe()
	checkConsInactive()

	// Create a queue sub
	_, err = js.QueueSubscribeSync("foo", "wq1", nats.Durable("qpush"))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	// Can't create a plain sub on that durable
	_, err = js.SubscribeSync("foo", nats.Durable("qpush"))
	if err == nil || !strings.Contains(err.Error(), "cannot create a subscription for a consumer with a deliver group") {
		t.Fatalf("Unexpected error: %v", err)
	}
	// Try to attach different queue group
	_, err = js.QueueSubscribeSync("foo", "wq2", nats.Durable("qpush"))
	if err == nil || !strings.Contains(err.Error(), "cannot create a queue subscription") {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Pull consumer
	_, err = js.AddConsumer("foo", &nats.ConsumerConfig{
		Durable:   "pull",
		AckPolicy: nats.AckExplicitPolicy,
	})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Pull consumer can bind without create using only the stream name (since durable is required argument).
	_, err = js.PullSubscribe("foo", "pull", nats.Bind("foo", "pull"))
	if err != nil {
		t.Fatal(err)
	}

	// Prevent binding to durable that is from a wrong type.
	_, err = js.PullSubscribe("foo", "push", nats.Bind("foo", "push"))
	if err != nats.ErrPullSubscribeToPushConsumer {
		t.Fatalf("Unexpected error: %v", err)
	}
	_, err = js.SubscribeSync("foo", nats.Bind("foo", "pull"))
	if err != nats.ErrPullSubscribeRequired {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Create ephemeral consumer
	sub1, err := js.SubscribeSync("foo")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	cinfo, err := sub1.ConsumerInfo()
	if err != nil {
		t.Fatal(err)
	}

	// Cannot bind to ephemeral consumer because it is active.
	_, err = js.SubscribeSync("foo", nats.Bind("foo", cinfo.Name))
	if err == nil || !strings.Contains(err.Error(), "already bound") {
		t.Fatalf("Unexpected error: %v", err)
	}

	// However, one can create an ephemeral Queue subscription and bind several members to it.
	sub2, err := js.QueueSubscribeSync("foo", "wq3")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	// Consume all
	for i := 0; i < 25; i++ {
		msg, err := sub2.NextMsg(time.Second)
		if err != nil {
			t.Fatalf("Error on NextMsg: %v", err)
		}
		msg.AckSync()
	}
	cinfo, _ = sub2.ConsumerInfo()
	sub3, err := js.QueueSubscribeSync("foo", "wq3", nats.Bind("foo", cinfo.Name))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	for i := 0; i < 100; i++ {
		js.Publish("foo", []byte("new"))
	}
	// We expect sub3 to at least get a message
	if _, err := sub3.NextMsg(time.Second); err != nil {
		t.Fatalf("Second member failed to get a message: %v", err)
	}
}

// TestJetStreamDomain verifies JetStream contexts bound to a specific
// domain (nats.Domain option): a matching domain and the default (no
// domain) both work against a server configured with a domain, while a
// non-matching domain reports JetStream as not enabled.
func TestJetStreamDomain(t *testing.T) {
	conf := createConfFile(t, []byte(`
		listen: 127.0.0.1:-1
		jetstream: { domain: ABC }
	`))
	defer os.Remove(conf)

	s, _ := RunServerWithConfig(conf)
	defer shutdownJSServerAndRemoveStorage(t, s)

	nc, err := nats.Connect(s.ClientURL())
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer nc.Close()

	// JS with custom domain
	jsd, err := nc.JetStream(nats.Domain("ABC"))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	info, err := jsd.AccountInfo()
	if err != nil {
		t.Error(err)
	}
	got := info.Domain
	expected := "ABC"
	if got != expected {
		t.Errorf("Got %v, expected: %v", got, expected)
	}

	if _, err = jsd.AddStream(&nats.StreamConfig{Name: "foo"}); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	jsd.Publish("foo", []byte("first"))

	sub, err := jsd.SubscribeSync("foo")
	if err != nil {
		t.Fatal(err)
	}
	msg, err := sub.NextMsg(time.Second)
	if err != nil {
		t.Fatal(err)
	}
	got = string(msg.Data)
	expected = "first"
	if got != expected {
		t.Errorf("Got %v, expected: %v", got, expected)
	}

	// JS without explicit bound domain should also work.
	js, err := nc.JetStream()
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	info, err = js.AccountInfo()
	if err != nil {
		t.Error(err)
	}
	got = info.Domain
	expected = "ABC"
	if got != expected {
		t.Errorf("Got %v, expected: %v", got, expected)
	}
	js.Publish("foo", []byte("second"))

	sub2, err := js.SubscribeSync("foo")
	if err != nil {
		t.Fatal(err)
	}
	msg, err = sub2.NextMsg(time.Second)
	if err != nil {
		t.Fatal(err)
	}
	got = string(msg.Data)
	expected = "first"
	if got != expected {
		t.Errorf("Got %v, expected: %v", got, expected)
	}

	msg, err = sub2.NextMsg(time.Second)
	if err != nil {
		t.Fatal(err)
	}
	got = string(msg.Data)
	expected = "second"
	if got != expected {
		t.Errorf("Got %v, expected: %v", got, expected)
	}

	// Using different domain not configured is an error.
	jsb, err := nc.JetStream(nats.Domain("XYZ"))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	_, err = jsb.AccountInfo()
	if err != nats.ErrJetStreamNotEnabled {
		t.Errorf("Unexpected error: %v", err)
	}
}

// Test that we properly enforce per subject msg limits.
// TestJetStreamMaxMsgsPerSubject verifies MaxMsgsPerSubject enforcement
// for both memory and file storage, including after a stream purge.
func TestJetStreamMaxMsgsPerSubject(t *testing.T) {
	const subjectMax = 5
	msc := nats.StreamConfig{
		Name:              "TEST",
		Subjects:          []string{"foo", "bar", "baz.*"},
		Storage:           nats.MemoryStorage,
		MaxMsgsPerSubject: subjectMax,
	}
	fsc := msc
	fsc.Storage = nats.FileStorage

	cases := []struct {
		name    string
		mconfig *nats.StreamConfig
	}{
		{"MemoryStore", &msc},
		{"FileStore", &fsc},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			s := RunBasicJetStreamServer()
			defer shutdownJSServerAndRemoveStorage(t, s)

			// Client for API requests.
			nc, js := jsClient(t, s)
			defer nc.Close()

			var err error
			_, err = js.AddStream(c.mconfig)
			if err != nil {
				t.Fatalf("Unexpected error adding stream: %v", err)
			}
			defer js.DeleteStream(c.mconfig.Name)

			// pubAndCheck publishes num messages on subj, then asserts the
			// stream's total message count equals expectedNumMsgs.
			pubAndCheck := func(subj string, num int, expectedNumMsgs uint64) {
				t.Helper()
				for i := 0; i < num; i++ {
					if _, err = js.Publish(subj, []byte("TSLA")); err != nil {
						t.Fatalf("Unexpected publish error: %v", err)
					}
				}
				si, err := js.StreamInfo(c.mconfig.Name)
				if err != nil {
					t.Fatalf("Unexpected error: %v", err)
				}
				if si.State.Msgs != expectedNumMsgs {
					t.Fatalf("Expected %d msgs, got %d", expectedNumMsgs, si.State.Msgs)
				}
			}

			pubAndCheck("foo", 1, 1)
			pubAndCheck("foo", 4, 5)
			// Now make sure our per subject limits kick in..
			pubAndCheck("foo", 2, 5)
			pubAndCheck("baz.22", 5, 10)
			pubAndCheck("baz.33", 5, 15)
			// We are maxed so totals should be same no matter what we add here.
			pubAndCheck("baz.22", 5, 15)
			pubAndCheck("baz.33", 5, 15)

			// Now purge and make sure all is still good.
			if err := js.PurgeStream(c.mconfig.Name); err != nil {
				t.Fatalf("Unexpected purge error: %v", err)
			}
			pubAndCheck("foo", 1, 1)
			pubAndCheck("foo", 4, 5)
			pubAndCheck("baz.22", 5, 10)
			pubAndCheck("baz.33", 5, 15)
		})
	}
}

// TestJetStreamDrainFailsToDeleteConsumer verifies that when a consumer is
// deleted on the server while a subscription drain is in progress, the
// drain surfaces a "consumer not found" error via the async error handler.
func TestJetStreamDrainFailsToDeleteConsumer(t *testing.T) {
	s := RunBasicJetStreamServer()
	defer shutdownJSServerAndRemoveStorage(t, s)

	errCh := make(chan error, 1)
	nc, err := nats.Connect(s.ClientURL(), nats.ErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, err error) {
		select {
		case errCh <- err:
		default:
		}
	}))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer nc.Close()

	js, err := nc.JetStream()
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	if _, err := js.AddStream(&nats.StreamConfig{
		Name:     "TEST",
		Subjects: []string{"foo"},
	}); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	js.Publish("foo", []byte("hi"))

	// Block the message callback so the drain cannot complete until we say so.
	blockCh := make(chan struct{})
	sub, err := js.Subscribe("foo", func(m *nats.Msg) {
		<-blockCh
	}, nats.Durable("dur"))
	if err != nil {
		t.Fatalf("Error subscribing: %v", err)
	}

	// Initiate the drain... it won't complete because we have blocked the
	// message callback.
sub.Drain() // Now delete the JS consumer if err := js.DeleteConsumer("TEST", "dur"); err != nil { t.Fatalf("Error deleting consumer: %v", err) } // Now unblock and make sure we get the async error close(blockCh) select { case err := <-errCh: if !strings.Contains(err.Error(), "consumer not found") { t.Fatalf("Unexpected async error: %v", err) } case <-time.After(time.Second): t.Fatal("Did not get async error") } } func TestJetStreamDomainInPubAck(t *testing.T) { conf := createConfFile(t, []byte(` listen: 127.0.0.1:-1 jetstream: {domain: "HUB"} `)) defer os.Remove(conf) s, _ := RunServerWithConfig(conf) defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() cfg := &nats.StreamConfig{ Name: "TEST", Storage: nats.MemoryStorage, Subjects: []string{"foo"}, } if _, err := js.AddStream(cfg); err != nil { t.Fatalf("Unexpected error: %v", err) } pa, err := js.Publish("foo", []byte("msg")) if err != nil { t.Fatalf("Error on publish: %v", err) } if pa.Domain != "HUB" { t.Fatalf("Expected PubAck to have domain of %q, got %q", "HUB", pa.Domain) } } func TestJetStreamStreamAndConsumerDescription(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() streamDesc := "stream description" si, err := js.AddStream(&nats.StreamConfig{ Name: "TEST", Description: streamDesc, Subjects: []string{"foo"}, }) if err != nil { t.Fatalf("Error adding stream: %v", err) } if si.Config.Description != streamDesc { t.Fatalf("Invalid description: %q vs %q", streamDesc, si.Config.Description) } consDesc := "consumer description" ci, err := js.AddConsumer("TEST", &nats.ConsumerConfig{ Description: consDesc, Durable: "dur", DeliverSubject: "bar", }) if err != nil { t.Fatalf("Error adding consumer: %v", err) } if ci.Config.Description != consDesc { t.Fatalf("Invalid description: %q vs %q", consDesc, ci.Config.Description) } } func TestJetStreamMsgSubjectRewrite(t *testing.T) { s := 
RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() if _, err := js.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"foo"}, }); err != nil { t.Fatalf("Error adding stream: %v", err) } sub, err := nc.SubscribeSync(nats.NewInbox()) if err != nil { t.Fatalf("Error on subscribe: %v", err) } if _, err := js.AddConsumer("TEST", &nats.ConsumerConfig{ DeliverSubject: sub.Subject, DeliverPolicy: nats.DeliverAllPolicy, }); err != nil { t.Fatalf("Error adding consumer: %v", err) } if _, err := js.Publish("foo", []byte("msg")); err != nil { t.Fatalf("Error on publish: %v", err) } msg, err := sub.NextMsg(time.Second) if err != nil { t.Fatalf("Did not get message: %v", err) } if msg.Subject != "foo" { t.Fatalf("Subject should be %q, got %q", "foo", msg.Subject) } if string(msg.Data) != "msg" { t.Fatalf("Unexpected data: %q", msg.Data) } } func TestJetStreamPullSubscribeFetchContext(t *testing.T) { withJSCluster(t, "PULLCTX", 3, testJetStreamFetchContext) } func testJetStreamFetchContext(t *testing.T, srvs ...*jsServer) { srv := srvs[0] nc, js := jsClient(t, srv.Server) defer nc.Close() var err error subject := "WQ" _, err = js.AddStream(&nats.StreamConfig{ Name: subject, Replicas: 3, }) if err != nil { t.Fatal(err) } sendMsgs := func(t *testing.T, totalMsgs int) { t.Helper() for i := 0; i < totalMsgs; i++ { payload := fmt.Sprintf("i:%d", i) _, err := js.Publish(subject, []byte(payload)) if err != nil { t.Errorf("Unexpected error: %v", err) } } } expected := 10 sendMsgs(t, expected) sub, err := js.PullSubscribe(subject, "batch-ctx") if err != nil { t.Fatal(err) } defer sub.Unsubscribe() t.Run("ctx background", func(t *testing.T) { _, err = sub.Fetch(expected, nats.Context(context.Background())) if err == nil { t.Fatal("Unexpected success") } if err != nats.ErrNoDeadlineContext { t.Errorf("Expected context deadline error, got: %v", err) } }) t.Run("ctx canceled", func(t *testing.T) { ctx, cancel := 
context.WithTimeout(context.Background(), 2*time.Second) cancel() _, err = sub.Fetch(expected, nats.Context(ctx)) if err == nil { t.Fatal("Unexpected success") } if err != context.Canceled { t.Errorf("Expected context deadline error, got: %v", err) } ctx, cancel = context.WithCancel(context.Background()) cancel() _, err = sub.Fetch(expected, nats.Context(ctx)) if err == nil { t.Fatal("Unexpected success") } if err != context.Canceled { t.Errorf("Expected context deadline error, got: %v", err) } }) t.Run("ctx timeout", func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() msgs, err := sub.Fetch(expected, nats.Context(ctx)) if err != nil { t.Fatalf("Unexpected error: %v", err) } got := len(msgs) if got != expected { t.Fatalf("Got %v messages, expected at least: %v", got, expected) } info, err := sub.ConsumerInfo() if err != nil { t.Error(err) } if info.NumAckPending != expected { t.Errorf("Expected %d pending acks, got: %d", expected, info.NumAckPending) } for _, msg := range msgs { msg.AckSync() } info, err = sub.ConsumerInfo() if err != nil { t.Error(err) } if info.NumAckPending > 0 { t.Errorf("Expected no pending acks, got: %d", info.NumAckPending) } // No messages at this point. ctx, cancel = context.WithTimeout(ctx, 250*time.Millisecond) defer cancel() _, err = sub.Fetch(1, nats.Context(ctx)) if err != context.DeadlineExceeded { t.Errorf("Expected deadline exceeded fetching next message, got: %v", err) } // Send more messages then pull them with a new context expected = 5 sendMsgs(t, expected) ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second) defer cancel() // Single message fetch. msgs, err = sub.Fetch(1, nats.Context(ctx)) if err != nil { t.Fatalf("Unexpected error: %v", err) } if len(msgs) != 1 { t.Fatalf("Expected to receive a single message, got: %d", len(msgs)) } for _, msg := range msgs { msg.Ack() } // Fetch multiple messages. 
expected = 4 msgs, err = sub.Fetch(expected, nats.Context(ctx)) if err != nil { t.Fatalf("Unexpected error: %v", err) } got = len(msgs) if got != expected { t.Fatalf("Got %v messages, expected at least: %v", got, expected) } for _, msg := range msgs { msg.AckSync() } info, err = sub.ConsumerInfo() if err != nil { t.Error(err) } if info.NumAckPending > 0 { t.Errorf("Expected no pending acks, got: %d", info.NumAckPending) } }) t.Run("ctx with cancel", func(t *testing.T) { // New JS context with slightly shorter timeout than default. js, err = nc.JetStream(nats.MaxWait(2 * time.Second)) if err != nil { t.Fatal(err) } sub, err := js.PullSubscribe(subject, "batch-cancel-ctx") if err != nil { t.Fatal(err) } defer sub.Unsubscribe() // Parent context ctx, cancel := context.WithCancel(context.Background()) defer cancel() // Fetch all the messages as needed. info, err := sub.ConsumerInfo() if err != nil { t.Fatal(err) } total := info.NumPending // Child context with timeout with the same duration as JS context timeout // will be created to fetch next message. msgs, err := sub.Fetch(1, nats.Context(ctx)) if err != nil { t.Fatal(err) } if len(msgs) != 1 { t.Fatalf("Expected a message, got: %d", len(msgs)) } for _, msg := range msgs { msg.AckSync() } // Fetch the rest using same cancellation context. expected := int(total - 1) msgs, err = sub.Fetch(expected, nats.Context(ctx)) if err != nil { t.Fatal(err) } if len(msgs) != expected { t.Fatalf("Expected %d messages, got: %d", expected, len(msgs)) } for _, msg := range msgs { msg.AckSync() } // Fetch more messages and wait for timeout since there are none. _, err = sub.Fetch(expected, nats.Context(ctx)) if err == nil { t.Fatal("Unexpected success") } if err != context.DeadlineExceeded { t.Fatalf("Expected deadline exceeded fetching next message, got: %v", err) } // Original cancellation context is not yet canceled, it should still work. 
if ctx.Err() != nil { t.Fatalf("Expected no errors in original cancellation context, got: %v", ctx.Err()) } // Should be possible to use the same context again. sendMsgs(t, 5) // Get the next message to leave 4 pending. var pending uint64 = 4 msgs, err = sub.Fetch(1, nats.Context(ctx)) if err != nil { t.Fatal(err) } if len(msgs) != 1 { t.Fatalf("Expected a message, got: %d", len(msgs)) } for _, msg := range msgs { msg.AckSync() } // Cancel finally. cancel() _, err = sub.Fetch(1, nats.Context(ctx)) if err == nil { t.Fatal("Unexpected success") } if err != context.Canceled { t.Fatalf("Expected deadline exceeded fetching next message, got: %v", err) } info, err = sub.ConsumerInfo() if err != nil { t.Fatal(err) } total = info.NumPending if total != pending { t.Errorf("Expected %d pending messages, got: %d", pending, total) } }) t.Run("MaxWait timeout should return nats error", func(t *testing.T) { _, err := sub.Fetch(1, nats.MaxWait(1*time.Nanosecond)) if !errors.Is(err, nats.ErrTimeout) { t.Fatalf("Expect ErrTimeout, got err=%#v", err) } }) t.Run("Context timeout should return context error", func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) defer cancel() _, err := sub.Fetch(1, nats.Context(ctx)) if !errors.Is(err, context.DeadlineExceeded) { t.Fatalf("Expect context.DeadlineExceeded, got err=%#v", err) } }) } func TestJetStreamSubscribeContextCancel(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() var err error // Create the stream using our client API. 
_, err = js.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"foo", "bar", "baz", "foo.*"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } toSend := 100 for i := 0; i < toSend; i++ { js.Publish("bar", []byte("foo")) } t.Run("cancel unsubscribes and deletes ephemeral", func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ch := make(chan *nats.Msg, 100) sub, err := js.Subscribe("bar", func(msg *nats.Msg) { ch <- msg // Cancel will unsubscribe and remove the subscription // of the consumer. if len(ch) >= 50 { cancel() } }, nats.Context(ctx)) if err != nil { t.Fatal(err) } select { case <-ctx.Done(): case <-time.After(3 * time.Second): t.Fatal("Timed out waiting for context to be canceled") } // Consumer should not be present since unsubscribe already called. checkFor(t, 2*time.Second, 15*time.Millisecond, func() error { info, err := sub.ConsumerInfo() if err != nil && err == nats.ErrConsumerNotFound { return nil } return fmt.Errorf("Consumer still active, got: %v (info=%+v)", err, info) }) got := len(ch) expected := 50 if got < expected { t.Errorf("Expected to receive at least %d messages, got: %d", expected, got) } }) t.Run("unsubscribe cancels child context", func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() sub, err := js.Subscribe("bar", func(msg *nats.Msg) {}, nats.Context(ctx)) if err != nil { t.Fatal(err) } err = sub.Unsubscribe() if err != nil { t.Fatal(err) } // Consumer should not be present since unsubscribe already called. 
checkFor(t, 2*time.Second, 15*time.Millisecond, func() error { info, err := sub.ConsumerInfo() if err != nil && err == nats.ErrConsumerNotFound { return nil } return fmt.Errorf("Consumer still active, got: %v (info=%+v)", err, info) }) }) } func TestJetStreamClusterStreamLeaderChangeClientErr(t *testing.T) { t.Skip("The 2.9 server changed behavior making this test fail now") cfg := &nats.StreamConfig{ Name: "TEST", Subjects: []string{"foo"}, Replicas: 3, } withJSClusterAndStream(t, "R3S", 3, cfg, func(t *testing.T, stream string, servers ...*jsServer) { // We want to make sure the worse thing seen by the lower levels during a leadership change is NoResponders. // We will have three concurrent contexts going on. // 1. Leadership Changes every 500ms. // 2. Publishing messages to the stream every 10ms. // 3. StreamInfo calls every 15ms. expires := time.Now().Add(5 * time.Second) var wg sync.WaitGroup wg.Add(3) randServer := func() *server.Server { return servers[mrand.Intn(len(servers))].Server } // Leadership changes. go func() { defer wg.Done() nc, js := jsClient(t, randServer()) defer nc.Close() sds := fmt.Sprintf(server.JSApiStreamLeaderStepDownT, "TEST") for time.Now().Before(expires) { time.Sleep(500 * time.Millisecond) si, err := js.StreamInfo("TEST") expectOk(t, err) _, err = nc.Request(sds, nil, time.Second) expectOk(t, err) // Wait on new leader. checkFor(t, 5*time.Second, 50*time.Millisecond, func() error { si, err = js.StreamInfo("TEST") if err != nil { return err } if si.Cluster.Leader == "" { return errors.New("No leader yet") } return nil }) } }() // Published every 10ms toc := 0 go func() { defer wg.Done() nc, js := jsClient(t, randServer()) defer nc.Close() for time.Now().Before(expires) { time.Sleep(10 * time.Millisecond) _, err := js.Publish("foo", []byte("OK")) if err == nats.ErrTimeout { toc++ continue } expectOk(t, err) } }() // StreamInfo calls. 
		go func() {
			defer wg.Done()
			nc, js := jsClient(t, randServer())
			defer nc.Close()

			for time.Now().Before(expires) {
				time.Sleep(15 * time.Millisecond)
				_, err := js.StreamInfo("TEST")
				expectOk(t, err)
			}
		}()

		wg.Wait()

		// An occasional timeout can occur, but should be 0 or maybe 1 with ~10 leadership changes per test run.
		if toc > 1 {
			t.Fatalf("Got too many timeout errors from publish: %d", toc)
		}
	})
}

// TestJetStreamConsumerConfigReplicasAndMemStorage verifies that the
// Replicas and MemoryStorage consumer options are sent in the consumer
// create request and honored by the cluster.
func TestJetStreamConsumerConfigReplicasAndMemStorage(t *testing.T) {
	withJSCluster(t, "CR", 3, func(t *testing.T, nodes ...*jsServer) {
		nc, js := jsClient(t, nodes[0].Server)
		defer nc.Close()

		if _, err := js.AddStream(&nats.StreamConfig{
			Name:     "CR",
			Subjects: []string{"foo"},
			Replicas: 3,
		}); err != nil {
			t.Fatalf("Error adding stream: %v", err)
		}

		// We can't really check if the consumer ends-up with memory storage or not.
		// We are simply going to create a NATS subscription on the request subject
		// and make sure that the request contains "mem_storage:true".
		sub, err := nc.SubscribeSync("$JS.API.CONSUMER.CREATE.CR.dur")
		if err != nil {
			t.Fatalf("Error on subscribe: %v", err)
		}
		ci, err := js.AddConsumer("CR", &nats.ConsumerConfig{
			Durable:        "dur",
			DeliverSubject: "bar",
			Replicas:       1,
			MemoryStorage:  true,
		})
		if err != nil {
			t.Fatalf("Error adding consumer: %v", err)
		}
		// A single-replica consumer reports no peer replicas.
		if n := len(ci.Cluster.Replicas); n > 0 {
			t.Fatalf("Expected replicas to be 1, got %+v", ci.Cluster)
		}

		msg, err := sub.NextMsg(time.Second)
		if err != nil {
			t.Fatalf("Error on next msg: %v", err)
		}
		if str := string(msg.Data); !strings.Contains(str, "mem_storage\":true") {
			t.Fatalf("Does not look like the request asked for memory storage: %s", str)
		}
	})
}

// TestJetStreamRePublish verifies stream RePublish: every stored message is
// republished to the RP.> destination with the expected JetStream headers
// (stream, sequence, and per-subject last sequence).
func TestJetStreamRePublish(t *testing.T) {
	s := RunBasicJetStreamServer()
	defer shutdownJSServerAndRemoveStorage(t, s)

	nc, js := jsClient(t, s)
	defer nc.Close()

	if _, err := js.AddStream(&nats.StreamConfig{
		Name:     "RP",
		Storage:  nats.MemoryStorage,
		Subjects: []string{"foo", "bar", "baz"},
		RePublish: &nats.RePublish{
			Source:      ">",
			Destination: "RP.>",
		},
	}); err != nil {
		t.Fatalf("Error adding stream: %v", err)
	}

	sub, err := nc.SubscribeSync("RP.>")
	if err != nil {
		t.Fatalf("Error on subscribe: %v", err)
	}

	msg, toSend := []byte("OK TO REPUBLISH?"), 100
	for i := 0; i < toSend; i++ {
		js.Publish("foo", msg)
		js.Publish("bar", msg)
		js.Publish("baz", msg)
	}

	lseq := map[string]int{
		"foo": 0,
		"bar": 0,
		"baz": 0,
	}
	for i := 1; i <= toSend; i++ {
		m, err := sub.NextMsg(time.Second)
		if err != nil {
			t.Fatalf("Error on next msg: %v", err)
		}

		// Grab info from Header
		stream := m.Header.Get(nats.JSStream)
		if stream != "RP" {
			t.Fatalf("Unexpected header: %+v", m.Header)
		}

		// Make sure sequence is correct.
		seq, err := strconv.Atoi(m.Header.Get(nats.JSSequence))
		if err != nil {
			t.Fatalf("Error decoding sequence for %s", m.Header.Get(nats.JSSequence))
		}
		if seq != i {
			t.Fatalf("Expected sequence to be %v, got %v", i, seq)
		}

		// Make sure last sequence matches last seq we received on this subject.
		last, err := strconv.Atoi(m.Header.Get(nats.JSLastSequence))
		if err != nil {
			t.Fatalf("Error decoding last sequence for %s", m.Header.Get(nats.JSLastSequence))
		}
		if last != lseq[m.Subject] {
			t.Fatalf("Expected last sequence to be %v, got %v", lseq[m.Subject], last)
		}
		lseq[m.Subject] = seq
	}
}

func TestJetStreamDirectGetMsg(t *testing.T) {
	// Using standalone server here, we are testing the client side API, not
	// the server feature, which has tests checking it works in cluster mode.
	s := RunBasicJetStreamServer()
	defer shutdownJSServerAndRemoveStorage(t, s)

	nc, js := jsClient(t, s)
	defer nc.Close()

	si, err := js.AddStream(&nats.StreamConfig{
		Name:     "DGM",
		Storage:  nats.MemoryStorage,
		Subjects: []string{"foo", "bar"},
	})
	if err != nil {
		t.Fatalf("Error adding stream: %v", err)
	}

	// send publishes body on subj, failing the test on error.
	send := func(subj, body string) {
		t.Helper()
		if _, err := js.Publish(subj, []byte(body)); err != nil {
			t.Fatalf("Error on publish: %v", err)
		}
	}
	send("foo", "a")
	send("foo", "b")
	send("foo", "c")
	send("bar", "d")
	send("foo", "e")

	// Without AllowDirect, we should get no responders
	if _, err := js.GetMsg("DGM", 1, nats.DirectGet()); err != nats.ErrNoResponders {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Update stream:
	si.Config.AllowDirect = true
	si, err = js.UpdateStream(&si.Config)
	if err != nil {
		t.Fatalf("Error updating stream: %v", err)
	}
	if !si.Config.AllowDirect {
		t.Fatalf("AllowDirect should be true: %+v", si)
	}

	// check fetches a message (by sequence, or last-by-subject when
	// useGetLast is set) and asserts subject, sequence, timestamp and body.
	check := func(seq uint64, opt nats.JSOpt, useGetLast bool, expectedSubj string, expectedSeq uint64, expectedBody string) {
		t.Helper()
		var msg *nats.RawStreamMsg
		var err error
		if useGetLast {
			msg, err = js.GetLastMsg("DGM", expectedSubj, []nats.JSOpt{opt}...)
		} else {
			msg, err = js.GetMsg("DGM", seq, []nats.JSOpt{opt}...)
		}
		if err != nil {
			t.Fatalf("Unable to get message: %v", err)
		}
		if msg.Subject != expectedSubj {
			t.Fatalf("Expected subject %q, got %q", expectedSubj, msg.Subject)
		}
		if msg.Sequence != expectedSeq {
			t.Fatalf("Expected sequence %v, got %v", expectedSeq, msg.Sequence)
		}
		if msg.Time.IsZero() {
			t.Fatal("Expected timestamp, did not get one")
		}
		if b := string(msg.Data); b != expectedBody {
			t.Fatalf("Expected body %q, got %q", expectedBody, b)
		}
	}
	check(0, nats.DirectGetNext("bar"), false, "bar", 4, "d")
	check(0, nats.DirectGet(), true, "foo", 5, "e")
	check(0, nats.DirectGetNext("foo"), false, "foo", 1, "a")
	check(4, nats.DirectGetNext("foo"), false, "foo", 5, "e")
	check(2, nats.DirectGetNext("foo"), false, "foo", 2, "b")

	// Headers set on publish must come back on a direct get.
	msg := nats.NewMsg("foo")
	msg.Header.Set("MyHeader", "MyValue")
	if _, err := js.PublishMsg(msg); err != nil {
		t.Fatalf("Error publishing message: %v", err)
	}
	r, err := js.GetMsg("DGM", 6, nats.DirectGet())
	if err != nil {
		t.Fatalf("Error getting message: %v", err)
	}
	if v := r.Header.Get("MyHeader"); v != "MyValue" {
		t.Fatalf("Expected header to be present, was not: %v", r.Header)
	}

	// Check for not found
	if _, err := js.GetMsg("DGM", 100, nats.DirectGet()); err != nats.ErrMsgNotFound {
		t.Fatalf("Expected not found error, got %v", err)
	}
	// Or invalid request
	if _, err := js.GetMsg("DGM", 0, nats.DirectGet()); err == nil || !strings.Contains(err.Error(), "Empty Request") {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Test direct get by subject by trying to get 'bar' directly
	r, err = js.GetLastMsg("DGM", "bar", nats.DirectGet())
	if err != nil {
		t.Fatalf("Error getting message: %v", err)
	}
	if r.Subject != "bar" {
		t.Fatalf("expected subject to be 'bar', got: %v", r.Subject)
	}
	if string(r.Data) != "d" {
		t.Fatalf("expected data to be 'd', got: %v", string(r.Data))
	}
}

// TestJetStreamConsumerReplicasOption verifies the nats.ConsumerReplicas
// subscribe option overrides the stream's replica count for the consumer.
func TestJetStreamConsumerReplicasOption(t *testing.T) {
	withJSCluster(t, "CR", 3, func(t *testing.T, nodes ...*jsServer) {
		nc, js := jsClient(t, nodes[0].Server)
		defer nc.Close()

		if _, err :=
js.AddStream(&nats.StreamConfig{ Name: "ConsumerReplicasTest", Subjects: []string{"foo"}, Replicas: 3, }); err != nil { t.Fatalf("Error adding stream: %v", err) } // Subscribe to the stream with a durable consumer "bar" and replica set to 1. cb := func(msg *nats.Msg) {} _, err := js.Subscribe("foo", cb, nats.Durable("bar"), nats.ConsumerReplicas(1)) if err != nil { t.Fatalf("Error on subscribe: %v", err) } // Get consumer info consInfo, err := js.ConsumerInfo("ConsumerReplicasTest", "bar") if err != nil { t.Fatalf("Error getting consumer info: %v", err) } // Check if the number of replicas is the same as we provided. if consInfo.Config.Replicas != 1 { t.Fatalf("Expected consumer replica to be %v, got %+v", 1, consInfo.Config.Replicas) } }) } func TestJetStreamMsgAckShouldErrForConsumerAckNone(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() if _, err := js.AddStream(&nats.StreamConfig{ Name: "ACKNONE", Storage: nats.MemoryStorage, Subjects: []string{"foo"}, }); err != nil { t.Fatalf("Error adding stream: %v", err) } if _, err := js.Publish("foo", []byte("hello")); err != nil { t.Fatalf("Error on publish: %v", err) } sub, err := js.SubscribeSync("foo", nats.OrderedConsumer()) if err != nil { t.Fatalf("Error on subscribe: %v", err) } msg, err := sub.NextMsg(time.Second) if err != nil { t.Fatalf("Error getting message: %v", err) } if err := msg.Ack(); err != nats.ErrCantAckIfConsumerAckNone { t.Fatalf("Expected error indicating that sub is AckNone, got %v", err) } } func TestJetStreamOrderedConsumerRecreateAfterReconnect(t *testing.T) { s := RunBasicJetStreamServer() // monitor for ErrConsumerNotActive error and suppress logging hbMissed := make(chan struct{}, 10) errHandler := func(c *nats.Conn, s *nats.Subscription, err error) { if !errors.Is(err, nats.ErrConsumerNotActive) { t.Fatalf("Unexpected error: %v", err) } hbMissed <- struct{}{} } nc, js := jsClient(t, s, 
nats.ErrorHandler(errHandler)) defer nc.Close() if _, err := js.AddStream(&nats.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}); err != nil { t.Fatalf("Unexpected error: %v", err) } sub, err := js.SubscribeSync("FOO.A", nats.OrderedConsumer(), nats.IdleHeartbeat(100*time.Millisecond)) if err != nil { t.Fatalf("Unexpected error: %v", err) } consInfo, err := sub.ConsumerInfo() if err != nil { t.Fatalf("Unexpected error: %v", err) } consName := consInfo.Name // validate that the generated name of the consumer is 8 // characters long (shorter than standard nuid) if len(consName) != 8 { t.Fatalf("Unexpected consumer name: %q", consName) } if _, err := js.Publish("FOO.A", []byte("msg 1")); err != nil { t.Fatalf("Unexpected error: %v", err) } msg, err := sub.NextMsg(2 * time.Second) if err != nil { t.Fatalf("Unexpected error: %v", err) } if string(msg.Data) != "msg 1" { t.Fatalf("Invalid msg value; want: 'msg 1'; got: %q", string(msg.Data)) } apiSub, err := nc.SubscribeSync("$JS.API.CONSUMER.*.>") if err != nil { t.Fatalf("Unexpected error: %v", err) } // restart the server s = restartBasicJSServer(t, s) defer shutdownJSServerAndRemoveStorage(t, s) // wait until we miss heartbeat select { case <-hbMissed: case <-time.After(10 * time.Second): t.Fatalf("Did not receive consumer not active error") } consDeleteMsg, err := apiSub.NextMsg(2 * time.Second) if err != nil { t.Fatalf("Unexpected error: %v", err) } if !strings.HasPrefix(consDeleteMsg.Subject, "$JS.API.CONSUMER.") { t.Fatalf("Unexpected message subject: %q", consDeleteMsg.Subject) } consCreateMsg, err := apiSub.NextMsg(2 * time.Second) if err != nil { t.Fatalf("Unexpected error: %v", err) } if !strings.HasPrefix(consCreateMsg.Subject, "$JS.API.CONSUMER.") { t.Fatalf("Unexpected message subject: %q", consCreateMsg.Subject) } if _, err := js.Publish("FOO.A", []byte("msg 2")); err != nil { t.Fatalf("Unexpected error: %v", err) } msg, err = sub.NextMsg(2 * time.Second) if err != nil { t.Fatalf("Unexpected error: 
%v", err) } consInfo, err = sub.ConsumerInfo() if err != nil { t.Fatalf("Unexpected error: %v", err) } if consInfo.Name == consName || len(consInfo.Name) != 8 { t.Fatalf("Unexpected consumer name: %q", consInfo.Name) } // make sure we pick up where we left off if string(msg.Data) != "msg 2" { t.Fatalf("Invalid msg value; want: 'msg 2'; got: %q", string(msg.Data)) } } func TestJetStreamCreateStreamDiscardPolicy(t *testing.T) { tests := []struct { name string discardPolicy nats.DiscardPolicy discardNewPerSubject bool maxMsgsPerSubject int64 withAPIError bool }{ { name: "with discard policy 'new' and discard new per subject set", discardPolicy: nats.DiscardNew, discardNewPerSubject: true, maxMsgsPerSubject: 100, }, { name: "with discard policy 'new' and discard new per subject not set", discardPolicy: nats.DiscardNew, discardNewPerSubject: false, maxMsgsPerSubject: 100, }, { name: "with discard policy 'old' and discard new per subject set", discardPolicy: nats.DiscardOld, discardNewPerSubject: true, maxMsgsPerSubject: 100, withAPIError: true, }, { name: "with discard policy 'old' and discard new per subject not set", discardPolicy: nats.DiscardOld, discardNewPerSubject: true, maxMsgsPerSubject: 100, withAPIError: true, }, { name: "with discard policy 'new' and discard new per subject set and max msgs per subject not set", discardPolicy: nats.DiscardNew, discardNewPerSubject: true, withAPIError: true, }, } s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() for i, test := range tests { t.Run(test.name, func(t *testing.T) { streamName := fmt.Sprintf("FOO%d", i) _, err := js.AddStream(&nats.StreamConfig{ Name: streamName, Discard: test.discardPolicy, DiscardNewPerSubject: test.discardNewPerSubject, MaxMsgsPerSubject: test.maxMsgsPerSubject, }) if test.withAPIError { var apiErr *nats.APIError if err == nil { t.Fatalf("Expected error, got nil") } if ok := errors.As(err, &apiErr); !ok { t.Fatalf("Expected 
nats.APIError, got %v", err) } return } if err != nil { t.Fatalf("Unexpected error: %v", err) } info, err := js.StreamInfo(streamName) if err != nil { t.Fatalf("Unexpected error: %v", err) } if info.Config.Discard != test.discardPolicy { t.Fatalf("Invalid value of discard policy; want: %s; got: %s", test.discardPolicy.String(), info.Config.Discard.String()) } if info.Config.DiscardNewPerSubject != test.discardNewPerSubject { t.Fatalf("Invalid value of discard_new_per_subject; want: %t; got: %t", test.discardNewPerSubject, info.Config.DiscardNewPerSubject) } }) } } func TestJetStreamStreamInfoAlternates(t *testing.T) { withJSCluster(t, "R3S", 3, func(t *testing.T, nodes ...*jsServer) { nc, js := jsClient(t, nodes[0].Server) defer nc.Close() _, err := js.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"foo"}, }) expectOk(t, err) // Create a mirror as well. _, err = js.AddStream(&nats.StreamConfig{ Name: "MIRROR", Mirror: &nats.StreamSource{ Name: "TEST", }, }) expectOk(t, err) si, err := js.StreamInfo("TEST") expectOk(t, err) if len(si.Alternates) != 2 { t.Fatalf("Expected 2 alternates, got %d", len(si.Alternates)) } }) } func TestJetStreamSubscribeConsumerName(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() var err error // Create the stream using our client API. _, err = js.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"foo", "bar", "baz", "foo.*"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } _, err = js.Publish("foo", []byte("first")) if err != nil { t.Fatal(err) } // Lookup the stream for testing. 
_, err = js.StreamInfo("TEST") if err != nil { t.Fatalf("stream lookup failed: %v", err) } sub, err := js.SubscribeSync("foo", nats.ConsumerName("my-ephemeral")) if err != nil { t.Fatal(err) } cinfo, err := sub.ConsumerInfo() if err != nil { t.Fatal(err) } got := cinfo.Config.Name expected := "my-ephemeral" if got != expected { t.Fatalf("Expected: %v, got: %v", expected, got) } // Confirm that this is a durable. got = cinfo.Config.Durable expected = "" if got != expected { t.Fatalf("Expected: %v, got: %v", expected, got) } _, err = sub.NextMsg(1 * time.Second) if err != nil { t.Fatal(err) } // ConsumerName will be ignored in case a durable name has been set. sub, err = js.SubscribeSync("foo", nats.Durable("durable"), nats.ConsumerName("custom-name")) if err != nil { t.Fatal(err) } cinfo, err = sub.ConsumerInfo() if err != nil { t.Fatal(err) } got = cinfo.Config.Name expected = "durable" if got != expected { t.Fatalf("Expected: %v, got: %v", expected, got) } got = cinfo.Config.Durable expected = "durable" if got != expected { t.Fatalf("Expected: %v, got: %v", expected, got) } _, err = sub.NextMsg(1 * time.Second) if err != nil { t.Fatal(err) } // Default Ephemeral name should be short like in the server. sub, err = js.SubscribeSync("foo", nats.ConsumerName("")) if err != nil { t.Fatal(err) } cinfo, err = sub.ConsumerInfo() if err != nil { t.Fatal(err) } expectedSize := 8 result := len(cinfo.Config.Name) if result != expectedSize { t.Fatalf("Expected: %v, got: %v", expectedSize, result) } } func TestJetStreamOrderedConsumerDeleteAssets(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() var err error // For capturing errors. errCh := make(chan error, 1) nc.SetErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, err error) { errCh <- err }) // Create a sample asset. 
mlen := 128 * 1024 msg := make([]byte, mlen) createStream := func() { t.Helper() _, err = js.AddStream(&nats.StreamConfig{ Name: "OBJECT", Subjects: []string{"a"}, Storage: nats.MemoryStorage, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } // Now send into the stream as chunks. const chunkSize = 256 for i := 0; i < mlen; i += chunkSize { var chunk []byte if mlen-i <= chunkSize { chunk = msg[i:] } else { chunk = msg[i : i+chunkSize] } js.PublishAsync("a", chunk) } select { case <-js.PublishAsyncComplete(): case <-time.After(time.Second): t.Fatalf("Did not receive completion signal") } } t.Run("remove stream, expect error", func(t *testing.T) { createStream() sub, err := js.SubscribeSync("a", nats.OrderedConsumer(), nats.IdleHeartbeat(200*time.Millisecond)) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer sub.Unsubscribe() // Since we are sync we will be paused here due to flow control. time.Sleep(100 * time.Millisecond) // Now delete the asset and make sure we get an error. if err := js.DeleteStream("OBJECT"); err != nil { t.Fatalf("Unexpected error: %v", err) } // Make sure we get an error. select { case err := <-errCh: if !errors.Is(err, nats.ErrStreamNotFound) { t.Fatalf("Got wrong error, wanted %v, got %v", nats.ErrStreamNotFound, err) } case <-time.After(time.Second): t.Fatalf("Did not receive err message as expected") } }) t.Run("remove consumer, expect it to be recreated", func(t *testing.T) { createStream() createConsSub, err := nc.SubscribeSync("$JS.API.CONSUMER.CREATE.OBJECT.*.a") if err != nil { t.Fatalf("Unexpected error: %v", err) } defer createConsSub.Unsubscribe() // Again here the IdleHeartbeat is not required, just overriding top shorten test time. 
sub, err := js.SubscribeSync("a", nats.OrderedConsumer(), nats.IdleHeartbeat(200*time.Millisecond)) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer sub.Unsubscribe() createConsMsg, err := createConsSub.NextMsg(time.Second) if err != nil { t.Fatalf("Unexpected error: %v", err) } if !strings.Contains(string(createConsMsg.Data), `"stream_name":"OBJECT"`) { t.Fatalf("Invalid message on create consumer subject: %q", string(createConsMsg.Data)) } time.Sleep(100 * time.Millisecond) ci, err := sub.ConsumerInfo() if err != nil { t.Fatalf("Unexpected error: %v", err) } consName := ci.Name if err := js.DeleteConsumer("OBJECT", consName); err != nil { t.Fatalf("Unexpected error: %v", err) } createConsMsg, err = createConsSub.NextMsg(time.Second) if err != nil { t.Fatalf("Unexpected error: %v", err) } if !strings.Contains(string(createConsMsg.Data), `"stream_name":"OBJECT"`) { t.Fatalf("Invalid message on create consumer subject: %q", string(createConsMsg.Data)) } time.Sleep(100 * time.Millisecond) ci, err = sub.ConsumerInfo() if err != nil { t.Fatalf("Unexpected error: %v", err) } newConsName := ci.Name if consName == newConsName { t.Fatalf("Consumer should be recreated, but consumer name is the same") } }) } // We want to make sure we do the right thing with lots of concurrent queue durable consumer requests. // One should win and the others should share the delivery subject with the first one who wins. func TestJetStreamConcurrentQueueDurablePushConsumers(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() var err error // Create stream. _, err = js.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"foo"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } // Now create 10 durables concurrently. 
subs := make([]*nats.Subscription, 0, 10) var wg sync.WaitGroup mx := &sync.Mutex{} for i := 0; i < 10; i++ { wg.Add(1) go func() { defer wg.Done() sub, _ := js.QueueSubscribeSync("foo", "bar") mx.Lock() subs = append(subs, sub) mx.Unlock() }() } // Wait for all the consumers. wg.Wait() si, err := js.StreamInfo("TEST") if err != nil { t.Fatalf("Unexpected error: %v", err) } if si.State.Consumers != 1 { t.Fatalf("Expected exactly one consumer, got %d", si.State.Consumers) } // Now send some messages and make sure they are distributed. total := 1000 for i := 0; i < total; i++ { js.Publish("foo", []byte("Hello")) } timeout := time.Now().Add(2 * time.Second) got := 0 for time.Now().Before(timeout) { got = 0 for _, sub := range subs { pending, _, _ := sub.Pending() // If a single sub has the total, then probably something is not right. if pending == total { t.Fatalf("A single member should not have gotten all messages") } got += pending } if got == total { // We are done! return } } t.Fatalf("Expected %v messages, got only %v", total, got) } func TestJetStreamAckTokens(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() var err error // Create the stream using our client API. 
_, err = js.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"foo"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } sub, err := js.SubscribeSync("foo") if err != nil { t.Fatalf("Error on subscribe: %v", err) } now := time.Now() for _, test := range []struct { name string expected *nats.MsgMetadata str string end string err bool }{ { "valid token size but not js ack", nil, "1.2.3.4.5.6.7.8.9", "", true, }, { "valid token size but not js ack", nil, "1.2.3.4.5.6.7.8.9.10.11.12", "", true, }, { "invalid token size", nil, "$JS.ACK.3.4.5.6.7.8", "", true, }, { "invalid token size", nil, "$JS.ACK.3.4.5.6.7.8.9.10", "", true, }, { "v1 style", &nats.MsgMetadata{ Stream: "TEST", Consumer: "cons", NumDelivered: 1, Sequence: nats.SequencePair{ Stream: 2, Consumer: 3, }, Timestamp: now, NumPending: 4, }, "", "", false, }, { "v2 style no domain with hash", &nats.MsgMetadata{ Stream: "TEST", Consumer: "cons", NumDelivered: 1, Sequence: nats.SequencePair{ Stream: 2, Consumer: 3, }, Timestamp: now, NumPending: 4, }, "_.ACCHASH.", ".abcde", false, }, { "v2 style with domain and hash", &nats.MsgMetadata{ Domain: "HUB", Stream: "TEST", Consumer: "cons", NumDelivered: 1, Sequence: nats.SequencePair{ Stream: 2, Consumer: 3, }, Timestamp: now, NumPending: 4, }, "HUB.ACCHASH.", ".abcde", false, }, { "more than 12 tokens", &nats.MsgMetadata{ Domain: "HUB", Stream: "TEST", Consumer: "cons", NumDelivered: 1, Sequence: nats.SequencePair{ Stream: 2, Consumer: 3, }, Timestamp: now, NumPending: 4, }, "HUB.ACCHASH.", ".abcde.ghijk.lmnop", false, }, } { t.Run(test.name, func(t *testing.T) { msg := nats.NewMsg("foo") msg.Sub = sub if test.err { msg.Reply = test.str } else { msg.Reply = fmt.Sprintf("$JS.ACK.%sTEST.cons.1.2.3.%v.4%s", test.str, now.UnixNano(), test.end) } meta, err := msg.Metadata() if test.err { if err == nil || meta != nil { t.Fatalf("Expected error for content: %q, got meta=%+v err=%v", test.str, meta, err) } // Expected error, we are done return } if err 
!= nil { t.Fatalf("Expected: %+v with reply: %q, got error %v", test.expected, msg.Reply, err) } if meta.Timestamp.UnixNano() != now.UnixNano() { t.Fatalf("Timestamp is bad: %v vs %v", now.UnixNano(), meta.Timestamp.UnixNano()) } meta.Timestamp = time.Time{} test.expected.Timestamp = time.Time{} if !reflect.DeepEqual(test.expected, meta) { t.Fatalf("Expected %+v, got %+v", test.expected, meta) } }) } } func TestJetStreamTracing(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() ctr := 0 js, err := nc.JetStream(&nats.ClientTrace{ RequestSent: func(subj string, payload []byte) { ctr++ if subj != "$JS.API.STREAM.CREATE.X" { t.Fatalf("Expected sent trace to %s: got: %s", "$JS.API.STREAM.CREATE.X", subj) } }, ResponseReceived: func(subj string, payload []byte, hdr nats.Header) { ctr++ if subj != "$JS.API.STREAM.CREATE.X" { t.Fatalf("Expected received trace to %s: got: %s", "$JS.API.STREAM.CREATE.X", subj) } }, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } _, err = js.AddStream(&nats.StreamConfig{Name: "X"}) if err != nil { t.Fatalf("add stream failed: %s", err) } if ctr != 2 { t.Fatalf("did not receive all trace events: %d", ctr) } } func TestJetStreamExpiredPullRequests(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() var err error _, err = js.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"foo"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } sub, err := js.PullSubscribe("foo", "bar", nats.PullMaxWaiting(2)) if err != nil { t.Fatalf("Error on subscribe: %v", err) } // Make sure that we reject batch < 1 if _, err := sub.Fetch(0); err == nil { t.Fatal("Expected error, did not get one") } if _, err := sub.Fetch(-1); err == nil { t.Fatal("Expected error, did not get one") } // Send 2 fetch requests 
for i := 0; i < 2; i++ { if _, err = sub.Fetch(1, nats.MaxWait(15*time.Millisecond)); err == nil { t.Fatalf("Expected error, got none") } } // Wait before the above expire time.Sleep(50 * time.Millisecond) batches := []int{1, 10} for _, bsz := range batches { start := time.Now() _, err = sub.Fetch(bsz, nats.MaxWait(250*time.Millisecond)) dur := time.Since(start) if err == nil || dur < 50*time.Millisecond { t.Fatalf("Expected error and wait for 250ms, got err=%v and dur=%v", err, dur) } } } func TestJetStreamSyncSubscribeWithMaxAckPending(t *testing.T) { opts := natsserver.DefaultTestOptions opts.Port = -1 opts.JetStream = true opts.JetStreamLimits.MaxAckPending = 123 s := RunServerWithOptions(&opts) defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() if _, err := js.AddStream(&nats.StreamConfig{Name: "MAX_ACK_PENDING", Subjects: []string{"foo"}}); err != nil { t.Fatalf("Error adding stream: %v", err) } // By default, the sync subscription will be created with a MaxAckPending equal // to the internal sync queue len, which is 64K. So that should error out // and make sure we get the actual limit checkSub := func(pull bool) { var sub *nats.Subscription var err error if pull { _, err = js.PullSubscribe("foo", "bar") } else { _, err = js.SubscribeSync("foo") } if err == nil || !strings.Contains(err.Error(), "system limit of 123") { t.Fatalf("Unexpected error: %v", err) } // But it should work if we use MaxAckPending() with lower value if pull { sub, err = js.PullSubscribe("foo", "bar", nats.MaxAckPending(64)) } else { sub, err = js.SubscribeSync("foo", nats.MaxAckPending(64)) } if err != nil { t.Fatalf("Unexpected error: %v", err) } sub.Unsubscribe() } checkSub(false) checkSub(true) } func TestJetStreamClusterPlacement(t *testing.T) { // There used to be a test here that would not work because it would require // all servers in the cluster to know about each other tags. 
So we will simply // verify that if a stream is configured with placement and tags, the proper // "stream create" request is sent. s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() sub, err := nc.SubscribeSync("$JS.API.STREAM.CREATE.TEST") if err != nil { t.Fatalf("Error on sub: %v", err) } js.AddStream(&nats.StreamConfig{ Name: "TEST", Placement: &nats.Placement{ Tags: []string{"my_tag"}, }, }) msg, err := sub.NextMsg(time.Second) if err != nil { t.Fatalf("Error getting stream create request: %v", err) } var req nats.StreamConfig if err := json.Unmarshal(msg.Data, &req); err != nil { t.Fatalf("Unmarshal error: %v", err) } if req.Placement == nil { t.Fatal("Expected placement, did not get it") } if n := len(req.Placement.Tags); n != 1 { t.Fatalf("Expected 1 tag, got %v", n) } if v := req.Placement.Tags[0]; v != "my_tag" { t.Fatalf("Unexpected tag: %q", v) } } func TestJetStreamConsumerMemoryStorage(t *testing.T) { opts := natsserver.DefaultTestOptions opts.Port = -1 opts.JetStream = true s := RunServerWithOptions(&opts) defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() if _, err := js.AddStream(&nats.StreamConfig{Name: "STR", Subjects: []string{"foo"}}); err != nil { t.Fatalf("Error adding stream: %v", err) } // Pull ephemeral consumer with memory storage. sub, err := js.PullSubscribe("foo", "", nats.ConsumerMemoryStorage()) if err != nil { t.Fatalf("Error on subscribe: %v", err) } consInfo, err := sub.ConsumerInfo() if err != nil { t.Fatalf("Error getting consumer info: %v", err) } if !consInfo.Config.MemoryStorage { t.Fatalf("Expected memory storage to be %v, got %+v", true, consInfo.Config.MemoryStorage) } // Create a sync subscription with an in-memory ephemeral consumer. 
sub, err = js.SubscribeSync("foo", nats.ConsumerMemoryStorage()) if err != nil { t.Fatalf("Error on subscribe: %v", err) } consInfo, err = sub.ConsumerInfo() if err != nil { t.Fatalf("Error getting consumer info: %v", err) } if !consInfo.Config.MemoryStorage { t.Fatalf("Expected memory storage to be %v, got %+v", true, consInfo.Config.MemoryStorage) } // Async subscription with an in-memory ephemeral consumer. cb := func(msg *nats.Msg) {} sub, err = js.Subscribe("foo", cb, nats.ConsumerMemoryStorage()) if err != nil { t.Fatalf("Error on subscribe: %v", err) } consInfo, err = sub.ConsumerInfo() if err != nil { t.Fatalf("Error getting consumer info: %v", err) } if !consInfo.Config.MemoryStorage { t.Fatalf("Expected memory storage to be %v, got %+v", true, consInfo.Config.MemoryStorage) } } func TestJetStreamStreamInfoWithSubjectDetails(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() var err error _, err = js.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"test.*"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } // Publish on enough subjects to exercise the pagination payload := make([]byte, 10) for i := 0; i < 100001; i++ { _, err := js.Publish(fmt.Sprintf("test.%d", i), payload) if err != nil { t.Fatalf("Unexpected error: %v", err) } } // Check that passing a filter returns the subject details result, err := js.StreamInfo("TEST", &nats.StreamInfoRequest{SubjectsFilter: ">"}) if err != nil { t.Fatalf("Unexpected error: %v", err) } if len(result.State.Subjects) != 100001 { t.Fatalf("expected 100001 subjects in the stream, but got %d instead", len(result.State.Subjects)) } // Check that passing no filter does not return any subject details result, err = js.StreamInfo("TEST") if err != nil { t.Fatalf("Unexpected error: %v", err) } if len(result.State.Subjects) != 0 { t.Fatalf("expected 0 subjects details from StreamInfo, but got %d instead", 
len(result.State.Subjects)) } } func TestStreamNameBySubject(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() var err error _, err = js.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"test.*"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } for _, test := range []struct { name string streamName string err error }{ {name: "valid wildcard lookup", streamName: "test.*", err: nil}, {name: "valid explicit lookup", streamName: "test.a", err: nil}, {name: "lookup on not existing stream", streamName: "not.existing", err: nats.ErrNoMatchingStream}, } { stream, err := js.StreamNameBySubject(test.streamName) if err != test.err { t.Fatalf("expected %v, got %v", test.err, err) } if stream != "TEST" && err == nil { t.Fatalf("returned stream name should be 'TEST'") } } } func TestJetStreamTransform(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() _, err := js.AddStream(&nats.StreamConfig{ Name: "ORIGIN", Subjects: []string{"test"}, SubjectTransform: &nats.SubjectTransformConfig{Source: ">", Destination: "transformed.>"}, Storage: nats.MemoryStorage, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } err = nc.Publish("test", []byte("1")) if err != nil { t.Fatalf("Unexpected error: %v", err) } _, err = js.AddStream(&nats.StreamConfig{ Subjects: []string{}, Name: "SOURCING", Sources: []*nats.StreamSource{{Name: "ORIGIN", SubjectTransforms: []nats.SubjectTransformConfig{{Source: ">", Destination: "fromtest.>"}}}}, Storage: nats.MemoryStorage, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } // Create a sync subscription with an in-memory ephemeral consumer. 
sub, err := js.SubscribeSync("fromtest.>", nats.ConsumerMemoryStorage(), nats.BindStream("SOURCING")) if err != nil { t.Fatalf("Error on subscribe: %v", err) } m, err := sub.NextMsg(time.Second) if err != nil { t.Fatalf("Unexpected error: %v", err) } if m.Subject != "fromtest.transformed.test" { t.Fatalf("the subject of the message doesn't match the expected fromtest.transformed.test: %s", m.Subject) } } func TestPullConsumerFetchRace(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, js := jsClient(t, srv) defer nc.Close() _, err := js.AddStream(&nats.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } for i := 0; i < 3; i++ { if _, err := js.Publish("FOO.123", []byte(fmt.Sprintf("msg-%d", i))); err != nil { t.Fatalf("Unexpected error during publish: %s", err) } } sub, err := js.PullSubscribe("FOO.123", "") if err != nil { t.Fatalf("Unexpected error: %v", err) } cons, err := sub.ConsumerInfo() if err != nil { t.Fatalf("Unexpected error: %v", err) } msgs, err := sub.FetchBatch(5) if err != nil { t.Fatalf("Unexpected error: %v", err) } errCh := make(chan error) go func() { for { err := msgs.Error() if err != nil { errCh <- err return } } }() deleteErrCh := make(chan error, 1) go func() { time.Sleep(100 * time.Millisecond) if err := js.DeleteConsumer("foo", cons.Name); err != nil { deleteErrCh <- err } close(deleteErrCh) }() var i int for msg := range msgs.Messages() { if string(msg.Data) != fmt.Sprintf("msg-%d", i) { t.Fatalf("Invalid msg on index %d; expected: %s; got: %s", i, fmt.Sprintf("msg-%d", i), string(msg.Data)) } i++ } if i != 3 { t.Fatalf("Invalid number of messages received; want: %d; got: %d", 5, i) } select { case err := <-errCh: if !errors.Is(err, nats.ErrConsumerDeleted) { t.Fatalf("Expected error: %v; got: %v", nats.ErrConsumerDeleted, err) } case <-time.After(1 * time.Second): t.Fatalf("Expected error: %v; got: %v", nats.ErrConsumerDeleted, 
nil) } // wait until the consumer is deleted, otherwise we may close the connection // before the consumer delete response is received select { case ert, ok := <-deleteErrCh: if !ok { break } t.Fatalf("Error deleting consumer: %s", ert) case <-time.After(1 * time.Second): t.Fatalf("Expected done to be closed") } } func TestJetStreamSubscribeConsumerCreateTimeout(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, js := jsClient(t, srv) defer nc.Close() _, err := js.AddStream(&nats.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond) defer cancel() _, err = js.SubscribeSync("", nats.BindStream("foo"), nats.Context(ctx)) if !errors.Is(err, context.DeadlineExceeded) { t.Fatalf("Expected error") } } func TestJetStreamPullSubscribeFetchErrOnReconnect(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, js := jsClient(t, srv) defer nc.Close() _, err := js.AddStream(&nats.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { t.Fatalf("Unexpected error: %v", err) } sub, err := js.PullSubscribe("FOO.123", "bar") if err != nil { t.Fatalf("Unexpected error: %v", err) } defer sub.Unsubscribe() errs := make(chan error, 1) go func() { time.Sleep(100 * time.Millisecond) errs <- nc.ForceReconnect() }() _, err = sub.Fetch(1, nats.MaxWait(time.Second)) if !errors.Is(err, nats.ErrFetchDisconnected) { t.Fatalf("Expected error: %v; got: %v", nats.ErrFetchDisconnected, err) } if err := <-errs; err != nil { t.Fatalf("Error on reconnect: %v", err) } } func TestJetStreamPullSubscribeFetchBatchErrOnReconnect(t *testing.T) { srv := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, srv) nc, js := jsClient(t, srv) defer nc.Close() _, err := js.AddStream(&nats.StreamConfig{Name: "foo", Subjects: []string{"FOO.*"}}) if err != nil { 
t.Fatalf("Unexpected error: %v", err) } sub, err := js.PullSubscribe("FOO.123", "bar") if err != nil { t.Fatalf("Unexpected error: %v", err) } defer sub.Unsubscribe() errs := make(chan error, 1) go func() { time.Sleep(100 * time.Millisecond) errs <- nc.ForceReconnect() }() msgs, err := sub.FetchBatch(1, nats.MaxWait(time.Second), nats.PullHeartbeat(100*time.Millisecond)) if err != nil { t.Fatalf("Unexpected error: %v", err) } for range msgs.Messages() { t.Fatalf("Expected no messages, got one") } if !errors.Is(msgs.Error(), nats.ErrFetchDisconnected) { t.Fatalf("Expected error: %v; got: %v", nats.ErrFetchDisconnected, msgs.Error()) } if err := <-errs; err != nil { t.Fatalf("Error on reconnect: %v", err) } } nats.go-1.41.0/test/json_test.go000066400000000000000000000173761477351342400165140ustar00rootroot00000000000000// Copyright 2012-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package test import ( "reflect" "testing" "time" "github.com/nats-io/nats.go" "github.com/nats-io/nats.go/encoders/builtin" ) //lint:file-ignore SA1019 Ignore deprecation warnings for EncodedConn func NewJsonEncodedConn(tl TestLogger) *nats.EncodedConn { ec, err := nats.NewEncodedConn(NewConnection(tl, TEST_PORT), nats.JSON_ENCODER) if err != nil { tl.Fatalf("Failed to create an encoded connection: %v\n", err) } return ec } func TestEncBuiltinJsonMarshalString(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() ec := NewJsonEncodedConn(t) defer ec.Close() ch := make(chan bool) testString := "Hello World!" ec.Subscribe("json_string", func(s string) { if s != testString { t.Fatalf("Received test string of '%s', wanted '%s'\n", s, testString) } ch <- true }) ec.Publish("json_string", testString) if e := Wait(ch); e != nil { t.Fatal("Did not receive the message") } } func TestEncBuiltinJsonMarshalEmptyString(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() ec := NewJsonEncodedConn(t) defer ec.Close() ch := make(chan bool) ec.Subscribe("json_empty_string", func(s string) { if s != "" { t.Fatalf("Received test of '%v', wanted empty string\n", s) } ch <- true }) ec.Publish("json_empty_string", "") if e := Wait(ch); e != nil { t.Fatal("Did not receive the message") } } func TestEncBuiltinJsonMarshalInt(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() ec := NewJsonEncodedConn(t) defer ec.Close() ch := make(chan bool) testN := 22 ec.Subscribe("json_int", func(n int) { if n != testN { t.Fatalf("Received test int of '%d', wanted '%d'\n", n, testN) } ch <- true }) ec.Publish("json_int", testN) if e := Wait(ch); e != nil { t.Fatal("Did not receive the message") } } func TestEncBuiltinJsonMarshalBool(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() ec := NewJsonEncodedConn(t) defer ec.Close() ch := make(chan bool) ec.Subscribe("json_bool", func(b bool) { if !b { t.Fatalf("Received test of '%v', wanted 'true'\n", 
b) } ch <- true }) ec.Publish("json_bool", true) if e := Wait(ch); e != nil { t.Fatal("Did not receive the message") } } func TestEncBuiltinJsonMarshalNull(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() ec := NewJsonEncodedConn(t) defer ec.Close() type TestType struct{} ch := make(chan bool) var testValue *TestType ec.Subscribe("json_null", func(i any) { if i != nil { t.Fatalf("Received test of '%v', wanted 'nil'\n", i) } ch <- true }) ec.Publish("json_null", testValue) if e := Wait(ch); e != nil { t.Fatal("Did not receive the message") } } func TestEncBuiltinJsonMarshalArray(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() ec := NewJsonEncodedConn(t) defer ec.Close() ch := make(chan bool) var a = []string{"a", "b", "c"} ec.Subscribe("json_array", func(v []string) { if !reflect.DeepEqual(v, a) { t.Fatalf("Received test of '%v', wanted '%v'\n", v, a) } ch <- true }) ec.Publish("json_array", a) if e := Wait(ch); e != nil { t.Fatal("Did not receive the message") } } func TestEncBuiltinJsonMarshalEmptyArray(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() ec := NewJsonEncodedConn(t) defer ec.Close() ch := make(chan bool) var a []string ec.Subscribe("json_empty_array", func(v []string) { if !reflect.DeepEqual(v, a) { t.Fatalf("Received test of '%v', wanted '%v'\n", v, a) } ch <- true }) ec.Publish("json_empty_array", a) if e := Wait(ch); e != nil { t.Fatal("Did not receive the message") } } type person struct { Name string Address string Age int Children map[string]*person Assets map[string]uint } func TestEncBuiltinJsonMarshalStruct(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() ec := NewJsonEncodedConn(t) defer ec.Close() ch := make(chan bool) me := &person{Name: "derek", Age: 22, Address: "140 New Montgomery St"} me.Children = make(map[string]*person) me.Children["sam"] = &person{Name: "sam", Age: 19, Address: "140 New Montgomery St"} me.Children["meg"] = &person{Name: "meg", Age: 17, 
Address: "140 New Montgomery St"}
	me.Assets = make(map[string]uint)
	me.Assets["house"] = 1000
	me.Assets["car"] = 100

	ec.Subscribe("json_struct", func(p *person) {
		if !reflect.DeepEqual(p, me) {
			t.Fatal("Did not receive the correct struct response")
		}
		ch <- true
	})
	ec.Publish("json_struct", me)
	if e := Wait(ch); e != nil {
		t.Fatal("Did not receive the message")
	}
}

// BenchmarkJsonMarshalStruct measures raw JSON encoding of a nested struct.
func BenchmarkJsonMarshalStruct(b *testing.B) {
	me := &person{Name: "derek", Age: 22, Address: "140 New Montgomery St"}
	me.Children = make(map[string]*person)
	me.Children["sam"] = &person{Name: "sam", Age: 19, Address: "140 New Montgomery St"}
	me.Children["meg"] = &person{Name: "meg", Age: 17, Address: "140 New Montgomery St"}

	encoder := &builtin.JsonEncoder{}

	for n := 0; n < b.N; n++ {
		if _, err := encoder.Encode("json_benchmark_struct_marshal", me); err != nil {
			b.Fatal("Couldn't serialize object", err)
		}
	}
}

// BenchmarkPublishJsonStruct measures a full publish/receive round trip of a
// JSON-encoded struct over an encoded connection.
func BenchmarkPublishJsonStruct(b *testing.B) {
	// stop benchmark for set-up
	b.StopTimer()

	s := RunServerOnPort(TEST_PORT)
	defer s.Shutdown()

	ec := NewJsonEncodedConn(b)
	defer ec.Close()
	ch := make(chan bool)

	me := &person{Name: "derek", Age: 22, Address: "140 New Montgomery St"}
	me.Children = make(map[string]*person)
	me.Children["sam"] = &person{Name: "sam", Age: 19, Address: "140 New Montgomery St"}
	me.Children["meg"] = &person{Name: "meg", Age: 17, Address: "140 New Montgomery St"}

	ec.Subscribe("json_benchmark_struct_publish", func(p *person) {
		if !reflect.DeepEqual(p, me) {
			b.Fatalf("Did not receive the correct struct response")
		}
		ch <- true
	})

	// resume benchmark
	b.StartTimer()

	for n := 0; n < b.N; n++ {
		// Publish on the subject the benchmark subscriber above listens on;
		// publishing to any other subject would make Wait() time out on
		// every iteration and fail the benchmark.
		ec.Publish("json_benchmark_struct_publish", me)
		if e := Wait(ch); e != nil {
			b.Fatal("Did not receive the message")
		}
	}
}

func TestEncBuiltinNotMarshableToJson(t *testing.T) {
	je := &builtin.JsonEncoder{}

	ch := make(chan bool)
	_, err := je.Encode("foo", ch)
	if err == nil {
		t.Fatal("Expected an error when failing encoding")
	}
}

func TestEncBuiltinFailedEncodedPublish(t *testing.T) {
	s :=
RunServerOnPort(TEST_PORT) defer s.Shutdown() ec := NewJsonEncodedConn(t) defer ec.Close() ch := make(chan bool) err := ec.Publish("foo", ch) if err == nil { t.Fatal("Expected an error trying to publish a channel") } err = ec.PublishRequest("foo", "bar", ch) if err == nil { t.Fatal("Expected an error trying to publish a channel") } var cr chan bool err = ec.Request("foo", ch, &cr, 1*time.Second) if err == nil { t.Fatal("Expected an error trying to publish a channel") } err = ec.LastError() if err != nil { t.Fatalf("Expected LastError to be nil: %q ", err) } } func TestEncBuiltinDecodeConditionals(t *testing.T) { je := &builtin.JsonEncoder{} b, err := je.Encode("foo", 22) if err != nil { t.Fatalf("Expected no error when encoding, got %v\n", err) } var foo string var bar []byte err = je.Decode("foo", b, &foo) if err != nil { t.Fatalf("Expected no error when decoding, got %v\n", err) } err = je.Decode("foo", b, &bar) if err != nil { t.Fatalf("Expected no error when decoding, got %v\n", err) } } nats.go-1.41.0/test/kv_test.go000066400000000000000000001315161477351342400161540ustar00rootroot00000000000000// Copyright 2021-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package test import ( "context" "errors" "fmt" "os" "reflect" "strconv" "strings" "testing" "time" "github.com/nats-io/nats-server/v2/server" "github.com/nats-io/nats.go" ) func TestKeyValueBasics(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() kv, err := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "TEST", History: 5, TTL: time.Hour}) expectOk(t, err) if kv.Bucket() != "TEST" { t.Fatalf("Expected bucket name to be %q, got %q", "TEST", kv.Bucket()) } // Simple Put r, err := kv.Put("name", []byte("derek")) expectOk(t, err) if r != 1 { t.Fatalf("Expected 1 for the revision, got %d", r) } // Simple Get e, err := kv.Get("name") expectOk(t, err) if string(e.Value()) != "derek" { t.Fatalf("Got wrong value: %q vs %q", e.Value(), "derek") } if e.Revision() != 1 { t.Fatalf("Expected 1 for the revision, got %d", e.Revision()) } // Delete err = kv.Delete("name") expectOk(t, err) _, err = kv.Get("name") expectErr(t, err, nats.ErrKeyNotFound) r, err = kv.Create("name", []byte("derek")) expectOk(t, err) if r != 3 { t.Fatalf("Expected 3 for the revision, got %d", r) } err = kv.Delete("name", nats.LastRevision(4)) expectErr(t, err) err = kv.Delete("name", nats.LastRevision(3)) expectOk(t, err) // Conditional Updates. 
r, err = kv.Update("name", []byte("rip"), 4) expectOk(t, err) _, err = kv.Update("name", []byte("ik"), 3) expectErr(t, err) _, err = kv.Update("name", []byte("ik"), r) expectOk(t, err) r, err = kv.Create("age", []byte("22")) expectOk(t, err) _, err = kv.Update("age", []byte("33"), r) expectOk(t, err) // Status status, err := kv.Status() expectOk(t, err) if status.History() != 5 { t.Fatalf("expected history of 5 got %d", status.History()) } if status.Bucket() != "TEST" { t.Fatalf("expected bucket TEST got %v", status.Bucket()) } if status.TTL() != time.Hour { t.Fatalf("expected 1 hour TTL got %v", status.TTL()) } if status.Values() != 7 { t.Fatalf("expected 7 values got %d", status.Values()) } if status.BackingStore() != "JetStream" { t.Fatalf("invalid backing store kind %s", status.BackingStore()) } kvs := status.(*nats.KeyValueBucketStatus) si := kvs.StreamInfo() if si == nil { t.Fatalf("StreamInfo not received") } } func TestKeyValueHistory(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() kv, err := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "LIST", History: 10}) expectOk(t, err) for i := 0; i < 50; i++ { age := strconv.FormatUint(uint64(i+22), 10) _, err := kv.Put("age", []byte(age)) expectOk(t, err) } vl, err := kv.History("age") expectOk(t, err) if len(vl) != 10 { t.Fatalf("Expected %d values, got %d", 10, len(vl)) } for i, v := range vl { if v.Key() != "age" { t.Fatalf("Expected key of %q, got %q", "age", v.Key()) } if v.Revision() != uint64(i+41) { // History of 10, sent 50.. 
t.Fatalf("Expected revision of %d, got %d", i+41, v.Revision())
		}
		age, err := strconv.Atoi(string(v.Value()))
		expectOk(t, err)
		// History of 10 over 50 puts of ages 22..71 keeps only ages 62..71,
		// so the expected value (and the failure message) is i+62, not i+22.
		if age != i+62 {
			t.Fatalf("Expected data value of %d, got %d", i+62, age)
		}
	}
}

func TestKeyValueWatch(t *testing.T) {
	// expectUpdateF returns a helper asserting the next watcher update is a
	// put with the given key, value and revision.
	expectUpdateF := func(t *testing.T, watcher nats.KeyWatcher) func(key, value string, revision uint64) {
		return func(key, value string, revision uint64) {
			t.Helper()
			select {
			case v := <-watcher.Updates():
				if v.Key() != key || string(v.Value()) != value || v.Revision() != revision {
					t.Fatalf("Did not get expected: %q %q %d vs %q %q %d", v.Key(), string(v.Value()), v.Revision(), key, value, revision)
				}
			case <-time.After(time.Second):
				t.Fatalf("Did not receive an update like expected")
			}
		}
	}
	// expectDeleteF returns a helper asserting the next update is a delete
	// marker at the given revision.
	expectDeleteF := func(t *testing.T, watcher nats.KeyWatcher) func(key string, revision uint64) {
		return func(key string, revision uint64) {
			t.Helper()
			select {
			case v := <-watcher.Updates():
				if v.Operation() != nats.KeyValueDelete {
					t.Fatalf("Expected a delete operation but got %+v", v)
				}
				if v.Revision() != revision {
					t.Fatalf("Did not get expected revision: %d vs %d", revision, v.Revision())
				}
			case <-time.After(time.Second):
				t.Fatalf("Did not receive an update like expected")
			}
		}
	}
	// expectPurgeF returns a helper asserting the next update is a purge
	// marker at the given revision.
	expectPurgeF := func(t *testing.T, watcher nats.KeyWatcher) func(key string, revision uint64) {
		return func(key string, revision uint64) {
			t.Helper()
			select {
			case v := <-watcher.Updates():
				if v.Operation() != nats.KeyValuePurge {
					// This helper checks for purge, not delete; say so.
					t.Fatalf("Expected a purge operation but got %+v", v)
				}
				if v.Revision() != revision {
					t.Fatalf("Did not get expected revision: %d vs %d", revision, v.Revision())
				}
			case <-time.After(time.Second):
				t.Fatalf("Did not receive an update like expected")
			}
		}
	}
	// expectInitDoneF returns a helper asserting the nil "initial values
	// done" marker is delivered.
	expectInitDoneF := func(t *testing.T, watcher nats.KeyWatcher) func() {
		return func() {
			t.Helper()
			select {
			case v := <-watcher.Updates():
				if v != nil {
					t.Fatalf("Did not get expected: %+v", v)
				}
			case <-time.After(time.Second):
				t.Fatalf("Did not receive a init done like expected")
			}
		}
} t.Run("default watcher", func(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() kv, err := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "WATCH"}) expectOk(t, err) watcher, err := kv.WatchAll() expectOk(t, err) defer watcher.Stop() expectInitDone := expectInitDoneF(t, watcher) expectUpdate := expectUpdateF(t, watcher) expectDelete := expectDeleteF(t, watcher) // Make sure we already got an initial value marker. expectInitDone() kv.Create("name", []byte("derek")) expectUpdate("name", "derek", 1) kv.Put("name", []byte("rip")) expectUpdate("name", "rip", 2) kv.Put("name", []byte("ik")) expectUpdate("name", "ik", 3) kv.Put("age", []byte("22")) expectUpdate("age", "22", 4) kv.Put("age", []byte("33")) expectUpdate("age", "33", 5) kv.Delete("age") expectDelete("age", 6) // Stop first watcher. watcher.Stop() // Now try wildcard matching and make sure we only get last value when starting. kv.Put("t.name", []byte("rip")) kv.Put("t.name", []byte("ik")) kv.Put("t.age", []byte("22")) kv.Put("t.age", []byte("44")) watcher, err = kv.Watch("t.*") expectOk(t, err) expectInitDone = expectInitDoneF(t, watcher) expectUpdate = expectUpdateF(t, watcher) expectUpdate("t.name", "ik", 8) expectUpdate("t.age", "44", 10) expectInitDone() watcher.Stop() // test watcher with multiple filters watcher, err = kv.WatchFiltered([]string{"t.name", "name"}) expectOk(t, err) expectInitDone = expectInitDoneF(t, watcher) expectUpdate = expectUpdateF(t, watcher) expectPurge := expectPurgeF(t, watcher) expectUpdate("name", "ik", 3) expectUpdate("t.name", "ik", 8) expectInitDone() err = kv.Purge("name") expectOk(t, err) expectPurge("name", 11) defer watcher.Stop() }) t.Run("watcher with history included", func(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() kv, err := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "WATCH", History: 64}) 
expectOk(t, err) kv.Create("name", []byte("derek")) kv.Put("name", []byte("rip")) kv.Put("name", []byte("ik")) kv.Put("age", []byte("22")) kv.Put("age", []byte("33")) kv.Delete("age") // when using UpdatesOnly(), IncludeHistory() is not allowed if _, err := kv.WatchAll(nats.IncludeHistory(), nats.UpdatesOnly()); !strings.Contains(err.Error(), "updates only can not be used with include history") { t.Fatalf("Expected error to contain %q, got %q", "updates only can not be used with include history", err) } watcher, err := kv.WatchAll(nats.IncludeHistory()) expectOk(t, err) defer watcher.Stop() expectInitDone := expectInitDoneF(t, watcher) expectUpdate := expectUpdateF(t, watcher) expectDelete := expectDeleteF(t, watcher) expectUpdate("name", "derek", 1) expectUpdate("name", "rip", 2) expectUpdate("name", "ik", 3) expectUpdate("age", "22", 4) expectUpdate("age", "33", 5) expectDelete("age", 6) expectInitDone() kv.Put("name", []byte("pp")) expectUpdate("name", "pp", 7) // Stop first watcher. 
watcher.Stop() kv.Put("t.name", []byte("rip")) kv.Put("t.name", []byte("ik")) kv.Put("t.age", []byte("22")) kv.Put("t.age", []byte("44")) // try wildcard watcher and make sure we get all historical values watcher, err = kv.Watch("t.*", nats.IncludeHistory()) expectOk(t, err) defer watcher.Stop() expectInitDone = expectInitDoneF(t, watcher) expectUpdate = expectUpdateF(t, watcher) expectUpdate("t.name", "rip", 8) expectUpdate("t.name", "ik", 9) expectUpdate("t.age", "22", 10) expectUpdate("t.age", "44", 11) expectInitDone() kv.Put("t.name", []byte("pp")) expectUpdate("t.name", "pp", 12) }) t.Run("watcher with updates only", func(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() kv, err := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "WATCH", History: 64}) expectOk(t, err) kv.Create("name", []byte("derek")) kv.Put("name", []byte("rip")) kv.Put("age", []byte("22")) // when using UpdatesOnly(), IncludeHistory() is not allowed if _, err := kv.WatchAll(nats.UpdatesOnly(), nats.IncludeHistory()); !strings.Contains(err.Error(), "include history can not be used with updates only") { t.Fatalf("Expected error to contain %q, got %q", "include history can not be used with updates only", err) } watcher, err := kv.WatchAll(nats.UpdatesOnly()) expectOk(t, err) defer watcher.Stop() expectUpdate := expectUpdateF(t, watcher) expectDelete := expectDeleteF(t, watcher) // now update some keys and expect updates kv.Put("name", []byte("pp")) expectUpdate("name", "pp", 4) kv.Put("age", []byte("44")) expectUpdate("age", "44", 5) kv.Delete("age") expectDelete("age", 6) // Stop first watcher. 
watcher.Stop() kv.Put("t.name", []byte("rip")) kv.Put("t.name", []byte("ik")) kv.Put("t.age", []byte("22")) kv.Put("t.age", []byte("44")) // try wildcard watcher and make sure we do not get any values initially watcher, err = kv.Watch("t.*", nats.UpdatesOnly()) expectOk(t, err) defer watcher.Stop() expectUpdate = expectUpdateF(t, watcher) // update some keys and expect updates kv.Put("t.name", []byte("pp")) expectUpdate("t.name", "pp", 11) kv.Put("t.age", []byte("66")) expectUpdate("t.age", "66", 12) }) t.Run("invalid watchers", func(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() kv, err := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "WATCH"}) expectOk(t, err) // empty keys _, err = kv.Watch("") expectErr(t, err, nats.ErrInvalidKey) // invalid key _, err = kv.Watch("a.>.b") expectErr(t, err, nats.ErrInvalidKey) _, err = kv.Watch("foo.") expectErr(t, err, nats.ErrInvalidKey) }) t.Run("filtered watch with no filters", func(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() kv, err := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "WATCH"}) expectOk(t, err) // this should behave like WatchAll watcher, err := kv.WatchFiltered([]string{}) expectOk(t, err) defer watcher.Stop() expectInitDone := expectInitDoneF(t, watcher) expectUpdate := expectUpdateF(t, watcher) expectDelete := expectDeleteF(t, watcher) // Make sure we already got an initial value marker. 
expectInitDone() _, err = kv.Create("name", []byte("derek")) expectOk(t, err) expectUpdate("name", "derek", 1) _, err = kv.Put("name", []byte("rip")) expectOk(t, err) expectUpdate("name", "rip", 2) _, err = kv.Put("name", []byte("ik")) expectOk(t, err) expectUpdate("name", "ik", 3) _, err = kv.Put("age", []byte("22")) expectOk(t, err) expectUpdate("age", "22", 4) _, err = kv.Put("age", []byte("33")) expectOk(t, err) expectUpdate("age", "33", 5) expectOk(t, kv.Delete("age")) expectDelete("age", 6) }) t.Run("stop watcher should not block", func(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() kv, err := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "WATCH"}) expectOk(t, err) watcher, err := kv.WatchAll() expectOk(t, err) expectInitDone := expectInitDoneF(t, watcher) expectInitDone() err = watcher.Stop() expectOk(t, err) select { case _, ok := <-watcher.Updates(): if ok { t.Fatalf("Expected channel to be closed") } case <-time.After(100 * time.Millisecond): t.Fatalf("Stop watcher did not return") } }) } func TestKeyValueWatchContext(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() kv, err := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "WATCHCTX"}) expectOk(t, err) ctx, cancel := context.WithCancel(context.Background()) defer cancel() watcher, err := kv.WatchAll(nats.Context(ctx)) expectOk(t, err) defer watcher.Stop() // Trigger unsubscribe internally. cancel() // Wait for a bit for unsubscribe to be done. time.Sleep(500 * time.Millisecond) // Stopping watch that is already stopped via cancellation propagation is an error. 
err = watcher.Stop()
	// Use errors.Is rather than a direct sentinel comparison so a wrapped
	// ErrBadSubscription still matches (and nil still fails the check).
	if !errors.Is(err, nats.ErrBadSubscription) {
		t.Errorf("Expected invalid subscription, got: %v", err)
	}
}

func TestKeyValueWatchContextUpdates(t *testing.T) {
	s := RunBasicJetStreamServer()
	defer shutdownJSServerAndRemoveStorage(t, s)

	nc, js := jsClient(t, s)
	defer nc.Close()

	kv, err := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "WATCHCTX"})
	expectOk(t, err)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	watcher, err := kv.WatchAll(nats.Context(ctx))
	expectOk(t, err)
	defer watcher.Stop()

	// Pull the initial state done marker which is nil.
	select {
	case v := <-watcher.Updates():
		if v != nil {
			t.Fatalf("Expected nil marker, got %+v", v)
		}
	case <-time.After(time.Second):
		t.Fatalf("Did not receive nil marker like expected")
	}

	// Fire a timer and cancel the context after 250ms.
	time.AfterFunc(250*time.Millisecond, cancel)

	// Make sure canceling will break us out here.
	select {
	case <-watcher.Updates():
	case <-time.After(5 * time.Second):
		t.Fatalf("Did not break out like expected")
	}
}

func TestKeyValueBindStore(t *testing.T) {
	s := RunBasicJetStreamServer()
	defer shutdownJSServerAndRemoveStorage(t, s)

	nc, js := jsClient(t, s)
	defer nc.Close()

	_, err := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "WATCH"})
	expectOk(t, err)

	// Now bind to it..
	_, err = js.KeyValue("WATCH")
	expectOk(t, err)

	// Make sure we can't bind to a non-kv style stream.
	// We have some protection with stream name prefix.
_, err = js.AddStream(&nats.StreamConfig{
		Name:     "KV_TEST",
		Subjects: []string{"foo"},
	})
	expectOk(t, err)
	_, err = js.KeyValue("TEST")
	expectErr(t, err)
	// Match via errors.Is (consistent with expectErr) so a wrapped
	// ErrBadBucket is still recognized.
	if !errors.Is(err, nats.ErrBadBucket) {
		t.Fatalf("Expected %v but got %v", nats.ErrBadBucket, err)
	}
}

func TestKeyValueDeleteStore(t *testing.T) {
	s := RunBasicJetStreamServer()
	defer shutdownJSServerAndRemoveStorage(t, s)

	nc, js := jsClient(t, s)
	defer nc.Close()

	_, err := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "WATCH"})
	expectOk(t, err)

	err = js.DeleteKeyValue("WATCH")
	expectOk(t, err)

	_, err = js.KeyValue("WATCH")
	expectErr(t, err)
}

func TestKeyValueDeleteVsPurge(t *testing.T) {
	s := RunBasicJetStreamServer()
	defer shutdownJSServerAndRemoveStorage(t, s)

	nc, js := jsClient(t, s)
	defer nc.Close()

	kv, err := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "KVS", History: 10})
	expectOk(t, err)

	put := func(key, value string) {
		t.Helper()
		_, err := kv.Put(key, []byte(value))
		expectOk(t, err)
	}

	// Put in a few names and ages.
	put("name", "derek")
	put("age", "22")
	put("name", "ivan")
	put("age", "33")
	put("name", "rip")
	put("age", "44")

	kv.Delete("age")

	entries, err := kv.History("age")
	expectOk(t, err)
	// Expect three entries and delete marker.
	if len(entries) != 4 {
		t.Fatalf("Expected 4 entries for age after delete, got %d", len(entries))
	}

	err = kv.Purge("name", nats.LastRevision(4))
	expectErr(t, err)
	err = kv.Purge("name", nats.LastRevision(5))
	expectOk(t, err)

	// Check marker
	e, err := kv.Get("name")
	expectErr(t, err, nats.ErrKeyNotFound)
	if e != nil {
		t.Fatalf("Expected a nil entry but got %v", e)
	}

	entries, err = kv.History("name")
	expectOk(t, err)
	if len(entries) != 1 {
		t.Fatalf("Expected only 1 entry for age after delete, got %d", len(entries))
	}

	// Make sure history also reports the purge operation.
if e := entries[0]; e.Operation() != nats.KeyValuePurge { t.Fatalf("Expected a purge operation but got %v", e.Operation()) } } func TestKeyValueDeleteTombstones(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() kv, err := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "KVS", History: 10}) expectOk(t, err) put := func(key, value string) { t.Helper() _, err := kv.Put(key, []byte(value)) expectOk(t, err) } v := strings.Repeat("ABC", 33) for i := 1; i <= 100; i++ { put(fmt.Sprintf("key-%d", i), v) } // Now delete them. for i := 1; i <= 100; i++ { err := kv.Delete(fmt.Sprintf("key-%d", i)) expectOk(t, err) } // Now cleanup. err = kv.PurgeDeletes(nats.DeleteMarkersOlderThan(-1)) expectOk(t, err) si, err := js.StreamInfo("KV_KVS") expectOk(t, err) if si.State.Msgs != 0 { t.Fatalf("Expected no stream msgs to be left, got %d", si.State.Msgs) } // Try with context ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() err = kv.PurgeDeletes(nats.Context(ctx)) expectOk(t, err) } func TestKeyValuePurgeDeletesMarkerThreshold(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() kv, err := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "KVS", History: 10}) expectOk(t, err) put := func(key, value string) { t.Helper() _, err := kv.Put(key, []byte(value)) expectOk(t, err) } put("foo", "foo1") put("bar", "bar1") put("foo", "foo2") err = kv.Delete("foo") expectOk(t, err) time.Sleep(200 * time.Millisecond) err = kv.Delete("bar") expectOk(t, err) err = kv.PurgeDeletes(nats.DeleteMarkersOlderThan(100 * time.Millisecond)) expectOk(t, err) // The key foo should have been completely cleared of the data // and the delete marker. 
fooEntries, err := kv.History("foo")
	// errors.Is matches even if ErrKeyNotFound comes back wrapped; a direct
	// sentinel comparison would not.
	if !errors.Is(err, nats.ErrKeyNotFound) {
		t.Fatalf("Expected all entries for key foo to be gone, got err=%v entries=%v", err, fooEntries)
	}
	barEntries, err := kv.History("bar")
	expectOk(t, err)
	if len(barEntries) != 1 {
		t.Fatalf("Expected 1 entry, got %v", barEntries)
	}
	if e := barEntries[0]; e.Operation() != nats.KeyValueDelete {
		t.Fatalf("Unexpected entry: %+v", e)
	}
}

func TestKeyValueKeys(t *testing.T) {
	s := RunBasicJetStreamServer()
	defer shutdownJSServerAndRemoveStorage(t, s)

	nc, js := jsClient(t, s)
	defer nc.Close()

	kv, err := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "KVS", History: 2})
	expectOk(t, err)

	put := func(key, value string) {
		t.Helper()
		_, err := kv.Put(key, []byte(value))
		expectOk(t, err)
	}

	_, err = kv.Keys()
	expectErr(t, err, nats.ErrNoKeysFound)

	// Put in a few names and ages.
	put("name", "derek")
	put("age", "22")
	put("country", "US")
	put("name", "ivan")
	put("age", "33")
	put("country", "US")
	put("name", "rip")
	put("age", "44")
	put("country", "MT")

	keys, err := kv.Keys()
	expectOk(t, err)

	kmap := make(map[string]struct{})
	for _, key := range keys {
		if _, ok := kmap[key]; ok {
			t.Fatalf("Already saw %q", key)
		}
		kmap[key] = struct{}{}
	}
	if len(kmap) != 3 {
		t.Fatalf("Expected 3 total keys, got %d", len(kmap))
	}
	// gofmt -s form: element type elided inside the composite literal.
	expected := map[string]struct{}{
		"name":    {},
		"age":     {},
		"country": {},
	}
	if !reflect.DeepEqual(kmap, expected) {
		t.Fatalf("Expected %+v but got %+v", expected, kmap)
	}
	// Make sure delete and purge do the right thing and not return the keys.
err = kv.Delete("name") expectOk(t, err) err = kv.Purge("country") expectOk(t, err) keys, err = kv.Keys() expectOk(t, err) kmap = make(map[string]struct{}) for _, key := range keys { if _, ok := kmap[key]; ok { t.Fatalf("Already saw %q", key) } kmap[key] = struct{}{} } if len(kmap) != 1 { t.Fatalf("Expected 1 total key, got %d", len(kmap)) } if _, ok := kmap["age"]; !ok { t.Fatalf("Expected %q to be only key present", "age") } } func TestKeyValueListKeys(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() kv, err := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "KVS", History: 2}) expectOk(t, err) put := func(key, value string) { t.Helper() _, err := kv.Put(key, []byte(value)) expectOk(t, err) } // Put in a few names and ages. put("name", "derek") put("age", "22") put("country", "US") put("name", "ivan") put("age", "33") put("country", "US") put("name", "rip") put("age", "44") put("country", "MT") keys, err := kv.ListKeys() expectOk(t, err) kmap := make(map[string]struct{}) for key := range keys.Keys() { if _, ok := kmap[key]; ok { t.Fatalf("Already saw %q", key) } kmap[key] = struct{}{} } if len(kmap) != 3 { t.Fatalf("Expected 3 total keys, got %d", len(kmap)) } expected := map[string]struct{}{ "name": struct{}{}, "age": struct{}{}, "country": struct{}{}, } if !reflect.DeepEqual(kmap, expected) { t.Fatalf("Expected %+v but got %+v", expected, kmap) } // Make sure delete and purge do the right thing and not return the keys. 
err = kv.Delete("name") expectOk(t, err) err = kv.Purge("country") expectOk(t, err) keys, err = kv.ListKeys() expectOk(t, err) kmap = make(map[string]struct{}) for key := range keys.Keys() { if _, ok := kmap[key]; ok { t.Fatalf("Already saw %q", key) } kmap[key] = struct{}{} } if len(kmap) != 1 { t.Fatalf("Expected 1 total key, got %d", len(kmap)) } if _, ok := kmap["age"]; !ok { t.Fatalf("Expected %q to be only key present", "age") } } func TestKeyValueCrossAccounts(t *testing.T) { conf := createConfFile(t, []byte(` jetstream: enabled accounts: { A: { users: [ {user: a, password: a} ] jetstream: enabled exports: [ {service: '$JS.API.>' } {service: '$KV.>'} {stream: 'accI.>'} ] }, I: { users: [ {user: i, password: i} ] imports: [ {service: {account: A, subject: '$JS.API.>'}, to: 'fromA.>' } {service: {account: A, subject: '$KV.>'}, to: 'fromA.$KV.>' } {stream: {subject: 'accI.>', account: A}} ] } }`)) defer os.Remove(conf) s, _ := RunServerWithConfig(conf) defer shutdownJSServerAndRemoveStorage(t, s) watchNext := func(w nats.KeyWatcher) nats.KeyValueEntry { t.Helper() select { case e := <-w.Updates(): return e case <-time.After(time.Second): t.Fatal("Fail to get the next update") } return nil } nc1, js1 := jsClient(t, s, nats.UserInfo("a", "a")) defer nc1.Close() kv1, err := js1.CreateKeyValue(&nats.KeyValueConfig{Bucket: "Map", History: 10}) if err != nil { t.Fatalf("Error creating kv store: %v", err) } w1, err := kv1.Watch("map") if err != nil { t.Fatalf("Error creating watcher: %v", err) } if e := watchNext(w1); e != nil { t.Fatalf("Expected nil entry, got %+v", e) } nc2, err := nats.Connect(s.ClientURL(), nats.UserInfo("i", "i"), nats.CustomInboxPrefix("accI")) if err != nil { t.Fatalf("Error on connect: %v", err) } defer nc2.Close() js2, err := nc2.JetStream(nats.APIPrefix("fromA")) if err != nil { t.Fatalf("Error getting jetstream context: %v", err) } kv2, err := js2.CreateKeyValue(&nats.KeyValueConfig{Bucket: "Map", History: 10}) if err != nil { 
t.Fatalf("Error creating kv store: %v", err) } w2, err := kv2.Watch("map") if err != nil { t.Fatalf("Error creating watcher: %v", err) } if e := watchNext(w2); e != nil { t.Fatalf("Expected nil entry, got %+v", e) } // Do a Put from kv2 rev, err := kv2.Put("map", []byte("value")) if err != nil { t.Fatalf("Error on put: %v", err) } // Get from kv1 e, err := kv1.Get("map") if err != nil { t.Fatalf("Error on get: %v", err) } if e.Key() != "map" || string(e.Value()) != "value" { t.Fatalf("Unexpected entry: +%v", e) } // Get from kv2 e, err = kv2.Get("map") if err != nil { t.Fatalf("Error on get: %v", err) } if e.Key() != "map" || string(e.Value()) != "value" { t.Fatalf("Unexpected entry: +%v", e) } // Watcher 1 if e := watchNext(w1); e == nil || e.Key() != "map" || string(e.Value()) != "value" { t.Fatalf("Unexpected entry: %+v", e) } // Watcher 2 if e := watchNext(w2); e == nil || e.Key() != "map" || string(e.Value()) != "value" { t.Fatalf("Unexpected entry: %+v", e) } // Try an update form kv2 if _, err := kv2.Update("map", []byte("updated"), rev); err != nil { t.Fatalf("Failed to update: %v", err) } // Get from kv1 e, err = kv1.Get("map") if err != nil { t.Fatalf("Error on get: %v", err) } if e.Key() != "map" || string(e.Value()) != "updated" { t.Fatalf("Unexpected entry: +%v", e) } // Get from kv2 e, err = kv2.Get("map") if err != nil { t.Fatalf("Error on get: %v", err) } if e.Key() != "map" || string(e.Value()) != "updated" { t.Fatalf("Unexpected entry: +%v", e) } // Watcher 1 if e := watchNext(w1); e == nil || e.Key() != "map" || string(e.Value()) != "updated" { t.Fatalf("Unexpected entry: %+v", e) } // Watcher 2 if e := watchNext(w2); e == nil || e.Key() != "map" || string(e.Value()) != "updated" { t.Fatalf("Unexpected entry: %+v", e) } // Purge from kv2 if err := kv2.Purge("map"); err != nil { t.Fatalf("Error on purge: %v", err) } // Check purge ok from w1 if e := watchNext(w1); e == nil || e.Operation() != nats.KeyValuePurge { t.Fatalf("Unexpected entry: %+v", 
e) } // Check purge ok from w2 if e := watchNext(w2); e == nil || e.Operation() != nats.KeyValuePurge { t.Fatalf("Unexpected entry: %+v", e) } // Delete purge records from kv2 if err := kv2.PurgeDeletes(nats.DeleteMarkersOlderThan(-1)); err != nil { t.Fatalf("Error on purge deletes: %v", err) } // Check all gone from js1 if si, err := js1.StreamInfo("KV_Map"); err != nil || si == nil || si.State.Msgs != 0 { t.Fatalf("Error getting stream info: err=%v si=%+v", err, si) } // Delete key from kv2 if err := kv2.Delete("map"); err != nil { t.Fatalf("Error on delete: %v", err) } // Check key gone from kv1 if e, err := kv1.Get("map"); err != nats.ErrKeyNotFound || e != nil { t.Fatalf("Expected key not found, got err=%v e=%+v", err, e) } } func TestKeyValueDuplicatesWindow(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() checkWindow := func(ttl, expectedDuplicates time.Duration) { t.Helper() _, err := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "TEST", History: 5, TTL: ttl}) expectOk(t, err) defer js.DeleteKeyValue("TEST") si, err := js.StreamInfo("KV_TEST") if err != nil { t.Fatalf("StreamInfo error: %v", err) } if si.Config.Duplicates != expectedDuplicates { t.Fatalf("Expected duplicates to be %v, got %v", expectedDuplicates, si.Config.Duplicates) } } checkWindow(0, 2*time.Minute) checkWindow(time.Hour, 2*time.Minute) checkWindow(5*time.Second, 5*time.Second) } // Helpers func client(t *testing.T, s *server.Server, opts ...nats.Option) *nats.Conn { t.Helper() nc, err := nats.Connect(s.ClientURL(), opts...) if err != nil { t.Fatalf("Unexpected error: %v", err) } return nc } func jsClient(t *testing.T, s *server.Server, opts ...nats.Option) (*nats.Conn, nats.JetStreamContext) { t.Helper() nc := client(t, s, opts...) 
js, err := nc.JetStream(nats.MaxWait(10 * time.Second)) if err != nil { t.Fatalf("Unexpected error getting JetStream context: %v", err) } return nc, js } func expectOk(t *testing.T, err error) { t.Helper() if err != nil { t.Fatalf("Unexpected error: %v", err) } } func expectErr(t *testing.T, err error, expected ...error) { t.Helper() if err == nil { t.Fatalf("Expected error but got none") } if len(expected) == 0 { return } for _, e := range expected { if errors.Is(err, e) { return } } t.Fatalf("Expected one of %+v, got '%v'", expected, err) } func TestListKeyValueStores(t *testing.T) { tests := []struct { name string bucketsNum int }{ { name: "single page", bucketsNum: 5, }, { name: "multi page", bucketsNum: 1025, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() // create stream without the chunk subject, but with KV_ prefix _, err := js.AddStream(&nats.StreamConfig{Name: "KV_FOO", Subjects: []string{"FOO.*"}}) expectOk(t, err) // create stream with chunk subject, but without "KV_" prefix _, err = js.AddStream(&nats.StreamConfig{Name: "FOO", Subjects: []string{"$KV.ABC.>"}}) expectOk(t, err) for i := 0; i < test.bucketsNum; i++ { _, err = js.CreateKeyValue(&nats.KeyValueConfig{Bucket: fmt.Sprintf("KVS_%d", i), MaxBytes: 1024}) expectOk(t, err) } names := make([]string, 0) for name := range js.KeyValueStoreNames() { if strings.HasPrefix(name, "KV_") { t.Fatalf("Expected name without KV_ prefix, got %q", name) } names = append(names, name) } if len(names) != test.bucketsNum { t.Fatalf("Invalid number of stream names; want: %d; got: %d", test.bucketsNum, len(names)) } infos := make([]nats.KeyValueStatus, 0) for info := range js.KeyValueStores() { infos = append(infos, info) } if len(infos) != test.bucketsNum { t.Fatalf("Invalid number of streams; want: %d; got: %d", test.bucketsNum, len(infos)) } }) } } func 
TestKeyValueMirrorCrossDomains(t *testing.T) { keyExists := func(t *testing.T, kv nats.KeyValue, key string, expected string) nats.KeyValueEntry { var e nats.KeyValueEntry var err error checkFor(t, 10*time.Second, 10*time.Millisecond, func() error { e, err = kv.Get(key) if err != nil { return err } if string(e.Value()) != expected { return fmt.Errorf("Expected value to be %q, got %q", expected, e.Value()) } return nil }) return e } keyDeleted := func(t *testing.T, kv nats.KeyValue, key string) { checkFor(t, 10*time.Second, 10*time.Millisecond, func() error { _, err := kv.Get(key) if err == nil { return errors.New("Expected key to be gone") } if !errors.Is(err, nats.ErrKeyNotFound) { return err } return nil }) } conf := createConfFile(t, []byte(` server_name: HUB listen: 127.0.0.1:-1 jetstream: { domain: HUB } leafnodes { listen: 127.0.0.1:7422 } }`)) defer os.Remove(conf) s, _ := RunServerWithConfig(conf) defer shutdownJSServerAndRemoveStorage(t, s) lconf := createConfFile(t, []byte(` server_name: LEAF listen: 127.0.0.1:-1 jetstream: { domain:LEAF } leafnodes { remotes = [ { url: "leaf://127.0.0.1" } ] } }`)) defer os.Remove(lconf) ln, _ := RunServerWithConfig(lconf) defer shutdownJSServerAndRemoveStorage(t, ln) // Create main KV on HUB nc, js := jsClient(t, s) defer nc.Close() kv, err := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "TEST"}) expectOk(t, err) _, err = kv.PutString("name", "derek") expectOk(t, err) _, err = kv.PutString("age", "22") expectOk(t, err) _, err = kv.PutString("v", "v") expectOk(t, err) err = kv.Delete("v") expectOk(t, err) lnc, ljs := jsClient(t, ln) defer lnc.Close() // Capture cfg so we can make sure it does not change. // NOTE: We use different name to test all possibilities, etc, but in practice for truly nomadic applications // this should be named the same, e.g. TEST. 
cfg := &nats.KeyValueConfig{ Bucket: "MIRROR", Mirror: &nats.StreamSource{ Name: "TEST", Domain: "HUB", }, } ccfg := *cfg _, err = ljs.CreateKeyValue(cfg) expectOk(t, err) if !reflect.DeepEqual(cfg, &ccfg) { t.Fatalf("Did not expect config to be altered: %+v vs %+v", cfg, ccfg) } si, err := ljs.StreamInfo("KV_MIRROR") expectOk(t, err) // Make sure mirror direct set. if !si.Config.MirrorDirect { t.Fatalf("Expected mirror direct to be set") } // Make sure we sync. checkFor(t, 2*time.Second, 15*time.Millisecond, func() error { si, err := ljs.StreamInfo("KV_MIRROR") expectOk(t, err) if si.State.Msgs == 3 { return nil } return fmt.Errorf("Did not get synched messages: %d", si.State.Msgs) }) // Bind locally from leafnode and make sure both get and put work. mkv, err := ljs.KeyValue("MIRROR") expectOk(t, err) _, err = mkv.PutString("name", "rip") expectOk(t, err) _, err = mkv.PutString("v", "vv") expectOk(t, err) // wait for the key to be propagated to the mirror e := keyExists(t, kv, "v", "vv") if e.Operation() != nats.KeyValuePut { t.Fatalf("Got wrong value: %q vs %q", e.Operation(), nats.KeyValuePut) } err = mkv.Delete("v") expectOk(t, err) keyDeleted(t, kv, "v") keyExists(t, kv, "name", "rip") // Also make sure we can create a watcher on the mirror KV. watcher, err := mkv.WatchAll() expectOk(t, err) defer watcher.Stop() // Bind through leafnode connection but to origin KV. rjs, err := lnc.JetStream(nats.Domain("HUB")) expectOk(t, err) rkv, err := rjs.KeyValue("TEST") expectOk(t, err) _, err = rkv.PutString("name", "ivan") expectOk(t, err) e = keyExists(t, mkv, "name", "ivan") _, err = rkv.PutString("v", "vv") expectOk(t, err) keyExists(t, mkv, "v", "vv") if e.Operation() != nats.KeyValuePut { t.Fatalf("Got wrong value: %q vs %q", e.Operation(), nats.KeyValuePut) } err = rkv.Delete("v") expectOk(t, err) keyDeleted(t, mkv, "v") // Shutdown cluster and test get still work. 
shutdownJSServerAndRemoveStorage(t, s) keyExists(t, mkv, "name", "ivan") } func TestKeyValueNonDirectGet(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() _, err := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "TEST"}) if err != nil { t.Fatalf("Error creating store: %v", err) } si, err := js.StreamInfo("KV_TEST") if err != nil { t.Fatalf("Error getting stream info: %v", err) } if !si.Config.AllowDirect { t.Fatal("Expected allow direct to be set, it was not") } cfg := si.Config cfg.AllowDirect = false if _, err := js.UpdateStream(&cfg); err != nil { t.Fatalf("Error updating stream: %v", err) } kvi, err := js.KeyValue("TEST") if err != nil { t.Fatalf("Error getting kv: %v", err) } if _, err := kvi.PutString("key1", "val1"); err != nil { t.Fatalf("Error putting key: %v", err) } if _, err := kvi.PutString("key2", "val2"); err != nil { t.Fatalf("Error putting key: %v", err) } if v, err := kvi.Get("key2"); err != nil || string(v.Value()) != "val2" { t.Fatalf("Error on get: v=%+v err=%v", v, err) } if v, err := kvi.GetRevision("key1", 1); err != nil || string(v.Value()) != "val1" { t.Fatalf("Error on get revisiong: v=%+v err=%v", v, err) } if v, err := kvi.GetRevision("key1", 2); err == nil { t.Fatalf("Expected error, got %+v", v) } } func TestKeyValueRePublish(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() if _, err := js.CreateKeyValue(&nats.KeyValueConfig{ Bucket: "TEST_UPDATE", }); err != nil { t.Fatalf("Error creating store: %v", err) } // This is expected to fail since server does not support as of now // the update of RePublish. 
if _, err := js.CreateKeyValue(&nats.KeyValueConfig{ Bucket: "TEST_UPDATE", RePublish: &nats.RePublish{Source: ">", Destination: "bar.>"}, }); err == nil { t.Fatal("Expected failure, did not get one") } kv, err := js.CreateKeyValue(&nats.KeyValueConfig{ Bucket: "TEST", RePublish: &nats.RePublish{Source: ">", Destination: "bar.>"}, }) if err != nil { t.Fatalf("Error creating store: %v", err) } si, err := js.StreamInfo("KV_TEST") if err != nil { t.Fatalf("Error getting stream info: %v", err) } if si.Config.RePublish == nil { t.Fatal("Expected republish to be set, it was not") } sub, err := nc.SubscribeSync("bar.>") if err != nil { t.Fatalf("Error on sub: %v", err) } if _, err := kv.Put("foo", []byte("value")); err != nil { t.Fatalf("Error on put: %v", err) } msg, err := sub.NextMsg(time.Second) if err != nil { t.Fatalf("Error on next: %v", err) } if v := string(msg.Data); v != "value" { t.Fatalf("Unexpected value: %s", v) } // The message should also have a header with the actual subject kvSubjectsPreTmpl := "$KV.%s." expected := fmt.Sprintf(kvSubjectsPreTmpl, "TEST") + "foo" if v := msg.Header.Get(nats.JSSubject); v != expected { t.Fatalf("Expected subject header %q, got %q", expected, v) } } func TestKeyValueMirrorDirectGet(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() kv, err := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "TEST"}) if err != nil { t.Fatalf("Error creating kv: %v", err) } _, err = js.AddStream(&nats.StreamConfig{ Name: "MIRROR", Mirror: &nats.StreamSource{Name: "KV_TEST"}, MirrorDirect: true, }) if err != nil { t.Fatalf("Error creating mirror: %v", err) } for i := 0; i < 100; i++ { key := fmt.Sprintf("KEY.%d", i) if _, err := kv.PutString(key, "42"); err != nil { t.Fatalf("Error adding key: %v", err) } } // Make sure all gets work. 
for i := 0; i < 100; i++ { if _, err := kv.Get("KEY.22"); err != nil { t.Fatalf("Got error getting key: %v", err) } } } func TestKeyValueCreate(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() kv, err := js.CreateKeyValue(&nats.KeyValueConfig{ Bucket: "TEST", Description: "Test KV", MaxValueSize: 128, History: 10, TTL: 1 * time.Hour, MaxBytes: 1024, Storage: nats.FileStorage, }) if err != nil { t.Fatalf("Error creating kv: %v", err) } expectedStreamConfig := nats.StreamConfig{ Name: "KV_TEST", Description: "Test KV", Subjects: []string{"$KV.TEST.>"}, MaxMsgs: -1, MaxBytes: 1024, Discard: nats.DiscardNew, MaxAge: 1 * time.Hour, MaxMsgsPerSubject: 10, MaxMsgSize: 128, Storage: nats.FileStorage, DenyDelete: true, AllowRollup: true, AllowDirect: true, MaxConsumers: -1, Replicas: 1, Duplicates: 2 * time.Minute, } si, err := js.StreamInfo("KV_TEST") if err != nil { t.Fatalf("Error getting stream info: %v", err) } // Metadata is set by the server, so we need to set it here. 
expectedStreamConfig.Metadata = si.Config.Metadata if !reflect.DeepEqual(si.Config, expectedStreamConfig) { t.Fatalf("Expected stream config to be %+v, got %+v", expectedStreamConfig, si.Config) } _, err = kv.Create("key", []byte("1")) if err != nil { t.Fatalf("Error creating key: %v", err) } _, err = kv.Create("key", []byte("1")) expected := "nats: wrong last sequence: 1: key exists" if err.Error() != expected { t.Fatalf("Expected %q, got: %v", expected, err) } if !errors.Is(err, nats.ErrKeyExists) { t.Fatalf("Expected ErrKeyExists, got: %v", err) } aerr := &nats.APIError{} if !errors.As(err, &aerr) { t.Fatalf("Expected APIError, got: %v", err) } if aerr.Description != "wrong last sequence: 1" { t.Fatalf("Unexpected APIError message, got: %v", aerr.Description) } if aerr.ErrorCode != 10071 { t.Fatalf("Unexpected error code, got: %v", aerr.ErrorCode) } if aerr.Code != nats.ErrKeyExists.APIError().Code { t.Fatalf("Unexpected error code, got: %v", aerr.Code) } var kerr nats.JetStreamError if !errors.As(err, &kerr) { t.Fatalf("Expected KeyValueError, got: %v", err) } if kerr.APIError().ErrorCode != 10071 { t.Fatalf("Unexpected error code, got: %v", kerr.APIError().ErrorCode) } } func TestKeyValueSourcing(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() kvA, err := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "A"}) if err != nil { t.Fatalf("Error creating kv: %v", err) } _, err = kvA.Create("keyA", []byte("1")) if err != nil { t.Fatalf("Error creating key: %v", err) } if _, err := kvA.Get("keyA"); err != nil { t.Fatalf("Got error getting keyA from A: %v", err) } kvB, err := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "B"}) if err != nil { t.Fatalf("Error creating kv: %v", err) } _, err = kvB.Create("keyB", []byte("1")) if err != nil { t.Fatalf("Error creating key: %v", err) } kvC, err := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "C", Sources: []*nats.StreamSource{{Name: "A"}, 
{Name: "B"}}}) if err != nil { t.Fatalf("Error creating kv: %v", err) } i := 0 for { status, err := kvC.Status() if err != nil { t.Fatalf("Error getting bucket status: %v", err) } if status.Values() == 2 { break } else { i++ if i > 10 { t.Fatalf("Error sourcing bucket does not contain the expected number of values") } } time.Sleep(100 * time.Millisecond) } if _, err := kvC.Get("keyA"); err != nil { t.Fatalf("Got error getting keyA from C: %v", err) } if _, err := kvC.Get("keyB"); err != nil { t.Fatalf("Got error getting keyB from C: %v", err) } } func TestKeyValueCompression(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() kv, err := js.CreateKeyValue(&nats.KeyValueConfig{ Bucket: "A", Compression: true, }) if err != nil { t.Fatalf("Error creating kv: %v", err) } status, err := kv.Status() if err != nil { t.Fatalf("Error getting bucket status: %v", err) } if !status.IsCompressed() { t.Fatalf("Expected bucket to be compressed") } kvStream, err := js.StreamInfo("KV_A") if err != nil { t.Fatalf("Error getting stream info: %v", err) } if kvStream.Config.Compression != nats.S2Compression { t.Fatalf("Expected stream to be compressed with S2") } } func TestListKeysFromPurgedStream(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Error on connect: %v", err) } defer nc.Close() js, err := nc.JetStream(nats.MaxWait(100 * time.Millisecond)) if err != nil { t.Fatalf("Error getting jetstream context: %v", err) } kv, err := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "A"}) if err != nil { t.Fatalf("Error creating kv: %v", err) } for i := range 10000 { if _, err := kv.Put(fmt.Sprintf("key-%d", i), []byte("val")); err != nil { t.Fatalf("Error putting key: %v", err) } } // purge the stream after a bit go func() { time.Sleep(10 * time.Millisecond) if err := js.PurgeStream("KV_A"); err != nil 
{ t.Logf("Error purging stream: %v", err) } }() keys, err := kv.ListKeys() if err != nil { t.Fatalf("Error listing keys: %v", err) } // there should not be a deadlock here for { select { case _, ok := <-keys.Keys(): if !ok { return } case <-time.After(5 * time.Second): t.Fatalf("Timeout waiting for keys") } } } func TestKeyValueWatcherStopTimer(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Error on connect: %v", err) } defer nc.Close() js, err := nc.JetStream(nats.MaxWait(100 * time.Millisecond)) if err != nil { t.Fatalf("Error getting jetstream context: %v", err) } kv, err := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "TEST"}) if err != nil { t.Fatalf("Error creating kv: %v", err) } for i := range 1000 { if _, err := kv.Put(fmt.Sprintf("key-%d", i), []byte("val")); err != nil { t.Fatalf("Error putting key: %v", err) } } w, err := kv.WatchAll() if err != nil { t.Fatalf("Error creating watcher: %v", err) } if err := w.Stop(); err != nil { t.Fatalf("Error stopping watcher: %v", err) } time.Sleep(500 * time.Millisecond) } nats.go-1.41.0/test/main_test.go000066400000000000000000000013011477351342400164440ustar00rootroot00000000000000// Copyright 2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package test

import (
	"testing"

	"go.uber.org/goleak"
)

// TestMain wraps the package's tests with goleak so that any goroutine
// leaked by a test fails the run.
func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m)
}
nats.go-1.41.0/test/nats_iter_test.go000066400000000000000000000165301477351342400175220ustar00rootroot00000000000000// Copyright 2012-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build go1.23

package test

import (
	"errors"
	"fmt"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/nats-io/nats.go"
)

// TestSubscribeIterator exercises the range-over-func iterators on a sync
// subscription: MsgsTimeout (per-message wait) and Msgs (blocking), plus the
// permission-violation and async-subscription error paths.
func TestSubscribeIterator(t *testing.T) {
	t.Run("with timeout", func(t *testing.T) {
		s := RunServerOnPort(-1)
		defer s.Shutdown()

		nc, err := nats.Connect(s.ClientURL(), nats.PermissionErrOnSubscribe(true))
		if err != nil {
			t.Fatalf("Error on connect: %v", err)
		}
		defer nc.Close()

		sub, err := nc.SubscribeSync("foo")
		if err != nil {
			t.Fatal("Failed to subscribe: ", err)
		}
		defer sub.Unsubscribe()

		// Publish the first half synchronously so some messages are already
		// pending when the iterator starts.
		total := 100
		for i := 0; i < total/2; i++ {
			if err := nc.Publish("foo", []byte("Hello")); err != nil {
				t.Fatalf("Error on publish: %v", err)
			}
		}

		// Publish the second half asynchronously; errCh is closed on success
		// or receives the first publish error.
		errCh := make(chan error, 1)
		go func() {
			for i := 0; i < total/2; i++ {
				if err := nc.Publish("foo", []byte("Hello")); err != nil {
					errCh <- err
					return
				}
				time.Sleep(10 * time.Millisecond)
			}
			close(errCh)
		}()

		received := 0
		for _, err := range sub.MsgsTimeout(100 * time.Millisecond) {
			if err != nil {
				// The iterator terminates with ErrTimeout once no message
				// arrives within the timeout; anything else is a failure.
				if !errors.Is(err, nats.ErrTimeout) {
					t.Fatalf("Error on subscribe: %v", err)
				}
				break
			} else {
				received++
			}
		}
		// Surface any publish error from the async publisher (mirrors the
		// "no timeout" subtest; errCh is closed on success so this yields nil).
		if err := <-errCh; err != nil {
			t.Fatalf("Error on publish: %v", err)
		}
		if received != total {
			t.Fatalf("Expected %d messages, got %d", total, received)
		}
	})
	t.Run("no timeout", func(t *testing.T) {
		s := RunServerOnPort(-1)
		defer s.Shutdown()

		nc, err := nats.Connect(s.ClientURL(), nats.PermissionErrOnSubscribe(true))
		if err != nil {
			t.Fatalf("Error on connect: %v", err)
		}
		defer nc.Close()

		sub, err := nc.SubscribeSync("foo")
		if err != nil {
			t.Fatal("Failed to subscribe: ", err)
		}
		defer sub.Unsubscribe()

		// Send some messages to ourselves.
		total := 100
		for i := 0; i < total/2; i++ {
			if err := nc.Publish("foo", []byte("Hello")); err != nil {
				t.Fatalf("Error on publish: %v", err)
			}
		}

		received := 0
		// publish some more messages asynchronously
		errCh := make(chan error, 1)
		go func() {
			for i := 0; i < total/2; i++ {
				if err := nc.Publish("foo", []byte("Hello")); err != nil {
					errCh <- err
					return
				}
				time.Sleep(10 * time.Millisecond)
			}
			close(errCh)
		}()

		// Msgs blocks forever, so count messages and break out once all
		// expected messages have been received.
		for _, err := range sub.Msgs() {
			if err != nil {
				t.Fatalf("Error getting msg: %v", err)
			}
			received++
			if received >= total {
				break
			}
		}
		err = <-errCh
		if err != nil {
			t.Fatalf("Error on publish: %v", err)
		}
		// No more messages should be pending after the iterator is done.
		_, err = sub.NextMsg(100 * time.Millisecond)
		if !errors.Is(err, nats.ErrTimeout) {
			t.Fatalf("Expected timeout waiting for next message, got %v", err)
		}
	})
	t.Run("permissions violation", func(t *testing.T) {
		conf := createConfFile(t, []byte(`
		listen: 127.0.0.1:-1
		authorization: {
			users = [
				{
					user: test
					password: test
					permissions: {
						subscribe: {
							deny: "foo"
						}
					}
				}
			]
		}
	`))
		defer os.Remove(conf)
		s, _ := RunServerWithConfig(conf)
		defer s.Shutdown()

		nc, err := nats.Connect(s.ClientURL(), nats.UserInfo("test", "test"), nats.PermissionErrOnSubscribe(true))
		if err != nil {
			t.Fatalf("Error on connect: %v", err)
		}
		defer nc.Close()

		// Subscribing to the denied subject succeeds locally; the violation
		// is reported asynchronously and should terminate the iterator.
		sub, err := nc.SubscribeSync("foo")
		if err != nil {
			t.Fatalf("Error on subscribe: %v", err)
		}
		defer sub.Unsubscribe()

		errs := make(chan error)
		go func() {
			var err error
			for _, err = range sub.Msgs() {
				break
			}
			errs <- err
		}()

		select {
		case e := <-errs:
			if !errors.Is(e, nats.ErrPermissionViolation) {
				t.Fatalf("Expected permissions error, got %v", e)
			}
		case <-time.After(2 * time.Second):
			t.Fatalf("Did not get the permission error")
		}
		// Subsequent reads keep reporting the violation.
		_, err = sub.NextMsg(100 * time.Millisecond)
		if !errors.Is(err, nats.ErrPermissionViolation) {
			t.Fatalf("Expected permissions violation error, got %v", err)
		}
	})
	t.Run("attempt iterator on async sub", func(t *testing.T) {
		s := RunServerOnPort(-1)
		defer s.Shutdown()

		nc, err := nats.Connect(s.ClientURL(), nats.PermissionErrOnSubscribe(true))
		if err != nil {
			t.Fatalf("Error on connect: %v", err)
		}
		defer nc.Close()

		// Iterators are only valid on sync subscriptions; both forms must
		// yield ErrSyncSubRequired for a callback-based subscription.
		sub, err := nc.Subscribe("foo", func(msg *nats.Msg) {})
		if err != nil {
			t.Fatal("Failed to subscribe: ", err)
		}
		defer sub.Unsubscribe()

		for _, err := range sub.MsgsTimeout(100 * time.Millisecond) {
			if !errors.Is(err, nats.ErrSyncSubRequired) {
				t.Fatalf("Error on subscribe: %v", err)
			}
		}
		for _, err := range sub.Msgs() {
			if !errors.Is(err, nats.ErrSyncSubRequired) {
				t.Fatalf("Error on subscribe: %v", err)
			}
		}
	})
}

// TestQueueSubscribeIterator verifies that queue-group members can consume
// a stream of messages concurrently via MsgsTimeout, and that permission
// violations surface through the iterator as well.
func TestQueueSubscribeIterator(t *testing.T) {
	t.Run("basic", func(t *testing.T) {
		s := RunServerOnPort(-1)
		defer s.Shutdown()

		nc, err := nats.Connect(s.ClientURL())
		if err != nil {
			t.Fatalf("Error on connect: %v", err)
		}
		defer nc.Close()

		subs := make([]*nats.Subscription, 4)
		for i := 0; i < 4; i++ {
			sub, err := nc.QueueSubscribeSync("foo", "q")
			if err != nil {
				t.Fatal("Failed to subscribe: ", err)
			}
			subs[i] = sub
			defer sub.Unsubscribe()
		}

		// Send some messages to ourselves.
		total := 100
		for i := 0; i < total; i++ {
			if err := nc.Publish("foo", []byte(fmt.Sprintf("%d", i))); err != nil {
				t.Fatalf("Error on publish: %v", err)
			}
		}

		// wg counts every delivered message across the group; startWg makes
		// sure all consumers are running before we wait.
		wg := sync.WaitGroup{}
		wg.Add(100)
		startWg := sync.WaitGroup{}
		startWg.Add(4)
		for i := range subs {
			go func(i int) {
				startWg.Done()
				for _, err := range subs[i].MsgsTimeout(100 * time.Millisecond) {
					if err != nil {
						break
					}
					wg.Done()
				}
			}(i)
		}
		startWg.Wait()
		wg.Wait()
		// Each member's iterator should have drained its share; nothing left.
		for _, sub := range subs {
			if _, err = sub.NextMsg(100 * time.Millisecond); !errors.Is(err, nats.ErrTimeout) {
				t.Fatalf("Expected timeout waiting for next message, got %v", err)
			}
		}
	})
	t.Run("permissions violation", func(t *testing.T) {
		conf := createConfFile(t, []byte(`
		listen: 127.0.0.1:-1
		authorization: {
			users = [
				{
					user: test
					password: test
					permissions: {
						subscribe: {
							deny: "foo"
						}
					}
				}
			]
		}
	`))
		defer os.Remove(conf)
		s, _ := RunServerWithConfig(conf)
		defer s.Shutdown()

		nc, err := nats.Connect(s.ClientURL(), nats.UserInfo("test", "test"), nats.PermissionErrOnSubscribe(true))
		if err != nil {
			t.Fatalf("Error on connect: %v", err)
		}
		defer nc.Close()

		sub, err := nc.QueueSubscribeSync("foo", "q")
		if err != nil {
			t.Fatalf("Error on subscribe: %v", err)
		}
		defer sub.Unsubscribe()

		errs := make(chan error)
		go func() {
			var err error
			for _, err = range sub.MsgsTimeout(2 * time.Second) {
				break
			}
			errs <- err
		}()
		select {
		case e := <-errs:
			if !errors.Is(e, nats.ErrPermissionViolation) {
				t.Fatalf("Expected permissions error, got %v", e)
			}
		case <-time.After(2 * time.Second):
			t.Fatalf("Did not get the permission error")
		}
	})
}
nats.go-1.41.0/test/nats_test.go000066400000000000000000000744221477351342400165020ustar00rootroot00000000000000// Copyright 2023 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package test import ( "bytes" "fmt" "net" "net/url" "os" "reflect" "runtime" "strings" "sync" "sync/atomic" "testing" "time" "github.com/nats-io/nats-server/v2/server" natsserver "github.com/nats-io/nats-server/v2/test" "github.com/nats-io/nats.go" "github.com/nats-io/nkeys" ) func TestMaxConnectionsReconnect(t *testing.T) { // Start first server s1Opts := natsserver.DefaultTestOptions s1Opts.Port = -1 s1Opts.MaxConn = 2 s1Opts.Cluster = server.ClusterOpts{Name: "test", Host: "127.0.0.1", Port: -1} s1 := RunServerWithOptions(&s1Opts) defer s1.Shutdown() // Start second server s2Opts := natsserver.DefaultTestOptions s2Opts.Port = -1 s2Opts.MaxConn = 2 s2Opts.Cluster = server.ClusterOpts{Name: "test", Host: "127.0.0.1", Port: -1} s2Opts.Routes = server.RoutesFromStr(fmt.Sprintf("nats://127.0.0.1:%d", s1Opts.Cluster.Port)) s2 := RunServerWithOptions(&s2Opts) defer s2.Shutdown() errCh := make(chan error, 2) reconnectCh := make(chan struct{}) opts := []nats.Option{ nats.MaxReconnects(2), nats.ReconnectWait(10 * time.Millisecond), nats.Timeout(200 * time.Millisecond), nats.DisconnectErrHandler(func(_ *nats.Conn, err error) { if err != nil { errCh <- err } }), nats.ReconnectHandler(func(_ *nats.Conn) { reconnectCh <- struct{}{} }), } // Create two connections (the current max) to first server nc1, _ := nats.Connect(s1.ClientURL(), opts...) defer nc1.Close() nc1.Flush() nc2, _ := nats.Connect(s1.ClientURL(), opts...) defer nc2.Close() nc2.Flush() if s1.NumClients() != 2 { t.Fatalf("Expected 2 client connections to first server. 
Got %d", s1.NumClients()) } if s2.NumClients() > 0 { t.Fatalf("Expected 0 client connections to second server. Got %d", s2.NumClients()) } // Kick one of our two server connections off first server. One client should reconnect to second server newS1Opts := s1Opts newS1Opts.MaxConn = 1 err := s1.ReloadOptions(&newS1Opts) if err != nil { t.Fatalf("Unexpected error changing max_connections [%s]", err) } select { case err := <-errCh: if err != nats.ErrMaxConnectionsExceeded { t.Fatalf("Unexpected error %v", err) } case <-time.After(2 * time.Second): t.Fatal("Timed out waiting for disconnect event") } select { case <-reconnectCh: case <-time.After(2 * time.Second): t.Fatal("Timed out waiting for reconnect event") } if s2.NumClients() <= 0 || s1.NumClients() > 1 { t.Fatalf("Expected client reconnection to second server") } } func TestNoEcho(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() url := fmt.Sprintf("nats://127.0.0.1:%d", TEST_PORT) nc, err := nats.Connect(url, nats.NoEcho()) if err != nil { t.Fatalf("Error on connect: %v", err) } defer nc.Close() r := int32(0) _, err = nc.Subscribe("foo", func(m *nats.Msg) { atomic.AddInt32(&r, 1) }) if err != nil { t.Fatalf("Error on subscribe: %v", err) } err = nc.Publish("foo", []byte("Hello World")) if err != nil { t.Fatalf("Error on publish: %v", err) } nc.Flush() nc.Flush() if nr := atomic.LoadInt32(&r); nr != 0 { t.Fatalf("Expected no messages echoed back, received %d\n", nr) } } // Trust Server Tests var ( oSeed = []byte("SOAL7GTNI66CTVVNXBNQMG6V2HTDRWC3HGEP7D2OUTWNWSNYZDXWFOX4SU") aSeed = []byte("SAAASUPRY3ONU4GJR7J5RUVYRUFZXG56F4WEXELLLORQ65AEPSMIFTOJGE") uSeed = []byte("SUAMK2FG4MI6UE3ACF3FK3OIQBCEIEZV7NSWFFEW63UXMRLFM2XLAXK4GY") aJWT = 
"eyJ0eXAiOiJqd3QiLCJhbGciOiJlZDI1NTE5In0.eyJqdGkiOiJLWjZIUVRXRlY3WkRZSFo3NklRNUhPM0pINDVRNUdJS0JNMzJTSENQVUJNNk5PNkU3TUhRIiwiaWF0IjoxNTQ0MDcxODg5LCJpc3MiOiJPRDJXMkk0TVZSQTVUR1pMWjJBRzZaSEdWTDNPVEtGV1FKRklYNFROQkVSMjNFNlA0NlMzNDVZWSIsInN1YiI6IkFBUFFKUVVQS1ZYR1c1Q1pINUcySEZKVUxZU0tERUxBWlJWV0pBMjZWRFpPN1dTQlVOSVlSRk5RIiwidHlwZSI6ImFjY291bnQiLCJuYXRzIjp7ImxpbWl0cyI6eyJzdWJzIjotMSwiY29ubiI6LTEsImltcG9ydHMiOi0xLCJleHBvcnRzIjotMSwiZGF0YSI6LTEsInBheWxvYWQiOi0xLCJ3aWxkY2FyZHMiOnRydWV9fX0.8o35JPQgvhgFT84Bi2Z-zAeSiLrzzEZn34sgr1DIBEDTwa-EEiMhvTeos9cvXxoZVCCadqZxAWVwS6paAMj8Bg" uJWT = "eyJ0eXAiOiJqd3QiLCJhbGciOiJlZDI1NTE5In0.eyJqdGkiOiJBSFQzRzNXRElDS1FWQ1FUWFJUTldPRlVVUFRWNE00RFZQV0JGSFpJQUROWEZIWEpQR0FBIiwiaWF0IjoxNTQ0MDcxODg5LCJpc3MiOiJBQVBRSlFVUEtWWEdXNUNaSDVHMkhGSlVMWVNLREVMQVpSVldKQTI2VkRaTzdXU0JVTklZUkZOUSIsInN1YiI6IlVBVDZCV0NTQ1dMVUtKVDZLNk1CSkpPRU9UWFo1QUpET1lLTkVWUkZDN1ZOTzZPQTQzTjRUUk5PIiwidHlwZSI6InVzZXIiLCJuYXRzIjp7InB1YiI6e30sInN1YiI6e319fQ._8A1XM88Q2kp7XVJZ42bQuO9E3QPsNAGKtVjAkDycj8A5PtRPby9UpqBUZzBwiJQQO3TUcD5GGqSvsMm6X8hCQ" chained = ` -----BEGIN NATS USER JWT----- eyJ0eXAiOiJqd3QiLCJhbGciOiJlZDI1NTE5In0.eyJqdGkiOiJBSFQzRzNXRElDS1FWQ1FUWFJUTldPRlVVUFRWNE00RFZQV0JGSFpJQUROWEZIWEpQR0FBIiwiaWF0IjoxNTQ0MDcxODg5LCJpc3MiOiJBQVBRSlFVUEtWWEdXNUNaSDVHMkhGSlVMWVNLREVMQVpSVldKQTI2VkRaTzdXU0JVTklZUkZOUSIsInN1YiI6IlVBVDZCV0NTQ1dMVUtKVDZLNk1CSkpPRU9UWFo1QUpET1lLTkVWUkZDN1ZOTzZPQTQzTjRUUk5PIiwidHlwZSI6InVzZXIiLCJuYXRzIjp7InB1YiI6e30sInN1YiI6e319fQ._8A1XM88Q2kp7XVJZ42bQuO9E3QPsNAGKtVjAkDycj8A5PtRPby9UpqBUZzBwiJQQO3TUcD5GGqSvsMm6X8hCQ ------END NATS USER JWT------ ************************* IMPORTANT ************************* NKEY Seed printed below can be used to sign and prove identity. NKEYs are sensitive and should be treated as secrets. 
-----BEGIN USER NKEY SEED----- SUAMK2FG4MI6UE3ACF3FK3OIQBCEIEZV7NSWFFEW63UXMRLFM2XLAXK4GY ------END USER NKEY SEED------ ` ) func runTrustServer() *server.Server { kp, _ := nkeys.FromSeed(oSeed) pub, _ := kp.PublicKey() opts := natsserver.DefaultTestOptions opts.Port = TEST_PORT opts.TrustedKeys = []string{string(pub)} s := RunServerWithOptions(&opts) mr := &server.MemAccResolver{} akp, _ := nkeys.FromSeed(aSeed) apub, _ := akp.PublicKey() mr.Store(string(apub), aJWT) s.SetAccountResolver(mr) return s } func createTmpFile(t *testing.T, content []byte) string { t.Helper() conf, err := os.CreateTemp("", "") if err != nil { t.Fatalf("Error creating conf file: %v", err) } fName := conf.Name() conf.Close() if err := os.WriteFile(fName, content, 0666); err != nil { os.Remove(fName) t.Fatalf("Error writing conf file: %v", err) } return fName } func TestBasicUserJWTAuth(t *testing.T) { if server.VERSION[0] == '1' { t.Skip() } ts := runTrustServer() defer ts.Shutdown() url := fmt.Sprintf("nats://127.0.0.1:%d", TEST_PORT) _, err := nats.Connect(url) if err == nil { t.Fatalf("Expecting an error on connect") } jwtCB := func() (string, error) { return uJWT, nil } sigCB := func(nonce []byte) ([]byte, error) { kp, _ := nkeys.FromSeed(uSeed) sig, _ := kp.Sign(nonce) return sig, nil } // Try with user jwt but no sig _, err = nats.Connect(url, nats.UserJWT(jwtCB, nil)) if err == nil { t.Fatalf("Expecting an error on connect") } // Try with user callback _, err = nats.Connect(url, nats.UserJWT(nil, sigCB)) if err == nil { t.Fatalf("Expecting an error on connect") } nc, err := nats.Connect(url, nats.UserJWT(jwtCB, sigCB)) if err != nil { t.Fatalf("Expected to connect, got %v", err) } nc.Close() } func TestUserCredentialsTwoFiles(t *testing.T) { if server.VERSION[0] == '1' { t.Skip() } ts := runTrustServer() defer ts.Shutdown() userJWTFile := createTmpFile(t, []byte(uJWT)) defer os.Remove(userJWTFile) userSeedFile := createTmpFile(t, uSeed) defer os.Remove(userSeedFile) url := 
fmt.Sprintf("nats://127.0.0.1:%d", TEST_PORT) nc, err := nats.Connect(url, nats.UserCredentials(userJWTFile, userSeedFile)) if err != nil { t.Fatalf("Expected to connect, got %v", err) } nc.Close() } func TestUserCredentialsChainedFile(t *testing.T) { if server.VERSION[0] == '1' { t.Skip() } ts := runTrustServer() defer ts.Shutdown() chainedFile := createTmpFile(t, []byte(chained)) defer os.Remove(chainedFile) url := fmt.Sprintf("nats://127.0.0.1:%d", TEST_PORT) nc, err := nats.Connect(url, nats.UserCredentials(chainedFile)) if err != nil { t.Fatalf("Expected to connect, got %v", err) } nc.Close() chainedFile = createTmpFile(t, []byte("invalid content")) defer os.Remove(chainedFile) nc, err = nats.Connect(url, nats.UserCredentials(chainedFile)) if err == nil || !strings.Contains(err.Error(), "error signing nonce: unable to extract key pair from file") { if nc != nil { nc.Close() } t.Fatalf("Expected error about invalid creds file, got %q", err) } } func TestReconnectMissingCredentials(t *testing.T) { ts := runTrustServer() defer ts.Shutdown() chainedFile := createTmpFile(t, []byte(chained)) defer os.Remove(chainedFile) url := fmt.Sprintf("nats://127.0.0.1:%d", TEST_PORT) errs := make(chan error, 1) nc, err := nats.Connect(url, nats.UserCredentials(chainedFile), nats.ErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, err error) { errs <- err })) if err != nil { t.Fatalf("Expected to connect, got %v", err) } defer nc.Close() os.Remove(chainedFile) ts.Shutdown() ts = runTrustServer() defer ts.Shutdown() select { case err := <-errs: if !strings.Contains(err.Error(), "no such file or directory") { t.Fatalf("Expected error about missing creds file, got %q", err) } case <-time.After(5 * time.Second): t.Fatal("Did not get error about missing creds file") } } func TestUserJWTAndSeed(t *testing.T) { if server.VERSION[0] == '1' { t.Skip() } ts := runTrustServer() defer ts.Shutdown() url := fmt.Sprintf("nats://127.0.0.1:%d", TEST_PORT) nc, err := nats.Connect(url, 
nats.UserJWTAndSeed(uJWT, string(uSeed))) if err != nil { t.Fatalf("Expected to connect, got %v", err) } nc.Close() } // If we are using TLS and have multiple servers we try to match the IP // from a discovered server with the expected hostname for certs without IP // designations. In certain cases where there is a not authorized error and // we were trying the second server with the IP only and getting an error // that was hard to understand for the end user. This did require // Opts.Secure = false, but the fix removed the check on Opts.Secure to decide // if we need to save off the hostname that we connected to first. func TestUserCredentialsChainedFileNotFoundError(t *testing.T) { if server.VERSION[0] == '1' { t.Skip() } // Setup opts for both servers. kp, _ := nkeys.FromSeed(oSeed) pub, _ := kp.PublicKey() opts := natsserver.DefaultTestOptions opts.Port = -1 opts.Cluster.Port = -1 opts.TrustedKeys = []string{string(pub)} tc := &server.TLSConfigOpts{ CertFile: "./configs/certs/server_noip.pem", KeyFile: "./configs/certs/key_noip.pem", } var err error if opts.TLSConfig, err = server.GenTLSConfig(tc); err != nil { t.Fatalf("Unexpected error: %s", err) } // copy the opts for the second server. opts2 := opts sa := RunServerWithOptions(&opts) defer sa.Shutdown() routeAddr := fmt.Sprintf("nats-route://%s:%d", opts.Cluster.Host, opts.Cluster.Port) rurl, _ := url.Parse(routeAddr) opts2.Routes = []*url.URL{rurl} sb := RunServerWithOptions(&opts2) defer sb.Shutdown() wait := time.Now().Add(2 * time.Second) for time.Now().Before(wait) { sanr := sa.NumRoutes() sbnr := sb.NumRoutes() if sanr == 1 && sbnr == 1 { break } time.Sleep(50 * time.Millisecond) } // Make sure we get the right error here. 
nc, err := nats.Connect(fmt.Sprintf("nats://localhost:%d", opts.Port), nats.RootCAs("./configs/certs/ca.pem"), nats.UserCredentials("filenotfound.creds")) if err == nil { nc.Close() t.Fatalf("Expected an error on missing credentials file") } if !strings.Contains(err.Error(), "no such file or directory") && !strings.Contains(err.Error(), "The system cannot find the file specified") { t.Fatalf("Expected a missing file error, got %q", err) } } var natsReconnectOpts = nats.Options{ Url: fmt.Sprintf("nats://127.0.0.1:%d", TEST_PORT), AllowReconnect: true, MaxReconnect: 10, ReconnectWait: 100 * time.Millisecond, Timeout: nats.DefaultTimeout, } func TestNkeyAuth(t *testing.T) { if server.VERSION[0] == '1' { t.Skip() } seed := []byte("SUAKYRHVIOREXV7EUZTBHUHL7NUMHPMAS7QMDU3GTIUWEI5LDNOXD43IZY") kp, _ := nkeys.FromSeed(seed) pub, _ := kp.PublicKey() sopts := natsserver.DefaultTestOptions sopts.Port = TEST_PORT sopts.Nkeys = []*server.NkeyUser{{Nkey: string(pub)}} ts := RunServerWithOptions(&sopts) defer ts.Shutdown() opts := natsReconnectOpts if _, err := opts.Connect(); err == nil { t.Fatalf("Expected to fail with no nkey auth defined") } opts.Nkey = string(pub) if _, err := opts.Connect(); err != nats.ErrNkeyButNoSigCB { t.Fatalf("Expected to fail with nkey defined but no signature callback, got %v", err) } badSign := func(nonce []byte) ([]byte, error) { return []byte("VALID?"), nil } opts.SignatureCB = badSign if _, err := opts.Connect(); err == nil { t.Fatalf("Expected to fail with nkey and bad signature callback") } goodSign := func(nonce []byte) ([]byte, error) { sig, err := kp.Sign(nonce) if err != nil { t.Fatalf("Failed signing nonce: %v", err) } return sig, nil } opts.SignatureCB = goodSign nc, err := opts.Connect() if err != nil { t.Fatalf("Expected to succeed but got %v", err) } defer nc.Close() // Now disconnect by killing the server and restarting. 
ts.Shutdown() ts = RunServerWithOptions(&sopts) defer ts.Shutdown() if err := nc.FlushTimeout(5 * time.Second); err != nil { t.Fatalf("Error on Flush: %v", err) } } func TestLookupHostResultIsRandomized(t *testing.T) { orgAddrs, err := net.LookupHost("localhost") if err != nil { t.Fatalf("Error looking up host: %v", err) } // We actually want the IPv4 and IPv6 addresses, so lets make sure. if !reflect.DeepEqual(orgAddrs, []string{"::1", "127.0.0.1"}) { t.Skip("Was looking for IPv4 and IPv6 addresses for localhost to perform test") } opts := natsserver.DefaultTestOptions opts.Host = "127.0.0.1" opts.Port = TEST_PORT s1 := RunServerWithOptions(&opts) defer s1.Shutdown() opts.Host = "::1" s2 := RunServerWithOptions(&opts) defer s2.Shutdown() for i := 0; i < 100; i++ { nc, err := nats.Connect(fmt.Sprintf("localhost:%d", TEST_PORT)) if err != nil { t.Fatalf("Error on connect: %v", err) } defer nc.Close() } if ncls := s1.NumClients(); ncls < 35 || ncls > 65 { t.Fatalf("Does not seem balanced between multiple servers: s1:%d, s2:%d", s1.NumClients(), s2.NumClients()) } } func TestLookupHostResultIsNotRandomizedWithNoRandom(t *testing.T) { orgAddrs, err := net.LookupHost("localhost") if err != nil { t.Fatalf("Error looking up host: %v", err) } // We actually want the IPv4 and IPv6 addresses, so lets make sure. 
if !reflect.DeepEqual(orgAddrs, []string{"::1", "127.0.0.1"}) { t.Skip("Was looking for IPv4 and IPv6 addresses for localhost to perform test") } opts := natsserver.DefaultTestOptions opts.Host = orgAddrs[0] opts.Port = TEST_PORT s1 := RunServerWithOptions(&opts) defer s1.Shutdown() opts.Host = orgAddrs[1] s2 := RunServerWithOptions(&opts) defer s2.Shutdown() for i := 0; i < 100; i++ { nc, err := nats.Connect(fmt.Sprintf("localhost:%d", TEST_PORT), nats.DontRandomize()) if err != nil { t.Fatalf("Error on connect: %v", err) } defer nc.Close() } if ncls := s1.NumClients(); ncls != 100 { t.Fatalf("Expected all clients on first server, only got %d of 100", ncls) } } func TestConnectedAddr(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() var nc *nats.Conn if addr := nc.ConnectedAddr(); addr != "" { t.Fatalf("Expected empty result for nil connection, got %q", addr) } nc, err := nats.Connect(fmt.Sprintf("localhost:%d", TEST_PORT)) if err != nil { t.Fatalf("Error connecting: %v", err) } expected := s.Addr().String() if addr := nc.ConnectedAddr(); addr != expected { t.Fatalf("Expected address %q, got %q", expected, addr) } nc.Close() if addr := nc.ConnectedAddr(); addr != "" { t.Fatalf("Expected empty result for closed connection, got %q", addr) } } func TestSubscribeSyncRace(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() nc, err := nats.Connect(fmt.Sprintf("127.0.0.1:%d", TEST_PORT)) if err != nil { t.Fatalf("Error on connect: %v", err) } defer nc.Close() go func() { time.Sleep(time.Millisecond) nc.Close() }() subj := "foo.sync.race" for i := 0; i < 10000; i++ { if _, err := nc.SubscribeSync(subj); err != nil { break } if _, err := nc.QueueSubscribeSync(subj, "gc"); err != nil { break } } } func TestBadSubjectsAndQueueNames(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() nc, err := nats.Connect(fmt.Sprintf("127.0.0.1:%d", TEST_PORT)) if err != nil { t.Fatalf("Error connecting: %v", err) } defer nc.Close() // Make 
sure we get errors on bad subjects (spaces, etc) // We want the client to protect the user. badSubs := []string{"foo bar", "foo..bar", ".foo", "bar.baz.", "baz\t.foo"} for _, subj := range badSubs { if _, err := nc.SubscribeSync(subj); err != nats.ErrBadSubject { t.Fatalf("Expected an error of ErrBadSubject for %q, got %v", subj, err) } } badQueues := []string{"foo group", "group\t1", "g1\r\n2"} for _, q := range badQueues { if _, err := nc.QueueSubscribeSync("foo", q); err != nats.ErrBadQueueName { t.Fatalf("Expected an error of ErrBadQueueName for %q, got %v", q, err) } } } func BenchmarkNextMsgNoTimeout(b *testing.B) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() ncp, err := nats.Connect(fmt.Sprintf("127.0.0.1:%d", TEST_PORT)) if err != nil { b.Fatalf("Error connecting: %v", err) } ncs, err := nats.Connect(fmt.Sprintf("127.0.0.1:%d", TEST_PORT), nats.SyncQueueLen(b.N)) if err != nil { b.Fatalf("Error connecting: %v", err) } // Test processing speed so no long subject or payloads. subj := "a" sub, err := ncs.SubscribeSync(subj) if err != nil { b.Fatalf("Error subscribing: %v", err) } ncs.Flush() // Set it up so we can internally queue all the messages. sub.SetPendingLimits(b.N, b.N*1000) for i := 0; i < b.N; i++ { ncp.Publish(subj, nil) } ncp.Flush() // Wait for them to all be queued up, testing NextMsg not server here. // Only wait at most one second. 
wait := time.Now().Add(time.Second) for time.Now().Before(wait) { nm, _, err := sub.Pending() if err != nil { b.Fatalf("Error on Pending() - %v", err) } if nm >= b.N { break } time.Sleep(10 * time.Millisecond) } b.ResetTimer() for i := 0; i < b.N; i++ { if _, err := sub.NextMsg(10 * time.Millisecond); err != nil { b.Fatalf("Error getting message[%d]: %v", i, err) } } } func TestAuthErrorOnReconnect(t *testing.T) { // This is a bit of an artificial test, but it is to demonstrate // that if the client is disconnected from a server (not due to an auth error), // it will still correctly stop the reconnection logic if it gets twice an // auth error from the same server. o1 := natsserver.DefaultTestOptions o1.Port = -1 s1 := RunServerWithOptions(&o1) defer s1.Shutdown() o2 := natsserver.DefaultTestOptions o2.Port = -1 o2.Username = "ivan" o2.Password = "pwd" s2 := RunServerWithOptions(&o2) defer s2.Shutdown() dch := make(chan bool) cch := make(chan bool) urls := fmt.Sprintf("nats://%s:%d, nats://%s:%d", o1.Host, o1.Port, o2.Host, o2.Port) nc, err := nats.Connect(urls, nats.ReconnectWait(25*time.Millisecond), nats.ReconnectJitter(0, 0), nats.MaxReconnects(-1), nats.DontRandomize(), nats.ErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, _ error) {}), nats.DisconnectErrHandler(func(_ *nats.Conn, e error) { dch <- true }), nats.ClosedHandler(func(_ *nats.Conn) { cch <- true })) if err != nil { t.Fatalf("Expected to connect, got err: %v\n", err) } defer nc.Close() s1.Shutdown() // wait for disconnect if e := WaitTime(dch, 5*time.Second); e != nil { t.Fatal("Did not receive a disconnect callback message") } // Wait for ClosedCB if e := WaitTime(cch, 5*time.Second); e != nil { reconnects := nc.Stats().Reconnects t.Fatalf("Did not receive a closed callback message, #reconnects: %v", reconnects) } // We should have stopped after 2 reconnects. 
if reconnects := nc.Stats().Reconnects; reconnects != 2 { t.Fatalf("Expected 2 reconnects, got %v", reconnects) } // Expect connection to be closed... if !nc.IsClosed() { t.Fatalf("Wrong status: %d\n", nc.Status()) } } func TestStatsRace(t *testing.T) { o := natsserver.DefaultTestOptions o.Port = -1 s := RunServerWithOptions(&o) defer s.Shutdown() nc, err := nats.Connect(fmt.Sprintf("nats://%s:%d", o.Host, o.Port)) if err != nil { t.Fatalf("Error on connect: %v", err) } defer nc.Close() wg := sync.WaitGroup{} wg.Add(1) ch := make(chan bool) go func() { defer wg.Done() for { select { case <-ch: return default: nc.Stats() } } }() nc.Subscribe("foo", func(_ *nats.Msg) {}) for i := 0; i < 1000; i++ { nc.Publish("foo", []byte("hello")) } close(ch) wg.Wait() } func TestRequestLeaksMapEntries(t *testing.T) { o := natsserver.DefaultTestOptions o.Port = -1 s := RunServerWithOptions(&o) defer s.Shutdown() nc, err := nats.Connect(fmt.Sprintf("nats://%s:%d", o.Host, o.Port)) if err != nil { t.Fatalf("Error on connect: %v", err) } defer nc.Close() response := []byte("I will help you") nc.Subscribe("foo", func(m *nats.Msg) { nc.Publish(m.Reply, response) }) for i := 0; i < 100; i++ { msg, err := nc.Request("foo", nil, 500*time.Millisecond) if err != nil { t.Fatalf("Received an error on Request test: %s", err) } if !bytes.Equal(msg.Data, response) { t.Fatalf("Received invalid response") } } } func TestRequestMultipleReplies(t *testing.T) { o := natsserver.DefaultTestOptions o.Port = -1 s := RunServerWithOptions(&o) defer s.Shutdown() nc, err := nats.Connect(fmt.Sprintf("nats://%s:%d", o.Host, o.Port)) if err != nil { t.Fatalf("Error on connect: %v", err) } defer nc.Close() response := []byte("I will help you") nc.Subscribe("foo", func(m *nats.Msg) { m.Respond(response) m.Respond(response) }) nc.Flush() nc2, err := nats.Connect(fmt.Sprintf("nats://%s:%d", o.Host, o.Port)) if err != nil { t.Fatalf("Error on connect: %v", err) } defer nc2.Close() errCh := make(chan error, 1) // Send 
a request on bar and expect nothing go func() { if m, err := nc2.Request("bar", nil, 500*time.Millisecond); m != nil || err == nil { errCh <- fmt.Errorf("Expected no reply, got m=%+v err=%v", m, err) return } errCh <- nil }() // Send a request on foo, we use only one of the 2 replies if _, err := nc2.Request("foo", nil, time.Second); err != nil { t.Fatalf("Received an error on Request test: %s", err) } if e := <-errCh; e != nil { t.Fatal(e.Error()) } } func TestGetRTT(t *testing.T) { s := RunServerOnPort(-1) defer s.Shutdown() nc, err := nats.Connect(s.ClientURL(), nats.ReconnectWait(10*time.Millisecond), nats.ReconnectJitter(0, 0)) if err != nil { t.Fatalf("Expected to connect to server, got %v", err) } defer nc.Close() rtt, err := nc.RTT() if err != nil { t.Fatalf("Unexpected error getting RTT: %v", err) } if rtt > time.Second { t.Fatalf("RTT value too large: %v", rtt) } // We should not get a value when in any disconnected state. s.Shutdown() time.Sleep(5 * time.Millisecond) if _, err = nc.RTT(); err != nats.ErrDisconnected { t.Fatalf("Expected disconnected error getting RTT when disconnected, got %v", err) } } func TestGetClientIP(t *testing.T) { s := RunServerOnPort(-1) defer s.Shutdown() nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Expected to connect to server, got %v", err) } defer nc.Close() ip, err := nc.GetClientIP() if err != nil { t.Fatalf("Got error looking up IP: %v", err) } if !ip.IsLoopback() { t.Fatalf("Expected a loopback IP, got %v", ip) } nc.Close() if _, err := nc.GetClientIP(); err != nats.ErrConnectionClosed { t.Fatalf("Expected a connection closed error, got %v", err) } } func TestReconnectWaitJitter(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() rch := make(chan time.Time, 1) nc, err := nats.Connect(s.ClientURL(), nats.ReconnectWait(100*time.Millisecond), nats.ReconnectJitter(500*time.Millisecond, 0), nats.ReconnectHandler(func(_ *nats.Conn) { rch <- time.Now() }), ) if err != nil { t.Fatalf("Error 
during connect: %v", err) } defer nc.Close() s.Shutdown() start := time.Now() // Wait a bit so that the library tries a first time without waiting. time.Sleep(50 * time.Millisecond) s = RunServerOnPort(TEST_PORT) defer s.Shutdown() select { case end := <-rch: dur := end.Sub(start) // We should wait at least the reconnect wait + random up to 500ms. // Account for a bit of variation since we rely on the reconnect // handler which is not invoked in place. if dur < 90*time.Millisecond || dur > 800*time.Millisecond { t.Fatalf("Wrong wait: %v", dur) } case <-time.After(5 * time.Second): t.Fatalf("Should have reconnected") } nc.Close() // Use a long reconnect wait nc, err = nats.Connect(s.ClientURL(), nats.ReconnectWait(10*time.Minute)) if err != nil { t.Fatalf("Error during connect: %v", err) } defer nc.Close() // Cause a disconnect s.Shutdown() // Wait a bit for the reconnect loop to go into wait mode. time.Sleep(50 * time.Millisecond) s = RunServerOnPort(TEST_PORT) defer s.Shutdown() // Now close and expect the reconnect go routine to return.. nc.Close() // Wait a bit to give a chance for the go routine to exit. 
time.Sleep(50 * time.Millisecond) buf := make([]byte, 100000) n := runtime.Stack(buf, true) if strings.Contains(string(buf[:n]), "doReconnect") { t.Fatalf("doReconnect go routine still running:\n%s", buf[:n]) } } func TestCustomReconnectDelay(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() expectedAttempt := 1 errCh := make(chan error, 1) cCh := make(chan bool, 1) nc, err := nats.Connect(s.ClientURL(), nats.Timeout(100*time.Millisecond), // Need to lower for Windows tests nats.CustomReconnectDelay(func(n int) time.Duration { var err error var delay time.Duration if n != expectedAttempt { err = fmt.Errorf("Expected attempt to be %v, got %v", expectedAttempt, n) } else { expectedAttempt++ if n <= 4 { delay = 100 * time.Millisecond } } if err != nil { select { case errCh <- err: default: } } return delay }), nats.MaxReconnects(4), nats.ClosedHandler(func(_ *nats.Conn) { cCh <- true }), ) if err != nil { t.Fatalf("Error during connect: %v", err) } defer nc.Close() // Cause disconnect s.Shutdown() // We should be trying to reconnect 4 times start := time.Now() // Wait on error or completion of test. select { case e := <-errCh: if e != nil { t.Fatal(e.Error()) } case <-cCh: case <-time.After(2 * time.Second): t.Fatalf("No CB invoked") } // On Windows, a failed connect attempt will last as much as Timeout(), // so we need to take that into account. 
max := 500 * time.Millisecond if runtime.GOOS == "windows" { max = time.Second } if dur := time.Since(start); dur >= max { t.Fatalf("Waited too long on each reconnect: %v", dur) } } func TestMsg_RespondMsg(t *testing.T) { s := RunServerOnPort(-1) defer s.Shutdown() nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Expected to connect to server, got %v", err) } defer nc.Close() sub, err := nc.SubscribeSync(nats.NewInbox()) if err != nil { t.Fatalf("subscribe failed: %s", err) } nc.PublishMsg(&nats.Msg{Reply: sub.Subject, Subject: sub.Subject, Data: []byte("request")}) req, err := sub.NextMsg(time.Second) if err != nil { t.Fatalf("NextMsg failed: %s", err) } // verifies that RespondMsg sets the reply subject on msg based on req err = req.RespondMsg(&nats.Msg{Data: []byte("response")}) if err != nil { t.Fatalf("RespondMsg failed: %s", err) } resp, err := sub.NextMsg(time.Second) if err != nil { t.Fatalf("NextMsg failed: %s", err) } if !bytes.Equal(resp.Data, []byte("response")) { t.Fatalf("did not get correct response: %q", resp.Data) } } func TestCustomInboxPrefix(t *testing.T) { opts := &nats.Options{} for _, p := range []string{"$BOB.", "$BOB.*", "$BOB.>", ">", ".", "", "BOB.*.X", "BOB.>.X"} { err := nats.CustomInboxPrefix(p)(opts) if err == nil { t.Fatalf("Expected error for %q", p) } } s := RunServerOnPort(-1) defer s.Shutdown() nc, err := nats.Connect(s.ClientURL(), nats.CustomInboxPrefix("$BOB")) if err != nil { t.Fatalf("Expected to connect to server, got %v", err) } defer nc.Close() sub, err := nc.Subscribe(nats.NewInbox(), func(msg *nats.Msg) { if !strings.HasPrefix(msg.Reply, "$BOB.") { t.Fatalf("invalid inbox subject %q received", msg.Reply) } if len(strings.Split(msg.Reply, ".")) != 3 { t.Fatalf("invalid number tokens in %s", msg.Reply) } msg.Respond([]byte("ok")) }) if err != nil { t.Fatalf("subscribe failed: %s", err) } resp, err := nc.Request(sub.Subject, nil, time.Second) if err != nil { t.Fatalf("request failed: %s", err) } if 
!bytes.Equal(resp.Data, []byte("ok")) { t.Fatalf("did not receive ok: %q", resp.Data) } } func TestRespInbox(t *testing.T) { s := RunServerOnPort(-1) defer s.Shutdown() nc, err := nats.Connect(s.ClientURL()) if err != nil { t.Fatalf("Expected to connect to server, got %v", err) } defer nc.Close() if _, err := nc.Subscribe("foo", func(msg *nats.Msg) { lastDot := strings.LastIndex(msg.Reply, ".") if lastDot == -1 { msg.Respond([]byte(fmt.Sprintf("Invalid reply subject: %q", msg.Reply))) return } lastToken := msg.Reply[lastDot+1:] replySuffixLen := 8 if len(lastToken) != replySuffixLen { msg.Respond([]byte(fmt.Sprintf("Invalid last token: %q", lastToken))) return } msg.Respond(nil) }); err != nil { t.Fatalf("subscribe failed: %s", err) } resp, err := nc.Request("foo", []byte("check inbox"), time.Second) if err != nil { t.Fatalf("Request failed: %v", err) } if len(resp.Data) > 0 { t.Fatalf("Error: %s", resp.Data) } } func TestInProcessConn(t *testing.T) { s := RunServerOnPort(-1) defer s.Shutdown() nc, err := nats.Connect("", nats.InProcessServer(s)) if err != nil { t.Fatal(err) } defer nc.Close() // Status should be connected. if nc.Status() != nats.CONNECTED { t.Fatal("should be status CONNECTED") } // The server should respond to a request. if _, err := nc.RTT(); err != nil { t.Fatal(err) } } nats.go-1.41.0/test/netchan_test.go000066400000000000000000000171401477351342400171500ustar00rootroot00000000000000// Copyright 2013-2019 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package test import ( "testing" "time" "github.com/nats-io/nats.go" ) //lint:file-ignore SA1019 Ignore deprecation warnings for EncodedConn // NewEConn func NewEConn(t tLogger) *nats.EncodedConn { ec, err := nats.NewEncodedConn(NewDefaultConnection(t), nats.DEFAULT_ENCODER) if err != nil { t.Fatalf("Failed to create an encoded connection: %v\n", err) } return ec } func TestBadChan(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() ec := NewEConn(t) defer ec.Close() if err := ec.BindSendChan("foo", "not a chan"); err == nil { t.Fatalf("Expected an Error when sending a non-channel\n") } if _, err := ec.BindRecvChan("foo", "not a chan"); err == nil { t.Fatalf("Expected an Error when sending a non-channel\n") } if err := ec.BindSendChan("foo", "not a chan"); err != nats.ErrChanArg { t.Fatalf("Expected an ErrChanArg when sending a non-channel\n") } if _, err := ec.BindRecvChan("foo", "not a chan"); err != nats.ErrChanArg { t.Fatalf("Expected an ErrChanArg when sending a non-channel\n") } } func TestSimpleSendChan(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() ec := NewEConn(t) defer ec.Close() recv := make(chan bool) numSent := int32(22) ch := make(chan int32) if err := ec.BindSendChan("foo", ch); err != nil { t.Fatalf("Failed to bind to a send channel: %v\n", err) } ec.Subscribe("foo", func(num int32) { if num != numSent { t.Fatalf("Failed to receive correct value: %d vs %d\n", num, numSent) } recv <- true }) // Send to 'foo' ch <- numSent if e := Wait(recv); e != nil { if ec.LastError() != nil { e = ec.LastError() } t.Fatalf("Did not receive the message: %s", e) } close(ch) } func TestFailedChannelSend(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() ec := NewEConn(t) defer ec.Close() nc := ec.Conn ch := make(chan bool) wch := make(chan bool) nc.Opts.AsyncErrorCB = func(c *nats.Conn, s *nats.Subscription, e error) { wch <- true } if 
err := ec.BindSendChan("foo", ch); err != nil { t.Fatalf("Failed to bind to a receive channel: %v\n", err) } nc.Flush() go func() { time.Sleep(100 * time.Millisecond) nc.Close() }() func() { for { select { case ch <- true: case <-wch: return case <-time.After(time.Second): t.Fatal("Failed to get async error cb") } } }() ec = NewEConn(t) defer ec.Close() nc = ec.Conn bch := make(chan []byte) nc.Opts.AsyncErrorCB = func(c *nats.Conn, s *nats.Subscription, e error) { wch <- true } if err := ec.BindSendChan("foo", bch); err != nil { t.Fatalf("Failed to bind to a receive channel: %v\n", err) } buf := make([]byte, 2*1024*1024) bch <- buf if e := Wait(wch); e != nil { t.Fatal("Failed to call async err handler") } } func TestSimpleRecvChan(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() ec := NewEConn(t) defer ec.Close() numSent := int32(22) ch := make(chan int32) if _, err := ec.BindRecvChan("foo", ch); err != nil { t.Fatalf("Failed to bind to a receive channel: %v\n", err) } ec.Publish("foo", numSent) // Receive from 'foo' select { case num := <-ch: if num != numSent { t.Fatalf("Failed to receive correct value: %d vs %d\n", num, numSent) } case <-time.After(1 * time.Second): t.Fatalf("Failed to receive a value, timed-out\n") } close(ch) } func TestQueueRecvChan(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() ec := NewEConn(t) defer ec.Close() numSent := int32(22) ch := make(chan int32) if _, err := ec.BindRecvQueueChan("foo", "bar", ch); err != nil { t.Fatalf("Failed to bind to a queue receive channel: %v\n", err) } ec.Publish("foo", numSent) // Receive from 'foo' select { case num := <-ch: if num != numSent { t.Fatalf("Failed to receive correct value: %d vs %d\n", num, numSent) } case <-time.After(1 * time.Second): t.Fatalf("Failed to receive a value, timed-out\n") } close(ch) } func TestDecoderErrRecvChan(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() ec := NewEConn(t) defer ec.Close() nc := ec.Conn wch := make(chan bool) 
nc.Opts.AsyncErrorCB = func(c *nats.Conn, s *nats.Subscription, e error) { wch <- true } ch := make(chan *int32) if _, err := ec.BindRecvChan("foo", ch); err != nil { t.Fatalf("Failed to bind to a send channel: %v\n", err) } ec.Publish("foo", "Hello World") if e := Wait(wch); e != nil { t.Fatal("Failed to call async err handler") } } func TestRecvChanPanicOnClosedChan(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() ec := NewEConn(t) defer ec.Close() ch := make(chan int) if _, err := ec.BindRecvChan("foo", ch); err != nil { t.Fatalf("Failed to bind to a send channel: %v\n", err) } close(ch) ec.Publish("foo", 22) ec.Flush() } func TestRecvChanAsyncLeakGoRoutines(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() ec := NewEConn(t) defer ec.Close() // Call this to make sure that we have everything setup connection wise ec.Flush() base := getStableNumGoroutine(t) ch := make(chan int) if _, err := ec.BindRecvChan("foo", ch); err != nil { t.Fatalf("Failed to bind to a send channel: %v\n", err) } // Close the receive Channel close(ch) // The publish will trigger the close and shutdown of the Go routines ec.Publish("foo", 22) ec.Flush() checkNoGoroutineLeak(t, base, "closing channel") } func TestRecvChanLeakGoRoutines(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() ec := NewEConn(t) defer ec.Close() // Call this to make sure that we have everything setup connection wise ec.Flush() base := getStableNumGoroutine(t) ch := make(chan int) sub, err := ec.BindRecvChan("foo", ch) if err != nil { t.Fatalf("Failed to bind to a send channel: %v\n", err) } sub.Unsubscribe() checkNoGoroutineLeak(t, base, "Unsubscribe()") } func TestRecvChanMultipleMessages(t *testing.T) { // Make sure we can receive more than one message. // In response to #25, which is a bug from fixing #22. s := RunDefaultServer() defer s.Shutdown() ec := NewEConn(t) defer ec.Close() // Num to send, should == len of messages queued. 
size := 10 ch := make(chan int, size) if _, err := ec.BindRecvChan("foo", ch); err != nil { t.Fatalf("Failed to bind to a send channel: %v\n", err) } for i := 0; i < size; i++ { ec.Publish("foo", 22) } ec.Flush() time.Sleep(10 * time.Millisecond) if lch := len(ch); lch != size { t.Fatalf("Expected %d messages queued, got %d.", size, lch) } } func BenchmarkPublishSpeedViaChan(b *testing.B) { b.StopTimer() s := RunDefaultServer() defer s.Shutdown() nc, err := nats.Connect(nats.DefaultURL) if err != nil { b.Fatalf("Could not connect: %v\n", err) } ec, err := nats.NewEncodedConn(nc, nats.DEFAULT_ENCODER) if err != nil { b.Fatalf("Failed creating encoded connection: %v\n", err) } defer ec.Close() ch := make(chan int32, 1024) if err := ec.BindSendChan("foo", ch); err != nil { b.Fatalf("Failed to bind to a send channel: %v\n", err) } b.StartTimer() num := int32(22) for i := 0; i < b.N; i++ { ch <- num } // Make sure they are all processed. nc.Flush() b.StopTimer() } nats.go-1.41.0/test/norace_test.go000066400000000000000000000426451477351342400170070ustar00rootroot00000000000000// Copyright 2019-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !race && !skip_no_race_tests package test import ( "bytes" "context" "crypto/rand" "fmt" "io" "os" "strings" "testing" "time" "github.com/nats-io/nats.go" ) func TestNoRaceObjectContextOpt(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() obs, err := js.CreateObjectStore(&nats.ObjectStoreConfig{Bucket: "OBJS"}) expectOk(t, err) ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) time.AfterFunc(100*time.Millisecond, cancel) start := time.Now() _, err = obs.Put(&nats.ObjectMeta{Name: "TEST"}, &slow{1000}, nats.Context(ctx)) expectErr(t, err) if delta := time.Since(start); delta > time.Second { t.Fatalf("Cancel took too long: %v", delta) } si, err := js.StreamInfo("OBJ_OBJS") expectOk(t, err) if si.State.Msgs != 0 { t.Fatalf("Expected no messages after canceling put, got %+v", si.State) } // Now put a large object in there. blob := make([]byte, 16*1024*1024) rand.Read(blob) _, err = obs.PutBytes("BLOB", blob) expectOk(t, err) ctx, cancel = context.WithTimeout(context.Background(), 2*time.Second) time.AfterFunc(10*time.Millisecond, cancel) start = time.Now() _, err = obs.GetBytes("BLOB", nats.Context(ctx)) expectErr(t, err) if delta := time.Since(start); delta > 2500*time.Millisecond { t.Fatalf("Cancel took too long: %v", delta) } } type slow struct{ n int } func (sr *slow) Read(p []byte) (n int, err error) { if sr.n <= 0 { return 0, io.EOF } sr.n-- time.Sleep(10 * time.Millisecond) p[0] = 'A' return 1, nil } func TestNoRaceObjectDoublePut(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() obs, err := js.CreateObjectStore(&nats.ObjectStoreConfig{Bucket: "OBJS"}) expectOk(t, err) _, err = obs.PutBytes("A", bytes.Repeat([]byte("A"), 1_000_000)) expectOk(t, err) _, err = obs.PutBytes("A", bytes.Repeat([]byte("a"), 20_000_000)) expectOk(t, err) _, err = obs.GetBytes("A") 
expectOk(t, err) } func TestNoRaceJetStreamConsumerSlowConsumer(t *testing.T) { // This test fails many times, need to look harder at the imbalance. t.SkipNow() s := RunServerOnPort(-1) defer shutdownJSServerAndRemoveStorage(t, s) if err := s.EnableJetStream(nil); err != nil { t.Fatalf("Expected no error, got %v", err) } nc, js := jsClient(t, s) defer nc.Close() var err error _, err = js.AddStream(&nats.StreamConfig{ Name: "PENDING_TEST", Subjects: []string{"js.p"}, Storage: nats.MemoryStorage, }) if err != nil { t.Fatalf("stream create failed: %v", err) } // Override default handler for test. nc.SetErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, _ error) {}) // Queue up 1M small messages. toSend := uint64(1000000) for i := uint64(0); i < toSend; i++ { nc.Publish("js.p", []byte("ok")) } nc.Flush() str, err := js.StreamInfo("PENDING_TEST") if err != nil { t.Fatal(err) } if nm := str.State.Msgs; nm != toSend { t.Fatalf("Expected to have stored all %d msgs, got only %d", toSend, nm) } var received uint64 done := make(chan bool, 1) js.Subscribe("js.p", func(m *nats.Msg) { received++ if received >= toSend { done <- true } meta, err := m.Metadata() if err != nil { t.Fatalf("could not get message metadata: %s", err) } if meta.Sequence.Stream != received { t.Errorf("Missed a sequence, was expecting %d but got %d, last error: '%v'", received, meta.Sequence.Stream, nc.LastError()) nc.Close() } m.Ack() }) select { case <-time.After(5 * time.Second): t.Fatalf("Failed to get all %d messages, only got %d", toSend, received) case <-done: } } func TestNoRaceJetStreamPushFlowControlHeartbeats_SubscribeSync(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) errHandler := nats.ErrorHandler(func(c *nats.Conn, sub *nats.Subscription, err error) { t.Logf("WARN: %s", err) }) nc, js := jsClient(t, s, errHandler) defer nc.Close() var err error _, err = js.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"foo"}, }) if err != nil { 
t.Fatalf("Unexpected error: %v", err) } // Burst and try to hit the flow control limit of the server. const totalMsgs = 16536 payload := strings.Repeat("A", 1024) for i := 0; i < totalMsgs; i++ { if _, err := js.Publish("foo", []byte(fmt.Sprintf("i:%d/", i)+payload)); err != nil { t.Fatal(err) } } hbTimer := 100 * time.Millisecond sub, err := js.SubscribeSync("foo", nats.AckWait(30*time.Second), nats.MaxDeliver(1), nats.EnableFlowControl(), nats.IdleHeartbeat(hbTimer), ) if err != nil { t.Fatal(err) } defer sub.Unsubscribe() info, err := sub.ConsumerInfo() if err != nil { t.Fatal(err) } if !info.Config.FlowControl { t.Fatal("Expected Flow Control to be enabled") } if info.Config.Heartbeat != hbTimer { t.Errorf("Expected %v, got: %v", hbTimer, info.Config.Heartbeat) } m, err := sub.NextMsg(1 * time.Second) if err != nil { t.Fatalf("Error getting next message: %v", err) } meta, err := m.Metadata() if err != nil { t.Fatal(err) } if meta.NumPending > totalMsgs { t.Logf("WARN: More pending messages than expected (%v), got: %v", totalMsgs, meta.NumPending) } err = m.Ack() if err != nil { t.Fatal(err) } recvd := 1 timeout := time.Now().Add(10 * time.Second) for time.Now().Before(timeout) { m, err := sub.NextMsg(1 * time.Second) if err != nil { t.Fatalf("Error getting next message: %v", err) } if len(m.Data) == 0 { t.Fatalf("Unexpected empty message: %+v", m) } if err := m.AckSync(); err != nil { t.Fatalf("Error on ack message: %v", err) } recvd++ if recvd == totalMsgs { break } } t.Run("idle heartbeats", func(t *testing.T) { // Delay to get a few heartbeats. time.Sleep(4 * hbTimer) timeout = time.Now().Add(5 * time.Second) for time.Now().Before(timeout) { msg, err := sub.NextMsg(200 * time.Millisecond) if err != nil { if err == nats.ErrTimeout { // If timeout, ok to stop checking for the test. 
break } t.Fatal(err) } if len(msg.Data) == 0 { t.Fatalf("Unexpected empty message: %+v", m) } recvd++ meta, err := msg.Metadata() if err != nil { t.Fatal(err) } if meta.NumPending == 0 { break } } if recvd > totalMsgs { t.Logf("WARN: Received more messages than expected (%v), got: %v", totalMsgs, recvd) } }) t.Run("with context", func(t *testing.T) { sub, err := js.SubscribeSync("foo", nats.AckWait(30*time.Second), nats.Durable("bar"), nats.EnableFlowControl(), nats.IdleHeartbeat(hbTimer), ) if err != nil { t.Fatal(err) } defer sub.Unsubscribe() info, err = sub.ConsumerInfo() if err != nil { t.Fatal(err) } if !info.Config.FlowControl { t.Fatal("Expected Flow Control to be enabled") } recvd = 0 ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() for { select { case <-ctx.Done(): t.Fatal(ctx.Err()) default: } m, err := sub.NextMsgWithContext(ctx) if err != nil { t.Fatalf("Error getting next message: %v", err) } if len(m.Data) == 0 { t.Fatalf("Unexpected empty message: %+v", m) } if err := m.Ack(); err != nil { t.Fatalf("Error on ack message: %v", err) } recvd++ if recvd >= totalMsgs { break } } // Delay to get a few heartbeats. 
time.Sleep(4 * hbTimer) ctx, cancel = context.WithTimeout(context.Background(), time.Second) defer cancel() FOR_LOOP: for { select { case <-ctx.Done(): if ctx.Err() == context.DeadlineExceeded { break FOR_LOOP } default: } msg, err := sub.NextMsgWithContext(ctx) if err != nil { if err == context.DeadlineExceeded { break } t.Fatal(err) } if len(msg.Data) == 0 { t.Fatalf("Unexpected empty message: %+v", m) } recvd++ meta, err := msg.Metadata() if err != nil { t.Fatal(err) } if meta.NumPending == 0 { break } } if recvd > totalMsgs { t.Logf("WARN: Received more messages than expected (%v), got: %v", totalMsgs, recvd) } }) } func TestNoRaceJetStreamPushFlowControlHeartbeats_SubscribeAsync(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() var err error _, err = js.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"foo"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } // Burst and try to hit the flow control limit of the server. 
const totalMsgs = 16536 payload := strings.Repeat("A", 1024) for i := 0; i < totalMsgs; i++ { if _, err := js.Publish("foo", []byte(payload)); err != nil { t.Fatal(err) } } recvd := make(chan *nats.Msg, totalMsgs) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() errCh := make(chan error) hbTimer := 100 * time.Millisecond sub, err := js.Subscribe("foo", func(msg *nats.Msg) { if len(msg.Data) == 0 { errCh <- fmt.Errorf("Unexpected empty message: %+v", msg) } recvd <- msg if len(recvd) == totalMsgs { cancel() } }, nats.EnableFlowControl(), nats.IdleHeartbeat(hbTimer)) if err != nil { t.Fatal(err) } defer sub.Unsubscribe() info, err := sub.ConsumerInfo() if err != nil { t.Fatal(err) } if !info.Config.FlowControl { t.Fatal("Expected Flow Control to be enabled") } if info.Config.Heartbeat != hbTimer { t.Errorf("Expected %v, got: %v", hbTimer, info.Config.Heartbeat) } <-ctx.Done() got := len(recvd) expected := totalMsgs if got != expected { t.Errorf("Expected %v, got: %v", expected, got) } // Wait for a couple of heartbeats to arrive and confirm there is no error. select { case <-time.After(1 * time.Second): case err := <-errCh: t.Fatal(err) } } func TestNoRaceJetStreamPushFlowControlHeartbeats_ChanSubscribe(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) errHandler := nats.ErrorHandler(func(c *nats.Conn, sub *nats.Subscription, err error) { t.Logf("WARN: %s : %v", err, sub.Subject) }) nc, js := jsClient(t, s, errHandler) defer nc.Close() var err error _, err = js.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"foo"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } // Burst and try to hit the flow control limit of the server. 
const totalMsgs = 16536 payload := strings.Repeat("A", 1024) for i := 0; i < totalMsgs; i++ { if _, err := js.Publish("foo", []byte(fmt.Sprintf("i:%d/", i)+payload)); err != nil { t.Fatal(err) } } hbTimer := 100 * time.Millisecond mch := make(chan *nats.Msg, 16536) sub, err := js.ChanSubscribe("foo", mch, nats.AckWait(30*time.Second), nats.MaxDeliver(1), nats.EnableFlowControl(), nats.IdleHeartbeat(hbTimer), ) if err != nil { t.Fatal(err) } defer sub.Unsubscribe() info, err := sub.ConsumerInfo() if err != nil { t.Fatal(err) } if !info.Config.FlowControl { t.Fatal("Expected Flow Control to be enabled") } if info.Config.Heartbeat != hbTimer { t.Errorf("Expected %v, got: %v", hbTimer, info.Config.Heartbeat) } getNextMsg := func(mch chan *nats.Msg, timeout time.Duration) (*nats.Msg, error) { t.Helper() select { case m := <-mch: return m, nil case <-time.After(timeout): return nil, nats.ErrTimeout } } m, err := getNextMsg(mch, 1*time.Second) if err != nil { t.Fatalf("Error getting next message: %v", err) } meta, err := m.Metadata() if err != nil { t.Fatal(err) } if meta.NumPending > totalMsgs { t.Logf("WARN: More pending messages than expected (%v), got: %v", totalMsgs, meta.NumPending) } err = m.Ack() if err != nil { t.Fatal(err) } recvd := 1 ctx, done := context.WithTimeout(context.Background(), 10*time.Second) defer done() Loop: for { select { case <-ctx.Done(): break Loop case m := <-mch: if err != nil { t.Fatalf("Error getting next message: %v", err) } if len(m.Data) == 0 { t.Fatalf("Unexpected empty message: %+v", m) } if err := m.Ack(); err != nil { t.Fatalf("Error on ack message: %v", err) } recvd++ if recvd == totalMsgs { done() } } } t.Run("idle heartbeats", func(t *testing.T) { // Delay to get a few heartbeats. 
time.Sleep(4 * hbTimer) ctx, done := context.WithTimeout(context.Background(), 1*time.Second) defer done() Loop: for { select { case <-ctx.Done(): break Loop case msg := <-mch: if err != nil { if err == nats.ErrTimeout { // If timeout, ok to stop checking for the test. break Loop } t.Fatal(err) } if len(msg.Data) == 0 { t.Fatalf("Unexpected empty message: %+v", m) } recvd++ meta, err := msg.Metadata() if err != nil { t.Fatal(err) } if meta.NumPending == 0 { break Loop } } } if recvd > totalMsgs { t.Logf("WARN: Received more messages than expected (%v), got: %v", totalMsgs, recvd) } }) } func TestNoRaceJetStreamPushFlowControl_SubscribeAsyncAndChannel(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) errCh := make(chan error) errHandler := nats.ErrorHandler(func(c *nats.Conn, sub *nats.Subscription, err error) { errCh <- err }) nc, err := nats.Connect(s.ClientURL(), errHandler) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer nc.Close() const totalMsgs = 10_000 js, err := nc.JetStream(nats.PublishAsyncMaxPending(totalMsgs)) if err != nil { t.Fatalf("Unexpected error: %v", err) } _, err = js.AddStream(&nats.StreamConfig{ Name: "TEST", Subjects: []string{"foo"}, }) if err != nil { t.Fatalf("Unexpected error: %v", err) } go func() { payload := strings.Repeat("O", 4096) for i := 0; i < totalMsgs; i++ { js.PublishAsync("foo", []byte(payload)) } }() // Small channel that blocks and then buffered channel that can deliver all // messages without blocking. 
recvd := make(chan *nats.Msg, 64) delivered := make(chan *nats.Msg, totalMsgs) ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() // Dispatch channel consumer go func() { for m := range recvd { select { case <-ctx.Done(): return default: } delivered <- m if len(delivered) == totalMsgs { cancel() return } } }() sub, err := js.Subscribe("foo", func(msg *nats.Msg) { // Cause bottleneck by having channel block when full // because of work taking long. recvd <- msg }, nats.EnableFlowControl(), nats.IdleHeartbeat(5*time.Second)) if err != nil { t.Fatal(err) } defer sub.Unsubscribe() // Set this lower then normal to make sure we do not exceed bytes pending with FC turned on. sub.SetPendingLimits(totalMsgs, 4*1024*1024) // This matches server window for flowcontrol. info, err := sub.ConsumerInfo() if err != nil { t.Fatal(err) } if !info.Config.FlowControl { t.Fatal("Expected Flow Control to be enabled") } <-ctx.Done() got := len(delivered) expected := totalMsgs if got != expected { t.Errorf("Expected %d messages, got: %d", expected, got) } // Wait for a couple of heartbeats to arrive and confirm there is no error. select { case <-time.After(1 * time.Second): case err := <-errCh: t.Errorf("error handler: %v", err) } } func TestNoRaceJetStreamChanSubscribeStall(t *testing.T) { conf := createConfFile(t, []byte(` listen: 127.0.0.1:-1 jetstream: enabled no_auth_user: pc accounts: { JS: { jetstream: enabled users: [ {user: pc, password: foo} ] }, } `)) defer os.Remove(conf) s, _ := RunServerWithConfig(conf) defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() var err error // Create a stream. 
if _, err = js.AddStream(&nats.StreamConfig{Name: "STALL"}); err != nil { t.Fatalf("Unexpected error: %v", err) } _, err = js.StreamInfo("STALL") if err != nil { t.Fatalf("stream lookup failed: %v", err) } msg := []byte(strings.Repeat("A", 512)) toSend := 100_000 for i := 0; i < toSend; i++ { // Use plain NATS here for speed. if _, err := js.PublishAsync("STALL", msg); err != nil { t.Fatalf("Unexpected error: %v", err) } } select { case <-js.PublishAsyncComplete(): case <-time.After(5 * time.Second): t.Fatalf("Timeout waiting for messages") } nc.Flush() batch := 100 msgs := make(chan *nats.Msg, batch-2) sub, err := js.ChanSubscribe("STALL", msgs, nats.Durable("dlc"), nats.EnableFlowControl(), nats.IdleHeartbeat(5*time.Second), nats.MaxAckPending(batch-2), ) if err != nil { t.Fatalf("Unexpected error: %v", err) } defer sub.Unsubscribe() for received := 0; received < toSend; { select { case m := <-msgs: received++ meta, _ := m.Metadata() if meta.Sequence.Consumer != uint64(received) { t.Fatalf("Missed something, wanted %d but got %d", received, meta.Sequence.Consumer) } m.Ack() case <-time.After(time.Second): t.Fatalf("Timeout waiting for messages, last received was %d", received) } } } nats.go-1.41.0/test/object_test.go000066400000000000000000000747271477351342400170140ustar00rootroot00000000000000// Copyright 2022-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package test import ( "bytes" "context" "crypto/rand" "crypto/sha256" "fmt" "io" "os" "path" "path/filepath" "reflect" "testing" "time" "github.com/nats-io/nats.go" ) func TestObjectBasics(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() _, err := js.CreateObjectStore(nil) expectErr(t, err, nats.ErrObjectConfigRequired) _, err = js.CreateObjectStore(&nats.ObjectStoreConfig{Bucket: "notok!", Description: "testing"}) expectErr(t, err, nats.ErrInvalidStoreName) obs, err := js.CreateObjectStore(&nats.ObjectStoreConfig{Bucket: "OBJS", Description: "testing"}) expectOk(t, err) // Create ~16MB object. blob := make([]byte, 16*1024*1024+22) rand.Read(blob) now := time.Now().UTC().Round(time.Second) _, err = obs.PutBytes("BLOB", blob) expectOk(t, err) // Test info info, err := obs.GetInfo("BLOB") expectOk(t, err) if len(info.NUID) == 0 { t.Fatalf("Expected object to have a NUID") } if info.ModTime.IsZero() { t.Fatalf("Expected object to have a non-zero ModTime") } if mt := info.ModTime.Round(time.Second); mt.Sub(now) != 0 && mt.Sub(now) != time.Second { t.Fatalf("Expected ModTime to be about %v, got %v", now, mt) } // Make sure the stream is sealed. err = obs.Seal() expectOk(t, err) si, err := js.StreamInfo("OBJ_OBJS") expectOk(t, err) if !si.Config.Sealed { t.Fatalf("Expected the object stream to be sealed, got %+v", si) } status, err := obs.Status() expectOk(t, err) if !status.Sealed() { t.Fatalf("expected sealed status") } if status.Size() == 0 { t.Fatalf("size is 0") } if status.Storage() != nats.FileStorage { t.Fatalf("stauts reports %d storage", status.Storage()) } if status.Description() != "testing" { t.Fatalf("invalid description: '%s'", status.Description()) } // Now get the object back. result, err := obs.Get("BLOB") expectOk(t, err) expectOk(t, result.Error()) defer result.Close() // Now get the object back with a context option. 
result, err = obs.Get("BLOB", nats.Context(context.Background())) expectOk(t, err) expectOk(t, result.Error()) defer result.Close() // Check info. info, err = result.Info() expectOk(t, err) if info.Size != uint64(len(blob)) { t.Fatalf("Size does not match, %d vs %d", info.Size, len(blob)) } // Check result. copy, err := io.ReadAll(result) expectOk(t, err) if !bytes.Equal(copy, blob) { t.Fatalf("Result not the same") } // Check simple errors. _, err = obs.Get("FOO") expectErr(t, err, nats.ErrObjectNotFound) _, err = obs.Get("") expectErr(t, err, nats.ErrNameRequired) _, err = obs.PutBytes("", blob) expectErr(t, err, nats.ErrBadObjectMeta) // Test delete. err = js.DeleteObjectStore("OBJS") expectOk(t, err) _, err = obs.Get("BLOB") expectErr(t, err, nats.ErrStreamNotFound) } func TestGetObjectDigestMismatch(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() obs, err := js.CreateObjectStore(&nats.ObjectStoreConfig{Bucket: "FOO"}) expectOk(t, err) _, err = obs.PutString("A", "abc") expectOk(t, err) res, err := obs.Get("A") expectOk(t, err) // first read should be successful data, err := io.ReadAll(res) expectOk(t, err) if string(data) != "abc" { t.Fatalf("Expected result: 'abc'; got: %s", string(data)) } info, err := obs.GetInfo("A") expectOk(t, err) // add new chunk after using Put(), this will change the digest hash on Get() _, err = js.Publish(fmt.Sprintf("$O.FOO.C.%s", info.NUID), []byte("123")) expectOk(t, err) res, err = obs.Get("A") expectOk(t, err) _, err = io.ReadAll(res) expectErr(t, err, nats.ErrDigestMismatch) expectErr(t, res.Error(), nats.ErrDigestMismatch) } func TestDefaultObjectStatus(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() obs, err := js.CreateObjectStore(&nats.ObjectStoreConfig{Bucket: "OBJS", Description: "testing"}) expectOk(t, err) blob := make([]byte, 1024*1024+22) 
rand.Read(blob) _, err = obs.PutBytes("BLOB", blob) expectOk(t, err) status, err := obs.Status() expectOk(t, err) if status.BackingStore() != "JetStream" { t.Fatalf("invalid backing store kind: %s", status.BackingStore()) } bs := status.(*nats.ObjectBucketStatus) info := bs.StreamInfo() if info.Config.Name != "OBJ_OBJS" { t.Fatalf("invalid stream name %+v", info) } } func TestObjectFileBasics(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() obs, err := js.CreateObjectStore(&nats.ObjectStoreConfig{Bucket: "FILES"}) expectOk(t, err) // Create ~8MB object. blob := make([]byte, 8*1024*1024+33) rand.Read(blob) tmpFile, err := os.CreateTemp("", "objfile") expectOk(t, err) defer os.Remove(tmpFile.Name()) // clean up err = os.WriteFile(tmpFile.Name(), blob, 0600) expectOk(t, err) _, err = obs.PutFile(tmpFile.Name()) expectOk(t, err) tmpResult, err := os.CreateTemp("", "objfileresult") expectOk(t, err) defer os.Remove(tmpResult.Name()) // clean up err = obs.GetFile(tmpFile.Name(), tmpResult.Name()) expectOk(t, err) // Make sure they are the same. original, err := os.ReadFile(tmpFile.Name()) expectOk(t, err) restored, err := os.ReadFile(tmpResult.Name()) expectOk(t, err) if !bytes.Equal(original, restored) { t.Fatalf("Files did not match") } } func TestObjectMulti(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() obs, err := js.CreateObjectStore(&nats.ObjectStoreConfig{Bucket: "TEST_FILES"}) expectOk(t, err) numFiles := 0 fis, _ := os.ReadDir(".") for _, fi := range fis { fn := fi.Name() // Just grab clean test files. if filepath.Ext(fn) != ".go" || fn[0] == '.' 
|| fn[0] == '#' { continue } _, err = obs.PutFile(fn) expectOk(t, err) numFiles++ } expectOk(t, obs.Seal()) _, err = js.StreamInfo("OBJ_TEST_FILES") expectOk(t, err) result, err := obs.Get("object_test.go") expectOk(t, err) expectOk(t, result.Error()) defer result.Close() _, err = result.Info() expectOk(t, err) copy, err := io.ReadAll(result) expectOk(t, err) orig, err := os.ReadFile(path.Join(".", "object_test.go")) expectOk(t, err) if !bytes.Equal(orig, copy) { t.Fatalf("Files did not match") } } func TestObjectDeleteMarkers(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() obs, err := js.CreateObjectStore(&nats.ObjectStoreConfig{Bucket: "OBJS"}) expectOk(t, err) msg := bytes.Repeat([]byte("A"), 100) _, err = obs.PutBytes("A", msg) expectOk(t, err) err = obs.Delete("A") expectOk(t, err) si, err := js.StreamInfo("OBJ_OBJS") expectOk(t, err) // We should have one message left, the "delete" marker. if si.State.Msgs != 1 { t.Fatalf("Expected 1 marker msg, got %d msgs", si.State.Msgs) } // For deleted object return error _, err = obs.GetInfo("A") expectErr(t, err, nats.ErrObjectNotFound) _, err = obs.Get("A") expectErr(t, err, nats.ErrObjectNotFound) info, err := obs.GetInfo("A", nats.GetObjectInfoShowDeleted()) expectOk(t, err) // Make sure we have a delete marker, this will be there to drive Watch functionality. 
if !info.Deleted { t.Fatalf("Expected info to be marked as deleted") } _, err = obs.Get("A", nats.GetObjectShowDeleted()) expectOk(t, err) } func TestObjectMultiWithDelete(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() obs, err := js.CreateObjectStore(&nats.ObjectStoreConfig{Bucket: "2OD"}) expectOk(t, err) pa := bytes.Repeat([]byte("A"), 2_000_000) pb := bytes.Repeat([]byte("B"), 3_000_000) _, err = obs.PutBytes("A", pa) expectOk(t, err) // Hold onto this so we can make sure DeleteObject clears all messages, chunks and meta. si, err := js.StreamInfo("OBJ_2OD") expectOk(t, err) _, err = obs.PutBytes("B", pb) expectOk(t, err) pb2, err := obs.GetBytes("B") expectOk(t, err) if !bytes.Equal(pb, pb2) { t.Fatalf("Did not retrieve same object") } // Now delete B err = obs.Delete("B") expectOk(t, err) siad, err := js.StreamInfo("OBJ_2OD") expectOk(t, err) if siad.State.Msgs != si.State.Msgs+1 { // +1 more delete marker. t.Fatalf("Expected to have %d msgs after delete, got %d", siad.State.Msgs, si.State.Msgs+1) } } func TestObjectNames(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() obs, err := js.CreateObjectStore(&nats.ObjectStoreConfig{Bucket: "OBJS"}) expectOk(t, err) // Test filename like naming. 
_, err = obs.PutString("BLOB.txt", "A") expectOk(t, err) // Spaces ok _, err = obs.PutString("foo bar", "A") expectOk(t, err) // things that can be in a filename across multiple OSes // dot, asterisk, lt, gt, colon, double-quote, fwd-slash, backslash, pipe, question-mark, ampersand _, err = obs.PutString(".*<>:\"/\\|?&", "A") expectOk(t, err) // Errors _, err = obs.PutString("", "A") expectErr(t, err, nats.ErrBadObjectMeta) } func TestObjectMetadata(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() bucketMetadata := map[string]string{"foo": "bar", "baz": "boo"} obs, err := js.CreateObjectStore(&nats.ObjectStoreConfig{ Bucket: "META-TEST", Metadata: bucketMetadata, }) expectOk(t, err) status, err := obs.Status() expectOk(t, err) for k, v := range bucketMetadata { if status.Metadata()[k] != v { t.Fatalf("invalid bucket metadata: %+v", status.Metadata()) } } // Simple with no Meta. _, err = obs.PutString("A", "AAA") expectOk(t, err) buf := bytes.NewBufferString("CCC") objectMetadata := map[string]string{"name": "C", "description": "descC"} info, err := obs.Put(&nats.ObjectMeta{Name: "C", Metadata: objectMetadata}, buf) expectOk(t, err) if !reflect.DeepEqual(info.Metadata, objectMetadata) { t.Fatalf("invalid object metadata: %+v", info.Metadata) } meta := &nats.ObjectMeta{Name: "A"} meta.Description = "descA" meta.Headers = make(nats.Header) meta.Headers.Set("color", "blue") objectMetadata["description"] = "updated desc" objectMetadata["version"] = "0.1" meta.Metadata = objectMetadata // simple update that does not change the name, just adds data err = obs.UpdateMeta("A", meta) expectOk(t, err) info, err = obs.GetInfo("A") expectOk(t, err) if info.Name != "A" || info.Description != "descA" || info.Headers == nil || info.Headers.Get("color") != "blue" || !reflect.DeepEqual(info.Metadata, objectMetadata) { t.Fatalf("Update failed: %+v", info) } // update that changes the name and some 
data meta = &nats.ObjectMeta{Name: "B"} meta.Description = "descB" meta.Headers = make(nats.Header) meta.Headers.Set("color", "red") meta.Metadata = nil err = obs.UpdateMeta("A", meta) expectOk(t, err) _, err = obs.GetInfo("A") if err == nil { t.Fatal("Object meta for original name was not removed.") } info, err = obs.GetInfo("B") expectOk(t, err) if info.Name != "B" || info.Description != "descB" || info.Headers == nil || info.Headers.Get("color") != "red" || info.Metadata != nil { t.Fatalf("Update failed: %+v", info) } // Change meta name to existing object's name meta = &nats.ObjectMeta{Name: "C"} err = obs.UpdateMeta("B", meta) expectErr(t, err, nats.ErrObjectAlreadyExists) err = obs.Delete("C") expectOk(t, err) err = obs.UpdateMeta("B", meta) expectOk(t, err) // delete the object to test updating against a deleted object err = obs.Delete("C") expectOk(t, err) err = obs.UpdateMeta("C", meta) expectErr(t, err, nats.ErrUpdateMetaDeleted) err = obs.UpdateMeta("X", meta) if err == nil { t.Fatal("Expected an error when trying to update an object that does not exist.") } // can't have a link when putting an object meta.Opts = &nats.ObjectMetaOptions{Link: &nats.ObjectLink{Bucket: "DoesntMatter"}} _, err = obs.Put(meta, nil) expectErr(t, err, nats.ErrLinkNotAllowed) } func TestObjectWatch(t *testing.T) { expectUpdateF := func(t *testing.T, watcher nats.ObjectWatcher) func(name string) { return func(name string) { t.Helper() select { case info := <-watcher.Updates(): if false && info.Name != name { // TODO what is supposed to happen here? 
t.Fatalf("Expected update for %q, but got %+v", name, info) } case <-time.After(time.Second): t.Fatalf("Did not receive an update like expected") } } } expectNoMoreUpdatesF := func(t *testing.T, watcher nats.ObjectWatcher) func() { return func() { t.Helper() select { case info := <-watcher.Updates(): t.Fatalf("Got an unexpected update: %+v", info) case <-time.After(100 * time.Millisecond): } } } expectInitDoneF := func(t *testing.T, watcher nats.ObjectWatcher) func() { return func() { t.Helper() select { case info := <-watcher.Updates(): if info != nil { t.Fatalf("Did not get expected: %+v", info) } case <-time.After(time.Second): t.Fatalf("Did not receive a init done like expected") } } } t.Run("default watcher", func(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() obs, err := js.CreateObjectStore(&nats.ObjectStoreConfig{Bucket: "WATCH-TEST"}) expectOk(t, err) watcher, err := obs.Watch() expectOk(t, err) defer watcher.Stop() expectUpdate := expectUpdateF(t, watcher) expectNoMoreUpdates := expectNoMoreUpdatesF(t, watcher) expectInitDone := expectInitDoneF(t, watcher) // We should get a marker that is nil when all initial values are delivered. expectInitDone() _, err = obs.PutString("A", "AAA") expectOk(t, err) _, err = obs.PutString("B", "BBB") expectOk(t, err) // Initial Values. expectUpdate("A") expectUpdate("B") expectNoMoreUpdates() // Delete err = obs.Delete("A") expectOk(t, err) expectUpdate("A") expectNoMoreUpdates() // New _, err = obs.PutString("C", "CCC") expectOk(t, err) // Update Meta deletedInfo, err := obs.GetInfo("A", nats.GetObjectInfoShowDeleted()) expectOk(t, err) if !deletedInfo.Deleted { t.Fatalf("Expected object to be deleted.") } meta := &deletedInfo.ObjectMeta meta.Description = "Making a change." 
err = obs.UpdateMeta("A", meta) expectErr(t, err, nats.ErrUpdateMetaDeleted) }) t.Run("watcher with update", func(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() obs, err := js.CreateObjectStore(&nats.ObjectStoreConfig{Bucket: "WATCH-TEST"}) expectOk(t, err) _, err = obs.PutString("A", "AAA") expectOk(t, err) _, err = obs.PutString("B", "BBB") expectOk(t, err) watcher, err := obs.Watch(nats.UpdatesOnly()) expectOk(t, err) defer watcher.Stop() expectUpdate := expectUpdateF(t, watcher) expectNoMoreUpdates := expectNoMoreUpdatesF(t, watcher) // when listening for updates only, we should not receive anything when watcher is started expectNoMoreUpdates() // Delete err = obs.Delete("A") expectOk(t, err) expectUpdate("A") expectNoMoreUpdates() // New _, err = obs.PutString("C", "CCC") expectOk(t, err) expectUpdate("C") }) t.Run("stop watcher should not block", func(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() obs, err := js.CreateObjectStore(&nats.ObjectStoreConfig{Bucket: "WATCH-TEST"}) expectOk(t, err) watcher, err := obs.Watch() expectOk(t, err) expectInitDone := expectInitDoneF(t, watcher) expectInitDone() err = watcher.Stop() expectOk(t, err) select { case _, ok := <-watcher.Updates(): if ok { t.Fatal("Expected channel to be closed") } case <-time.After(100 * time.Millisecond): return } }) } func TestObjectLinks(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() root, err := js.CreateObjectStore(&nats.ObjectStoreConfig{Bucket: "ROOT"}) expectOk(t, err) _, err = root.PutString("A", "AAA") expectOk(t, err) _, err = root.PutString("B", "BBB") expectOk(t, err) infoA, err := root.GetInfo("A") expectOk(t, err) // Link to individual object. 
infoLA, err := root.AddLink("LA", infoA) expectOk(t, err) expectLinkIsCorrect(t, infoA, infoLA) // link to a link _, err = root.AddLink("LALA", infoLA) expectErr(t, err, nats.ErrNoLinkToLink) dir, err := js.CreateObjectStore(&nats.ObjectStoreConfig{Bucket: "DIR"}) expectOk(t, err) _, err = dir.PutString("DIR/A", "DIR-AAA") expectOk(t, err) _, err = dir.PutString("DIR/B", "DIR-BBB") expectOk(t, err) infoB, err := dir.GetInfo("DIR/B") expectOk(t, err) infoLB, err := root.AddLink("DBL", infoB) expectOk(t, err) expectLinkIsCorrect(t, infoB, infoLB) // Now add whole other store as a link, like a directory. infoBucketLink, err := root.AddBucketLink("dir", dir) expectOk(t, err) _, err = root.Get(infoBucketLink.Name) expectErr(t, err, nats.ErrCantGetBucket) expectLinkPartsAreCorrect(t, infoBucketLink, "DIR", "") // Try to get a linked object, same bucket getLA, err := root.GetString("LA") expectOk(t, err) if getLA != "AAA" { t.Fatalf("Expected %q but got %q", "AAA", getLA) } // Try to get a linked object, cross bucket getDbl, err := root.GetString("DBL") expectOk(t, err) if getDbl != "DIR-BBB" { t.Fatalf("Expected %q but got %q", "DIR-BBB", getDbl) } // change a link infoB, err = root.GetInfo("B") expectOk(t, err) infoLA, err = root.GetInfo("LA") expectOk(t, err) expectLinkIsCorrect(t, infoA, infoLA) infoLA, err = root.AddLink("LA", infoB) expectOk(t, err) expectLinkIsCorrect(t, infoB, infoLA) // change a bucket link infoBucketLink, err = root.GetInfo("dir") expectOk(t, err) expectLinkPartsAreCorrect(t, infoBucketLink, "DIR", "") infoBucketLink, err = root.AddBucketLink("dir", root) expectOk(t, err) expectLinkPartsAreCorrect(t, infoBucketLink, "ROOT", "") // Check simple errors. 
_, err = root.AddLink("", infoB) expectErr(t, err, nats.ErrNameRequired) // A is already an object _, err = root.AddLink("A", infoB) expectErr(t, err, nats.ErrObjectAlreadyExists) _, err = root.AddLink("Nil Object", nil) expectErr(t, err, nats.ErrObjectRequired) infoB.Name = "" _, err = root.AddLink("Empty Info Name", infoB) expectErr(t, err, nats.ErrObjectRequired) // Check Error Link to a Link _, err = root.AddLink("Link To Link", infoLB) expectErr(t, err, nats.ErrNoLinkToLink) // Check Errors on bucket linking _, err = root.AddBucketLink("", root) expectErr(t, err, nats.ErrNameRequired) _, err = root.AddBucketLink("Nil Bucket", nil) expectErr(t, err, nats.ErrBucketRequired) err = root.Delete("A") expectOk(t, err) _, err = root.AddLink("ToDeletedStale", infoA) expectOk(t, err) // TODO deal with this in the code somehow infoA, err = root.GetInfo("A", nats.GetObjectInfoShowDeleted()) expectOk(t, err) _, err = root.AddLink("ToDeletedFresh", infoA) expectErr(t, err, nats.ErrNoLinkToDeleted) } func expectLinkIsCorrect(t *testing.T, originalObject *nats.ObjectInfo, linkObject *nats.ObjectInfo) { if linkObject.Opts.Link == nil || !expectLinkPartsAreCorrect(t, linkObject, originalObject.Bucket, originalObject.Name) { t.Fatalf("Link info not what was expected:\nActual: %+v\nTarget: %+v", linkObject, originalObject) } } func expectLinkPartsAreCorrect(t *testing.T, linkObject *nats.ObjectInfo, bucket, name string) bool { return linkObject.Opts.Link.Bucket == bucket && linkObject.Opts.Link.Name == name && !linkObject.ModTime.IsZero() && linkObject.NUID != "" } // Right now no history, just make sure we are cleaning up after ourselves. 
func TestObjectHistory(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() obs, err := js.CreateObjectStore(&nats.ObjectStoreConfig{Bucket: "OBJS"}) expectOk(t, err) info, err := obs.PutBytes("A", bytes.Repeat([]byte("A"), 10)) expectOk(t, err) if info.Size != 10 { t.Fatalf("Invalid first put when testing history %+v", info) } info, err = obs.PutBytes("A", bytes.Repeat([]byte("a"), 20)) expectOk(t, err) if info.Size != 20 { t.Fatalf("Invalid second put when testing history %+v", info) } // Should only be 1 copy of 'A', so 1 data and 1 meta since history was not selected. si, err := js.StreamInfo("OBJ_OBJS") expectOk(t, err) if si.State.Msgs != 2 { t.Fatalf("Expected 2 msgs (1 data 1 meta) but got %d", si.State.Msgs) } } func TestObjectList(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() root, err := js.CreateObjectStore(&nats.ObjectStoreConfig{Bucket: "ROOT"}) expectOk(t, err) _, err = root.List() expectErr(t, err, nats.ErrNoObjectsFound) put := func(name, value string) { _, err = root.PutString(name, value) expectOk(t, err) } put("A", "AAA") put("B", "BBB") put("C", "CCC") put("B", "bbb") // Self link info, err := root.GetInfo("B") expectOk(t, err) _, err = root.AddLink("b", info) expectOk(t, err) put("D", "DDD") err = root.Delete("D") expectOk(t, err) t.Run("without deleted objects", func(t *testing.T) { lch, err := root.List() expectOk(t, err) omap := make(map[string]struct{}) for _, info := range lch { if _, ok := omap[info.Name]; ok { t.Fatalf("Already saw %q", info.Name) } omap[info.Name] = struct{}{} } if len(omap) != 4 { t.Fatalf("Expected 4 total objects, got %d", len(omap)) } expected := map[string]struct{}{ "A": struct{}{}, "B": struct{}{}, "C": struct{}{}, "b": struct{}{}, } if !reflect.DeepEqual(omap, expected) { t.Fatalf("Expected %+v but got %+v", expected, omap) } }) t.Run("with 
deleted objects", func(t *testing.T) { lch, err := root.List(nats.ListObjectsShowDeleted()) expectOk(t, err) res := make([]string, 0) for _, info := range lch { res = append(res, info.Name) } if len(res) != 5 { t.Fatalf("Expected 5 total objects, got %d", len(res)) } expected := []string{"A", "C", "B", "b", "D"} if !reflect.DeepEqual(res, expected) { t.Fatalf("Expected %+v but got %+v", expected, res) } }) t.Run("with context", func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() lch, err := root.List(nats.Context(ctx)) expectOk(t, err) omap := make(map[string]struct{}) for _, info := range lch { if _, ok := omap[info.Name]; ok { t.Fatalf("Already saw %q", info.Name) } omap[info.Name] = struct{}{} } if len(omap) != 4 { t.Fatalf("Expected 4 total objects, got %d", len(omap)) } expected := map[string]struct{}{ "A": {}, "B": {}, "C": {}, "b": {}, } if !reflect.DeepEqual(omap, expected) { t.Fatalf("Expected %+v but got %+v", expected, omap) } }) } func TestObjectMaxBytes(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() obs, err := js.CreateObjectStore(&nats.ObjectStoreConfig{Bucket: "OBJS", MaxBytes: 1024}) expectOk(t, err) status, err := obs.Status() expectOk(t, err) bs := status.(*nats.ObjectBucketStatus) info := bs.StreamInfo() if info.Config.MaxBytes != 1024 { t.Fatalf("invalid object stream MaxSize %+v", info.Config.MaxBytes) } } func TestListObjectStores(t *testing.T) { tests := []struct { name string bucketsNum int }{ { name: "single page", bucketsNum: 5, }, { name: "multi page", bucketsNum: 1025, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() // create stream without the chunk subject, but with OBJ_ prefix _, err := js.AddStream(&nats.StreamConfig{Name: "OBJ_FOO", Subjects: 
[]string{"FOO.*"}}) expectOk(t, err) // create stream with chunk subject, but without "OBJ_" prefix _, err = js.AddStream(&nats.StreamConfig{Name: "FOO", Subjects: []string{"$O.ABC.C.>"}}) expectOk(t, err) for i := 0; i < test.bucketsNum; i++ { _, err = js.CreateObjectStore(&nats.ObjectStoreConfig{Bucket: fmt.Sprintf("OBJS_%d", i), MaxBytes: 1024}) expectOk(t, err) } names := make([]string, 0) for name := range js.ObjectStoreNames() { names = append(names, name) } if len(names) != test.bucketsNum { t.Fatalf("Invalid number of stream names; want: %d; got: %d", test.bucketsNum, len(names)) } infos := make([]nats.ObjectStoreStatus, 0) for info := range js.ObjectStores() { infos = append(infos, info) } if len(infos) != test.bucketsNum { t.Fatalf("Invalid number of streams; want: %d; got: %d", test.bucketsNum, len(infos)) } }) } } func TestGetObjectDigestValue(t *testing.T) { tests := []struct { inputFile string expected string }{ { inputFile: "digester_test_bytes_000100.txt", expected: "SHA-256=IdgP4UYMGt47rgecOqFoLrd24AXukHf5-SVzqQ5Psg8=", }, { inputFile: "digester_test_bytes_001000.txt", expected: "SHA-256=DZj4RnBpuEukzFIY0ueZ-xjnHY4Rt9XWn4Dh8nkNfnI=", }, { inputFile: "digester_test_bytes_010000.txt", expected: "SHA-256=RgaJ-VSJtjNvgXcujCKIvaheiX_6GRCcfdRYnAcVy38=", }, { inputFile: "digester_test_bytes_100000.txt", expected: "SHA-256=yan7pwBVnC1yORqqgBfd64_qAw6q9fNA60_KRiMMooE=", }, } for _, test := range tests { t.Run(test.inputFile, func(t *testing.T) { data, err := os.ReadFile(fmt.Sprintf("./testdata/%s", test.inputFile)) expectOk(t, err) h := sha256.New() h.Write(data) if res := nats.GetObjectDigestValue(h); res != test.expected { t.Fatalf("Invalid digest; want: %s; got: %s", test.expected, res) } }) } } func TestDecodeObjectDigest(t *testing.T) { tests := []struct { inputDigest string expectedFile string withError error }{ { expectedFile: "digester_test_bytes_000100.txt", inputDigest: "SHA-256=IdgP4UYMGt47rgecOqFoLrd24AXukHf5-SVzqQ5Psg8=", }, { expectedFile: 
"digester_test_bytes_001000.txt", inputDigest: "SHA-256=DZj4RnBpuEukzFIY0ueZ-xjnHY4Rt9XWn4Dh8nkNfnI=", }, { expectedFile: "digester_test_bytes_010000.txt", inputDigest: "SHA-256=RgaJ-VSJtjNvgXcujCKIvaheiX_6GRCcfdRYnAcVy38=", }, { expectedFile: "digester_test_bytes_100000.txt", inputDigest: "SHA-256=yan7pwBVnC1yORqqgBfd64_qAw6q9fNA60_KRiMMooE=", }, } for _, test := range tests { t.Run(test.expectedFile, func(t *testing.T) { expected, err := os.ReadFile(fmt.Sprintf("./testdata/%s", test.expectedFile)) h := sha256.New() h.Write(expected) expected = h.Sum(nil) expectOk(t, err) res, err := nats.DecodeObjectDigest(test.inputDigest) if test.withError != nil { expectErr(t, err, nats.ErrInvalidDigestFormat) return } expectOk(t, err) if !bytes.Equal(res[:], expected) { t.Fatalf("Invalid decoded value; want: %s; got: %s", expected, res) } }) } } func TestObjectStoreGetObjectContextTimeout(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() obs, err := js.CreateObjectStore(&nats.ObjectStoreConfig{Bucket: "OBJS"}) expectOk(t, err) blob := make([]byte, 1024) _, err = rand.Read(blob) expectOk(t, err) _, err = obs.PutBytes("blob", blob) expectOk(t, err) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) defer cancel() r, err := obs.Get("blob", nats.Context(ctx)) expectOk(t, err) time.Sleep(15 * time.Millisecond) var res []byte _, err = r.Read(res) expectErr(t, err, nats.ErrTimeout) r.Close() } func TestObjectStoreCompression(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() obj, err := js.CreateObjectStore(&nats.ObjectStoreConfig{ Bucket: "A", Compression: true, }) if err != nil { t.Fatalf("Error creating object store: %v", err) } status, err := obj.Status() if err != nil { t.Fatalf("Error getting bucket status: %v", err) } if !status.IsCompressed() { t.Fatalf("Expected bucket to be 
compressed") } objStream, err := js.StreamInfo("OBJ_A") if err != nil { t.Fatalf("Error getting stream info: %v", err) } if objStream.Config.Compression != nats.S2Compression { t.Fatalf("Expected stream to be compressed with S2") } } func TestObjectStoreMirror(t *testing.T) { s := RunBasicJetStreamServer() defer shutdownJSServerAndRemoveStorage(t, s) nc, js := jsClient(t, s) defer nc.Close() bucketName := "test-bucket" obs, err := js.CreateObjectStore(&nats.ObjectStoreConfig{Bucket: bucketName, Description: "testing"}) expectOk(t, err) mirrorBucketName := "mirror-test-bucket" _, err = js.AddStream(&nats.StreamConfig{ Name: fmt.Sprintf("OBJ_%s", mirrorBucketName), Mirror: &nats.StreamSource{ Name: fmt.Sprintf("OBJ_%s", bucketName), SubjectTransforms: []nats.SubjectTransformConfig{ { Source: fmt.Sprintf("$O.%s.>", bucketName), Destination: fmt.Sprintf("$O.%s.>", mirrorBucketName), }, }, }, AllowRollup: true, // meta messages are always rollups }) if err != nil { t.Fatalf("Error creating object store bucket mirror: %v", err) } _, err = obs.PutString("A", "abc") expectOk(t, err) mirrorObs, err := js.ObjectStore(mirrorBucketName) expectOk(t, err) // Make sure we sync. 
checkFor(t, 2*time.Second, 15*time.Millisecond, func() error { mirrorValue, err := mirrorObs.GetString("A") if err != nil { return err } if mirrorValue != "abc" { t.Fatalf("Expected mirrored object store value to be the same as original") } return nil }) watcher, err := mirrorObs.Watch() if err != nil { t.Fatalf("Error creating watcher: %v", err) } defer watcher.Stop() // expect to get one value and nil for { select { case info := <-watcher.Updates(): if info == nil { return } case <-time.After(2 * time.Second): t.Fatalf("Expected to receive an update") } } } nats.go-1.41.0/test/protobuf_test.go000066400000000000000000000101241477351342400173630ustar00rootroot00000000000000// Copyright 2015-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package test import ( "errors" "reflect" "testing" "time" "github.com/nats-io/nats.go" "github.com/nats-io/nats.go/encoders/protobuf" pb "github.com/nats-io/nats.go/encoders/protobuf/testdata" ) //lint:file-ignore SA1019 Ignore deprecation warnings for EncodedConn func NewProtoEncodedConn(tl TestLogger) *nats.EncodedConn { ec, err := nats.NewEncodedConn(NewConnection(tl, TEST_PORT), protobuf.PROTOBUF_ENCODER) if err != nil { tl.Fatalf("Failed to create an encoded connection: %v\n", err) } return ec } func TestEncProtoMarshalStruct(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() ec := NewProtoEncodedConn(t) defer ec.Close() me := &pb.Person{Name: "derek", Age: 22, Address: "140 New Montgomery St"} me.Children = make(map[string]*pb.Person) me.Children["sam"] = &pb.Person{Name: "sam", Age: 19, Address: "140 New Montgomery St"} me.Children["meg"] = &pb.Person{Name: "meg", Age: 17, Address: "140 New Montgomery St"} ch := make(chan error, 1) ec.Subscribe("protobuf_test", func(p *pb.Person) { var err error if !reflect.DeepEqual(p.ProtoReflect(), me.ProtoReflect()) { err = errors.New("Did not receive the correct protobuf response") } ch <- err }) ec.Publish("protobuf_test", me) select { case e := <-ch: if e != nil { t.Fatal(e.Error()) } case <-time.After(time.Second): t.Fatal("Failed to receive message") } } func TestEncProtoNilRequest(t *testing.T) { s := RunServerOnPort(TEST_PORT) defer s.Shutdown() ec := NewProtoEncodedConn(t) defer ec.Close() testPerson := &pb.Person{Name: "Anatolii", Age: 25, Address: "Ukraine, Nikolaev"} //Subscribe with empty interface shouldn't failed on empty message ec.Subscribe("nil_test", func(_, reply string, _ any) { ec.Publish(reply, testPerson) }) resp := new(pb.Person) //Request with nil argument shouldn't failed with nil argument err := ec.Request("nil_test", nil, resp, 100*time.Millisecond) ec.Flush() if err != nil { t.Error("Fail to send empty message via encoded proto connection") } if 
!reflect.DeepEqual(testPerson.ProtoReflect(), resp.ProtoReflect()) { t.Error("Fail to receive encoded response") } } func BenchmarkProtobufMarshalStruct(b *testing.B) { me := &pb.Person{Name: "derek", Age: 22, Address: "140 New Montgomery St"} me.Children = make(map[string]*pb.Person) me.Children["sam"] = &pb.Person{Name: "sam", Age: 19, Address: "140 New Montgomery St"} me.Children["meg"] = &pb.Person{Name: "meg", Age: 17, Address: "140 New Montgomery St"} encoder := &protobuf.ProtobufEncoder{} for n := 0; n < b.N; n++ { if _, err := encoder.Encode("protobuf_test", me); err != nil { b.Fatal("Couldn't serialize object", err) } } } func BenchmarkPublishProtobufStruct(b *testing.B) { // stop benchmark for set-up b.StopTimer() s := RunServerOnPort(TEST_PORT) defer s.Shutdown() ec := NewProtoEncodedConn(b) defer ec.Close() ch := make(chan bool) me := &pb.Person{Name: "derek", Age: 22, Address: "140 New Montgomery St"} me.Children = make(map[string]*pb.Person) me.Children["sam"] = &pb.Person{Name: "sam", Age: 19, Address: "140 New Montgomery St"} me.Children["meg"] = &pb.Person{Name: "meg", Age: 17, Address: "140 New Montgomery St"} ec.Subscribe("protobuf_test", func(p *pb.Person) { if !reflect.DeepEqual(p, me) { b.Fatalf("Did not receive the correct protobuf response") } ch <- true }) // resume benchmark b.StartTimer() for n := 0; n < b.N; n++ { ec.Publish("protobuf_test", me) if e := Wait(ch); e != nil { b.Fatal("Did not receive the message") } } } nats.go-1.41.0/test/reconnect_test.go000066400000000000000000000700001477351342400175020ustar00rootroot00000000000000// Copyright 2013-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package test import ( "errors" "fmt" "net" "net/url" "strconv" "sync" "sync/atomic" "testing" "time" "github.com/nats-io/jwt" "github.com/nats-io/nats-server/v2/server" "github.com/nats-io/nats.go" "github.com/nats-io/nkeys" ) func startReconnectServer(t *testing.T) *server.Server { return RunServerOnPort(TEST_PORT) } func TestReconnectTotalTime(t *testing.T) { opts := nats.GetDefaultOptions() totalReconnectTime := time.Duration(opts.MaxReconnect) * opts.ReconnectWait if totalReconnectTime < (2 * time.Minute) { t.Fatalf("Total reconnect time should be at least 2 mins: Currently %v\n", totalReconnectTime) } } func TestDefaultReconnectJitter(t *testing.T) { opts := nats.GetDefaultOptions() if opts.ReconnectJitter != nats.DefaultReconnectJitter { t.Fatalf("Expected default jitter for non TLS to be %v, got %v", nats.DefaultReconnectJitter, opts.ReconnectJitter) } if opts.ReconnectJitterTLS != nats.DefaultReconnectJitterTLS { t.Fatalf("Expected default jitter for TLS to be %v, got %v", nats.DefaultReconnectJitterTLS, opts.ReconnectJitterTLS) } } func TestReconnectDisallowedFlags(t *testing.T) { ts := startReconnectServer(t) defer ts.Shutdown() ch := make(chan bool) opts := nats.GetDefaultOptions() opts.Url = fmt.Sprintf("nats://127.0.0.1:%d", TEST_PORT) opts.AllowReconnect = false opts.ClosedCB = func(_ *nats.Conn) { ch <- true } nc, err := opts.Connect() if err != nil { t.Fatalf("Should have connected ok: %v", err) } defer nc.Close() ts.Shutdown() if e := Wait(ch); e != nil { t.Fatal("Did not trigger ClosedCB correctly") } } func TestReconnectAllowedFlags(t 
*testing.T) { ts := startReconnectServer(t) defer ts.Shutdown() ch := make(chan bool) dch := make(chan bool) opts := nats.GetDefaultOptions() opts.Url = fmt.Sprintf("nats://127.0.0.1:%d", TEST_PORT) opts.AllowReconnect = true opts.MaxReconnect = 2 opts.ReconnectWait = 1 * time.Second nats.ReconnectJitter(0, 0)(&opts) opts.ClosedCB = func(_ *nats.Conn) { ch <- true } opts.DisconnectedErrCB = func(_ *nats.Conn, _ error) { dch <- true } nc, err := opts.Connect() if err != nil { t.Fatalf("Should have connected ok: %v", err) } defer nc.Close() ts.Shutdown() // We want wait to timeout here, and the connection // should not trigger the Close CB. if e := WaitTime(ch, 500*time.Millisecond); e == nil { t.Fatal("Triggered ClosedCB incorrectly") } // We should wait to get the disconnected callback to ensure // that we are in the process of reconnecting. if e := Wait(dch); e != nil { t.Fatal("DisconnectedErrCB should have been triggered") } if !nc.IsReconnecting() { t.Fatal("Expected to be in a reconnecting state") } // clear the CloseCB since ch will block nc.Opts.ClosedCB = nil } func TestConnCloseBreaksReconnectLoop(t *testing.T) { ts := startReconnectServer(t) defer ts.Shutdown() cch := make(chan bool) opts := reconnectOpts // Bump the max reconnect attempts opts.MaxReconnect = 100 opts.ClosedCB = func(_ *nats.Conn) { cch <- true } nc, err := opts.Connect() if err != nil { t.Fatalf("Should have connected ok: %v", err) } defer nc.Close() nc.Flush() // Shutdown the server ts.Shutdown() // Wait a second, then close the connection time.Sleep(time.Second) // Close the connection, this should break the reconnect loop. // Do this in a go routine since the issue was that Close() // would block until the reconnect loop is done. go nc.Close() // Even on Windows (where a createConn takes more than a second) // we should be able to break the reconnect loop with the following // timeout. 
if err := WaitTime(cch, 3*time.Second); err != nil { t.Fatal("Did not get a closed callback") } } func TestBasicReconnectFunctionality(t *testing.T) { ts := startReconnectServer(t) defer ts.Shutdown() ch := make(chan bool) dch := make(chan bool, 2) opts := reconnectOpts opts.DisconnectedErrCB = func(_ *nats.Conn, _ error) { dch <- true } nc, err := opts.Connect() if err != nil { t.Fatalf("Should have connected ok: %v\n", err) } defer nc.Close() testString := "bar" nc.Subscribe("foo", func(m *nats.Msg) { if string(m.Data) != testString { t.Fatal("String doesn't match") } ch <- true }) nc.Flush() ts.Shutdown() // server is stopped here... if err := Wait(dch); err != nil { t.Fatalf("Did not get the disconnected callback on time\n") } if err := nc.Publish("foo", []byte("bar")); err != nil { t.Fatalf("Failed to publish message: %v\n", err) } ts = startReconnectServer(t) defer ts.Shutdown() if err := nc.FlushTimeout(5 * time.Second); err != nil { t.Fatalf("Error on Flush: %v", err) } if e := Wait(ch); e != nil { t.Fatal("Did not receive our message") } expectedReconnectCount := uint64(1) reconnectCount := nc.Stats().Reconnects if reconnectCount != expectedReconnectCount { t.Fatalf("Reconnect count incorrect: %d vs %d\n", reconnectCount, expectedReconnectCount) } } func TestExtendedReconnectFunctionality(t *testing.T) { ts := startReconnectServer(t) defer ts.Shutdown() opts := reconnectOpts dch := make(chan bool, 2) opts.DisconnectedErrCB = func(_ *nats.Conn, _ error) { dch <- true } rch := make(chan bool, 1) opts.ReconnectedCB = func(_ *nats.Conn) { rch <- true } nc, err := opts.Connect() if err != nil { t.Fatalf("Should have connected ok: %v", err) } defer nc.Close() testString := "bar" received := int32(0) nc.Subscribe("foo", func(*nats.Msg) { atomic.AddInt32(&received, 1) }) sub, _ := nc.Subscribe("foobar", func(*nats.Msg) { atomic.AddInt32(&received, 1) }) nc.Publish("foo", []byte(testString)) nc.Flush() ts.Shutdown() // server is stopped here.. 
// wait for disconnect if e := WaitTime(dch, 2*time.Second); e != nil { t.Fatal("Did not receive a disconnect callback message") } // Sub while disconnected nc.Subscribe("bar", func(*nats.Msg) { atomic.AddInt32(&received, 1) }) // Unsub foobar while disconnected sub.Unsubscribe() if err = nc.Publish("foo", []byte(testString)); err != nil { t.Fatalf("Received an error after disconnect: %v\n", err) } if err = nc.Publish("bar", []byte(testString)); err != nil { t.Fatalf("Received an error after disconnect: %v\n", err) } ts = startReconnectServer(t) defer ts.Shutdown() // server is restarted here.. // wait for reconnect if e := WaitTime(rch, 2*time.Second); e != nil { t.Fatal("Did not receive a reconnect callback message") } if err = nc.Publish("foobar", []byte(testString)); err != nil { t.Fatalf("Received an error after server restarted: %v\n", err) } if err = nc.Publish("foo", []byte(testString)); err != nil { t.Fatalf("Received an error after server restarted: %v\n", err) } ch := make(chan bool) nc.Subscribe("done", func(*nats.Msg) { ch <- true }) nc.Publish("done", nil) if e := Wait(ch); e != nil { t.Fatal("Did not receive our message") } // Sleep a bit to guarantee scheduler runs and process all subs. time.Sleep(50 * time.Millisecond) if atomic.LoadInt32(&received) != 4 { t.Fatalf("Received != %d, equals %d\n", 4, received) } } func TestQueueSubsOnReconnect(t *testing.T) { ts := startReconnectServer(t) defer ts.Shutdown() opts := reconnectOpts // Allow us to block on reconnect complete. reconnectsDone := make(chan bool) opts.ReconnectedCB = func(nc *nats.Conn) { reconnectsDone <- true } // Create connection nc, err := opts.Connect() if err != nil { t.Fatalf("Should have connected ok: %v\n", err) } defer nc.Close() // To hold results. results := make(map[int]int) var mu sync.Mutex // Make sure we got what we needed, 1 msg only and all seqnos accounted for.. 
checkResults := func(numSent int) { mu.Lock() defer mu.Unlock() for i := 0; i < numSent; i++ { if results[i] != 1 { t.Fatalf("Received incorrect number of messages, [%d] for seq: %d\n", results[i], i) } } // Auto reset results map results = make(map[int]int) } subj := "foo.bar" qgroup := "workers" cb := func(m *nats.Msg) { mu.Lock() defer mu.Unlock() seqno, err := strconv.Atoi(string(m.Data)) if err != nil { t.Fatalf("Received an invalid sequence number: %v\n", err) } results[seqno] = results[seqno] + 1 } // Create Queue Subscribers nc.QueueSubscribe(subj, qgroup, cb) nc.QueueSubscribe(subj, qgroup, cb) nc.Flush() // Helper function to send messages and check results. sendAndCheckMsgs := func(numToSend int) { for i := 0; i < numToSend; i++ { nc.Publish(subj, []byte(fmt.Sprint(i))) } // Wait for processing. nc.Flush() time.Sleep(50 * time.Millisecond) // Check Results checkResults(numToSend) } // Base Test sendAndCheckMsgs(10) // Stop and restart server ts.Shutdown() ts = startReconnectServer(t) defer ts.Shutdown() if err := Wait(reconnectsDone); err != nil { t.Fatal("Did not get the ReconnectedCB!") } // Reconnect Base Test sendAndCheckMsgs(10) } func TestIsClosed(t *testing.T) { ts := startReconnectServer(t) defer ts.Shutdown() nc := NewConnection(t, TEST_PORT) defer nc.Close() if nc.IsClosed() { t.Fatalf("IsClosed returned true when the connection is still open.") } ts.Shutdown() if nc.IsClosed() { t.Fatalf("IsClosed returned true when the connection is still open.") } ts = startReconnectServer(t) defer ts.Shutdown() if nc.IsClosed() { t.Fatalf("IsClosed returned true when the connection is still open.") } nc.Close() if !nc.IsClosed() { t.Fatalf("IsClosed returned false after Close() was called.") } } func TestIsReconnectingAndStatus(t *testing.T) { ts := startReconnectServer(t) defer ts.Shutdown() disconnectedch := make(chan bool, 3) reconnectch := make(chan bool, 2) opts := nats.GetDefaultOptions() opts.Url = fmt.Sprintf("nats://127.0.0.1:%d", TEST_PORT) 
opts.AllowReconnect = true opts.MaxReconnect = 10000 opts.ReconnectWait = 100 * time.Millisecond nats.ReconnectJitter(0, 0)(&opts) opts.DisconnectedErrCB = func(_ *nats.Conn, _ error) { disconnectedch <- true } opts.ReconnectedCB = func(_ *nats.Conn) { reconnectch <- true } // Connect, verify initial reconnecting state check, then stop the server nc, err := opts.Connect() if err != nil { t.Fatalf("Should have connected ok: %v", err) } defer nc.Close() if nc.IsReconnecting() { t.Fatalf("IsReconnecting returned true when the connection is still open.") } if status := nc.Status(); status != nats.CONNECTED { t.Fatalf("Status returned %d when connected instead of CONNECTED", status) } ts.Shutdown() // Wait until we get the disconnected callback if e := Wait(disconnectedch); e != nil { t.Fatalf("Disconnect callback wasn't triggered: %v", e) } if !nc.IsReconnecting() { t.Fatalf("IsReconnecting returned false when the client is reconnecting.") } if status := nc.Status(); status != nats.RECONNECTING { t.Fatalf("Status returned %d when reconnecting instead of CONNECTED", status) } ts = startReconnectServer(t) defer ts.Shutdown() // Wait until we get the reconnect callback if e := Wait(reconnectch); e != nil { t.Fatalf("Reconnect callback wasn't triggered: %v", e) } if nc.IsReconnecting() { t.Fatalf("IsReconnecting returned true after the connection was reconnected.") } if status := nc.Status(); status != nats.CONNECTED { t.Fatalf("Status returned %d when reconnected instead of CONNECTED", status) } // Close the connection, reconnecting should still be false nc.Close() if nc.IsReconnecting() { t.Fatalf("IsReconnecting returned true after Close() was called.") } if status := nc.Status(); status != nats.CLOSED { t.Fatalf("Status returned %d after Close() was called instead of CLOSED", status) } } func TestFullFlushChanDuringReconnect(t *testing.T) { ts := startReconnectServer(t) defer ts.Shutdown() reconnectch := make(chan bool, 2) opts := nats.GetDefaultOptions() opts.Url = 
fmt.Sprintf("nats://127.0.0.1:%d", TEST_PORT) opts.AllowReconnect = true opts.MaxReconnect = 10000 opts.ReconnectWait = 100 * time.Millisecond nats.ReconnectJitter(0, 0)(&opts) opts.ReconnectedCB = func(_ *nats.Conn) { reconnectch <- true } // Connect nc, err := opts.Connect() if err != nil { t.Fatalf("Should have connected ok: %v", err) } defer nc.Close() // Channel used to make the go routine sending messages to stop. stop := make(chan bool) // While connected, publish as fast as we can go func() { for i := 0; ; i++ { _ = nc.Publish("foo", []byte("hello")) // Make sure we are sending at least flushChanSize (1024) messages // before potentially pausing. if i%2000 == 0 { select { case <-stop: return default: time.Sleep(100 * time.Millisecond) } } } }() // Send a bit... time.Sleep(500 * time.Millisecond) // Shut down the server ts.Shutdown() // Continue sending while we are disconnected time.Sleep(time.Second) // Restart the server ts = startReconnectServer(t) defer ts.Shutdown() // Wait for the reconnect CB to be invoked (but not for too long) if e := WaitTime(reconnectch, 5*time.Second); e != nil { t.Fatalf("Reconnect callback wasn't triggered: %v", e) } close(stop) } func TestReconnectVerbose(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() o := nats.GetDefaultOptions() o.ReconnectWait = 50 * time.Millisecond o.Verbose = true rch := make(chan bool) o.ReconnectedCB = func(_ *nats.Conn) { rch <- true } nc, err := o.Connect() if err != nil { t.Fatalf("Should have connected ok: %v", err) } defer nc.Close() err = nc.Flush() if err != nil { t.Fatalf("Error during flush: %v", err) } s.Shutdown() s = RunDefaultServer() defer s.Shutdown() if e := Wait(rch); e != nil { t.Fatal("Should have reconnected ok") } err = nc.Flush() if err != nil { t.Fatalf("Error during flush: %v", err) } } func TestReconnectBufSizeOption(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc, err := nats.Connect("nats://127.0.0.1:4222", nats.ReconnectBufSize(32)) if err != nil { 
t.Fatalf("Should have connected ok: %v", err) } defer nc.Close() if nc.Opts.ReconnectBufSize != 32 { t.Fatalf("ReconnectBufSize should be 32 but it is %d", nc.Opts.ReconnectBufSize) } } func TestReconnectBufSize(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() o := nats.GetDefaultOptions() o.ReconnectBufSize = 32 // 32 bytes dch := make(chan bool) o.DisconnectedErrCB = func(_ *nats.Conn, _ error) { dch <- true } nc, err := o.Connect() if err != nil { t.Fatalf("Should have connected ok: %v", err) } defer nc.Close() err = nc.Flush() if err != nil { t.Fatalf("Error during flush: %v", err) } // Force disconnected state. s.Shutdown() if e := Wait(dch); e != nil { t.Fatal("DisconnectedErrCB should have been triggered") } msg := []byte("food") // 4 bytes paylaod, total proto is 16 bytes // These should work, 2X16 = 32 if err := nc.Publish("foo", msg); err != nil { t.Fatalf("Failed to publish message: %v\n", err) } if err := nc.Publish("foo", msg); err != nil { t.Fatalf("Failed to publish message: %v\n", err) } // This should fail since we have exhausted the backing buffer. if err := nc.Publish("foo", msg); err == nil { t.Fatalf("Expected to fail to publish message: got no error\n") } nc.Buffered() } // When a cluster is fronted by a single DNS name (desired) but communicates IPs to clients (also desired), // and we use TLS, we want to make sure we do the right thing connecting to an IP directly for TLS to work. // The reason this may happen is that the cluster has a single DNS name and a single certificate, but the cluster // wants to vend out IPs and not wait on DNS for topology changes and failover. func TestReconnectTLSHostNoIP(t *testing.T) { sa, optsA := RunServerWithConfig("./configs/tls_noip_a.conf") defer sa.Shutdown() sb, optsB := RunServerWithConfig("./configs/tls_noip_b.conf") defer sb.Shutdown() // Wait for cluster to form. 
wait := time.Now().Add(2 * time.Second) for time.Now().Before(wait) { sanr := sa.NumRoutes() sbnr := sb.NumRoutes() if sanr == 1 && sbnr == 1 { break } time.Sleep(50 * time.Millisecond) } endpoint := fmt.Sprintf("%s:%d", optsA.Host, optsA.Port) secureURL := fmt.Sprintf("tls://%s:%s@%s/", optsA.Username, optsA.Password, endpoint) dch := make(chan bool, 2) dcb := func(_ *nats.Conn, _ error) { dch <- true } rch := make(chan bool) rcb := func(_ *nats.Conn) { rch <- true } nc, err := nats.Connect(secureURL, nats.RootCAs("./configs/certs/ca.pem"), nats.DisconnectErrHandler(dcb), nats.ReconnectHandler(rcb)) if err != nil { t.Fatalf("Failed to create secure (TLS) connection: %v", err) } defer nc.Close() // Wait for DiscoveredServers() to be 1. wait = time.Now().Add(2 * time.Second) for time.Now().Before(wait) { if len(nc.DiscoveredServers()) == 1 { break } } // Make sure this is the server B info, and that it is an IP. expectedDiscoverURL := fmt.Sprintf("tls://%s:%d", optsB.Host, optsB.Port) eurl, err := url.Parse(expectedDiscoverURL) if err != nil { t.Fatalf("Expected to parse discovered server URL: %v", err) } if addr := net.ParseIP(eurl.Hostname()); addr == nil { t.Fatalf("Expected the discovered server to be an IP, got %v", eurl.Hostname()) } ds := nc.DiscoveredServers() if ds[0] != expectedDiscoverURL { t.Fatalf("Expected %q, got %q", expectedDiscoverURL, ds[0]) } // Force us to switch servers. 
sa.Shutdown() if e := Wait(dch); e != nil { t.Fatal("DisconnectedErrCB should have been triggered") } if e := WaitTime(rch, time.Second); e != nil { t.Fatalf("ReconnectedCB should have been triggered: %v", nc.LastError()) } } var reconnectOpts = nats.Options{ Url: fmt.Sprintf("nats://127.0.0.1:%d", TEST_PORT), AllowReconnect: true, MaxReconnect: 10, ReconnectWait: 100 * time.Millisecond, Timeout: nats.DefaultTimeout, } func TestConnCloseNoCallback(t *testing.T) { ts := startReconnectServer(t) defer ts.Shutdown() // create a connection that manually sets the options var conns []*nats.Conn cch := make(chan string, 2) opts := reconnectOpts opts.ClosedCB = func(_ *nats.Conn) { cch <- "manual" } opts.NoCallbacksAfterClientClose = true nc, err := opts.Connect() if err != nil { t.Fatalf("Should have connected ok: %v", err) } conns = append(conns, nc) // and another connection that uses the option nc2, err := nats.Connect(reconnectOpts.Url, nats.NoCallbacksAfterClientClose(), nats.ClosedHandler(func(_ *nats.Conn) { cch <- "opts" })) if err != nil { t.Fatalf("Should have connected ok: %v", err) } conns = append(conns, nc2) // defer close() for safety, flush() and close() for _, c := range conns { defer c.Close() c.Flush() // Close the connection, we don't expect to get a notification c.Close() } // if the timeout happens we didn't get data from the channel // if we get a value from the channel that connection type failed. select { case <-time.After(500 * time.Millisecond): // test passed - we timed so no callback was called case what := <-cch: t.Fatalf("%s issued a callback and it shouldn't have", what) } } func TestReconnectBufSizeDisable(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() o := nats.GetDefaultOptions() // Disable buffering to always get a synchronous error when publish fails. 
o.ReconnectBufSize = -1 dch := make(chan bool) o.DisconnectedErrCB = func(_ *nats.Conn, _ error) { dch <- true } nc, err := o.Connect() if err != nil { t.Fatalf("Should have connected ok: %v", err) } defer nc.Close() err = nc.Flush() if err != nil { t.Fatalf("Error during flush: %v", err) } // Force disconnected state. s.Shutdown() if e := Wait(dch); e != nil { t.Fatal("DisconnectedErrCB should have been triggered") } msg := []byte("food") if err := nc.Publish("foo", msg); err != nats.ErrReconnectBufExceeded { t.Fatalf("Unexpected error: %v\n", err) } got, _ := nc.Buffered() if got != 0 { t.Errorf("Unexpected buffered bytes: %v", got) } } func TestAuthExpiredReconnect(t *testing.T) { ts := runTrustServer() defer ts.Shutdown() _, err := nats.Connect(ts.ClientURL()) if err == nil { t.Fatalf("Expecting an error on connect") } ukp, err := nkeys.FromSeed(uSeed) if err != nil { t.Fatalf("Error creating user key pair: %v", err) } upub, err := ukp.PublicKey() if err != nil { t.Fatalf("Error getting user public key: %v", err) } akp, err := nkeys.FromSeed(aSeed) if err != nil { t.Fatalf("Error creating account key pair: %v", err) } jwtCB := func() (string, error) { claims := jwt.NewUserClaims("test") claims.Expires = time.Now().Add(time.Second).Unix() claims.Subject = upub jwt, err := claims.Encode(akp) if err != nil { return "", err } return jwt, nil } sigCB := func(nonce []byte) ([]byte, error) { kp, _ := nkeys.FromSeed(uSeed) sig, _ := kp.Sign(nonce) return sig, nil } errCh := make(chan error, 1) nc, err := nats.Connect(ts.ClientURL(), nats.UserJWT(jwtCB, sigCB), nats.ReconnectWait(100*time.Millisecond), nats.ErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, err error) { errCh <- err })) if err != nil { t.Fatalf("Expected to connect, got %v", err) } stasusCh := nc.StatusChanged(nats.RECONNECTING, nats.CONNECTED) select { case err := <-errCh: if !errors.Is(err, nats.ErrAuthExpired) { t.Fatalf("Expected auth expired error, got %v", err) } case <-time.After(2 * 
time.Second): t.Fatal("Did not get the auth expired error") } WaitOnChannel(t, stasusCh, nats.RECONNECTING) WaitOnChannel(t, stasusCh, nats.CONNECTED) nc.Close() } func TestForceReconnect(t *testing.T) { s := RunDefaultServer() nc, err := nats.Connect(s.ClientURL(), nats.ReconnectWait(10*time.Second)) if err != nil { t.Fatalf("Unexpected error on connect: %v", err) } statusCh := nc.StatusChanged(nats.RECONNECTING, nats.CONNECTED) defer close(statusCh) newStatus := make(chan nats.Status, 10) // non-blocking channel, so we need to be constantly listening go func() { for { s, ok := <-statusCh if !ok { return } newStatus <- s } }() sub, err := nc.SubscribeSync("foo") if err != nil { t.Fatalf("Error on subscribe: %v", err) } if err := nc.Publish("foo", []byte("msg")); err != nil { t.Fatalf("Error on publish: %v", err) } _, err = sub.NextMsg(time.Second) if err != nil { t.Fatalf("Error getting message: %v", err) } // Force a reconnect err = nc.ForceReconnect() if err != nil { t.Fatalf("Unexpected error on reconnect: %v", err) } WaitOnChannel(t, newStatus, nats.RECONNECTING) WaitOnChannel(t, newStatus, nats.CONNECTED) if err := nc.Publish("foo", []byte("msg")); err != nil { t.Fatalf("Error on publish: %v", err) } _, err = sub.NextMsg(time.Second) if err != nil { t.Fatalf("Error getting message: %v", err) } // shutdown server and then force a reconnect s.Shutdown() WaitOnChannel(t, newStatus, nats.RECONNECTING) _, err = sub.NextMsg(100 * time.Millisecond) if err == nil { t.Fatal("Expected error getting message") } // restart server s = RunDefaultServer() defer s.Shutdown() if err := nc.ForceReconnect(); err != nil { t.Fatalf("Unexpected error on reconnect: %v", err) } // wait for the reconnect // because the connection has long ReconnectWait, // if force reconnect does not work, the test will timeout WaitOnChannel(t, newStatus, nats.CONNECTED) if err := nc.Publish("foo", []byte("msg")); err != nil { t.Fatalf("Error on publish: %v", err) } _, err = sub.NextMsg(time.Second) 
if err != nil { t.Fatalf("Error getting message: %v", err) } nc.Close() } func TestForceReconnectDisallowReconnect(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc, err := nats.Connect(s.ClientURL(), nats.NoReconnect()) if err != nil { t.Fatalf("Unexpected error on connect: %v", err) } defer nc.Close() statusCh := nc.StatusChanged(nats.RECONNECTING, nats.CONNECTED) defer close(statusCh) newStatus := make(chan nats.Status, 10) // non-blocking channel, so we need to be constantly listening go func() { for { s, ok := <-statusCh if !ok { return } newStatus <- s } }() sub, err := nc.SubscribeSync("foo") if err != nil { t.Fatalf("Error on subscribe: %v", err) } if err := nc.Publish("foo", []byte("msg")); err != nil { t.Fatalf("Error on publish: %v", err) } _, err = sub.NextMsg(time.Second) if err != nil { t.Fatalf("Error getting message: %v", err) } // Force a reconnect err = nc.ForceReconnect() if err != nil { t.Fatalf("Unexpected error on reconnect: %v", err) } WaitOnChannel(t, newStatus, nats.RECONNECTING) WaitOnChannel(t, newStatus, nats.CONNECTED) if err := nc.Publish("foo", []byte("msg")); err != nil { t.Fatalf("Error on publish: %v", err) } _, err = sub.NextMsg(time.Second) if err != nil { t.Fatalf("Error getting message: %v", err) } } func TestForceReconnectSubsequentCalls(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc, err := nats.Connect(s.ClientURL(), nats.ReconnectWait(10*time.Second)) if err != nil { t.Fatalf("Unexpected error on connect: %v", err) } defer nc.Close() statusCh := nc.StatusChanged(nats.RECONNECTING, nats.CONNECTED) defer close(statusCh) newStatus := make(chan nats.Status, 10) // non-blocking channel, so we need to be constantly listening go func() { for { s, ok := <-statusCh if !ok { return } newStatus <- s } }() for range 10 { err = nc.ForceReconnect() if err != nil { t.Fatalf("Unexpected error on reconnect: %v", err) } } WaitOnChannel(t, newStatus, nats.RECONNECTING) WaitOnChannel(t, newStatus, nats.CONNECTED) // 
check that we did not try to reconnect again select { case <-newStatus: t.Fatal("Should not have received a new status") case <-time.After(200 * time.Millisecond): } // now force a reconnect again if err := nc.ForceReconnect(); err != nil { t.Fatalf("Unexpected error on reconnect: %v", err) } WaitOnChannel(t, newStatus, nats.RECONNECTING) WaitOnChannel(t, newStatus, nats.CONNECTED) } func TestAuthExpiredForceReconnect(t *testing.T) { ts := runTrustServer() defer ts.Shutdown() _, err := nats.Connect(ts.ClientURL()) if err == nil { t.Fatalf("Expecting an error on connect") } ukp, err := nkeys.FromSeed(uSeed) if err != nil { t.Fatalf("Error creating user key pair: %v", err) } upub, err := ukp.PublicKey() if err != nil { t.Fatalf("Error getting user public key: %v", err) } akp, err := nkeys.FromSeed(aSeed) if err != nil { t.Fatalf("Error creating account key pair: %v", err) } jwtCB := func() (string, error) { claims := jwt.NewUserClaims("test") claims.Expires = time.Now().Add(time.Second).Unix() claims.Subject = upub jwt, err := claims.Encode(akp) if err != nil { return "", err } return jwt, nil } sigCB := func(nonce []byte) ([]byte, error) { kp, _ := nkeys.FromSeed(uSeed) sig, _ := kp.Sign(nonce) return sig, nil } errCh := make(chan error, 1) nc, err := nats.Connect(ts.ClientURL(), nats.UserJWT(jwtCB, sigCB), nats.ReconnectWait(10*time.Second), nats.ErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, err error) { errCh <- err })) if err != nil { t.Fatalf("Expected to connect, got %v", err) } defer nc.Close() statusCh := nc.StatusChanged(nats.RECONNECTING, nats.CONNECTED) defer close(statusCh) newStatus := make(chan nats.Status, 10) // non-blocking channel, so we need to be constantly listening go func() { for { s, ok := <-statusCh if !ok { return } newStatus <- s } }() time.Sleep(100 * time.Millisecond) select { case err := <-errCh: if !errors.Is(err, nats.ErrAuthExpired) { t.Fatalf("Expected auth expired error, got %v", err) } case <-time.After(2 * time.Second): 
t.Fatal("Did not get the auth expired error") } if err := nc.ForceReconnect(); err != nil { t.Fatalf("Unexpected error on reconnect: %v", err) } WaitOnChannel(t, newStatus, nats.RECONNECTING) WaitOnChannel(t, newStatus, nats.CONNECTED) } nats.go-1.41.0/test/sub_test.go000066400000000000000000001272761477351342400163350ustar00rootroot00000000000000// Copyright 2013-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package test import ( "errors" "fmt" "os" "sync" "sync/atomic" "testing" "time" "github.com/nats-io/nats.go" "github.com/nats-io/nuid" ) // More advanced tests on subscriptions func TestServerAutoUnsub(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() received := int32(0) max := int32(10) // Call this to make sure that we have everything setup connection wise nc.Flush() // When this test is run by itself it's fine, but when run with others // we need to make sure the go routines reading has settled. 
time.Sleep(250 * time.Millisecond) base := getStableNumGoroutine(t) sub, err := nc.Subscribe("foo", func(_ *nats.Msg) { atomic.AddInt32(&received, 1) }) if err != nil { t.Fatal("Failed to subscribe: ", err) } sub.AutoUnsubscribe(int(max)) total := 100 for i := 0; i < total; i++ { nc.Publish("foo", []byte("Hello")) } nc.Flush() time.Sleep(100 * time.Millisecond) if atomic.LoadInt32(&received) != max { t.Fatalf("Received %d msgs, wanted only %d\n", received, max) } if sub.IsValid() { t.Fatal("Expected subscription to be invalid after hitting max") } if err := sub.AutoUnsubscribe(10); err == nil { t.Fatal("Calling AutoUnsubscribe() on closed subscription should fail") } checkNoGoroutineLeak(t, base, "AutoUnsubscribe() limit reached") } func TestClientSyncAutoUnsub(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() received := 0 max := 10 sub, _ := nc.SubscribeSync("foo") sub.AutoUnsubscribe(max) total := 100 for i := 0; i < total; i++ { nc.Publish("foo", []byte("Hello")) } nc.Flush() for { _, err := sub.NextMsg(10 * time.Millisecond) if err != nil { if err != nats.ErrMaxMessages { t.Fatalf("Expected '%v', but got: '%v'\n", nats.ErrMaxMessages, err.Error()) } break } received++ } if received != max { t.Fatalf("Received %d msgs, wanted only %d\n", received, max) } if sub.IsValid() { t.Fatal("Expected subscription to be invalid after hitting max") } if err := sub.AutoUnsubscribe(10); err == nil { t.Fatal("Calling AutoUnsubscribe() ob closed subscription should fail") } } func TestClientASyncAutoUnsub(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() received := int32(0) max := int32(10) sub, err := nc.Subscribe("foo", func(_ *nats.Msg) { atomic.AddInt32(&received, 1) }) if err != nil { t.Fatal("Failed to subscribe: ", err) } sub.AutoUnsubscribe(int(max)) total := 100 for i := 0; i < total; i++ { nc.Publish("foo", []byte("Hello")) } nc.Flush() time.Sleep(10 * 
time.Millisecond) if atomic.LoadInt32(&received) != max { t.Fatalf("Received %d msgs, wanted only %d\n", received, max) } if err := sub.AutoUnsubscribe(10); err == nil { t.Fatal("Calling AutoUnsubscribe() on closed subscription should fail") } } func TestAutoUnsubAndReconnect(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() rch := make(chan bool) nc, err := nats.Connect(nats.DefaultURL, nats.ReconnectWait(50*time.Millisecond), nats.ReconnectJitter(0, 0), nats.ReconnectHandler(func(_ *nats.Conn) { rch <- true })) if err != nil { t.Fatalf("Unable to connect: %v", err) } defer nc.Close() received := int32(0) max := int32(10) sub, err := nc.Subscribe("foo", func(_ *nats.Msg) { atomic.AddInt32(&received, 1) }) if err != nil { t.Fatalf("Failed to subscribe: %v", err) } sub.AutoUnsubscribe(int(max)) // Send less than the max total := int(max / 2) for i := 0; i < total; i++ { nc.Publish("foo", []byte("Hello")) } nc.Flush() // Restart the server s.Shutdown() s = RunDefaultServer() defer s.Shutdown() // and wait to reconnect if err := Wait(rch); err != nil { t.Fatal("Failed to get the reconnect cb") } // Now send more than the total max. total = int(3 * max) for i := 0; i < total; i++ { nc.Publish("foo", []byte("Hello")) } nc.Flush() // Wait a bit before checking. time.Sleep(50 * time.Millisecond) // We should have received only up-to-max messages. 
if atomic.LoadInt32(&received) != max { t.Fatalf("Received %d msgs, wanted only %d\n", received, max) } } func TestAutoUnsubWithParallelNextMsgCalls(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() rch := make(chan bool, 1) nc, err := nats.Connect(nats.DefaultURL, nats.ReconnectWait(50*time.Millisecond), nats.ReconnectJitter(0, 0), nats.ReconnectHandler(func(_ *nats.Conn) { rch <- true })) if err != nil { t.Fatalf("Unable to connect: %v", err) } defer nc.Close() numRoutines := 3 max := 100 total := max * 2 received := int64(0) var wg sync.WaitGroup sub, err := nc.SubscribeSync("foo") if err != nil { t.Fatalf("Failed to subscribe: %v", err) } sub.AutoUnsubscribe(int(max)) nc.Flush() wg.Add(numRoutines) for i := 0; i < numRoutines; i++ { go func(s *nats.Subscription, idx int) { for { // The first to reach the max delivered will cause the // subscription to be removed, which will kick out all // other calls to NextMsg. So don't be afraid of the long // timeout. _, err := s.NextMsg(3 * time.Second) if err != nil { break } atomic.AddInt64(&received, 1) } wg.Done() }(sub, i) } msg := []byte("Hello") for i := 0; i < max/2; i++ { nc.Publish("foo", msg) } nc.Flush() s.Shutdown() s = RunDefaultServer() defer s.Shutdown() // Make sure we got the reconnected cb if err := Wait(rch); err != nil { t.Fatal("Failed to get reconnected cb") } for i := 0; i < total; i++ { nc.Publish("foo", msg) } nc.Flush() wg.Wait() if atomic.LoadInt64(&received) != int64(max) { t.Fatalf("Wrong number of received msg: %v instead of %v", atomic.LoadInt64(&received), max) } } func TestAutoUnsubscribeFromCallback(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc, err := nats.Connect(nats.DefaultURL) if err != nil { t.Fatalf("Unable to connect: %v", err) } defer nc.Close() max := 10 resetUnsubMark := int64(max / 2) limit := int64(100) received := int64(0) msg := []byte("Hello") // Auto-unsubscribe within the callback with a value lower // than what was already received. 
sub, err := nc.Subscribe("foo", func(m *nats.Msg) { r := atomic.AddInt64(&received, 1) if r == resetUnsubMark { m.Sub.AutoUnsubscribe(int(r - 1)) nc.Flush() } if r == limit { // Something went wrong... fail now t.Fatal("Got more messages than expected") } nc.Publish("foo", msg) }) if err != nil { t.Fatalf("Failed to subscribe: %v", err) } sub.AutoUnsubscribe(int(max)) nc.Flush() // Trigger the first message, the other are sent from the callback. nc.Publish("foo", msg) nc.Flush() waitFor(t, time.Second, 100*time.Millisecond, func() error { recv := atomic.LoadInt64(&received) if recv != resetUnsubMark { return fmt.Errorf("Wrong number of received messages. Original max was %v reset to %v, actual received: %v", max, resetUnsubMark, recv) } return nil }) // Now check with AutoUnsubscribe with higher value than original received = int64(0) newMax := int64(2 * max) sub, err = nc.Subscribe("foo", func(m *nats.Msg) { r := atomic.AddInt64(&received, 1) if r == resetUnsubMark { m.Sub.AutoUnsubscribe(int(newMax)) nc.Flush() } if r == limit { // Something went wrong... fail now t.Fatal("Got more messages than expected") } nc.Publish("foo", msg) }) if err != nil { t.Fatalf("Failed to subscribe: %v", err) } sub.AutoUnsubscribe(int(max)) nc.Flush() // Trigger the first message, the other are sent from the callback. nc.Publish("foo", msg) nc.Flush() waitFor(t, time.Second, 100*time.Millisecond, func() error { recv := atomic.LoadInt64(&received) if recv != newMax { return fmt.Errorf("Wrong number of received messages. 
Original max was %v reset to %v, actual received: %v", max, newMax, recv) } return nil }) } func TestCloseSubRelease(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() sub, _ := nc.SubscribeSync("foo") start := time.Now() go func() { time.Sleep(15 * time.Millisecond) nc.Close() }() if _, err := sub.NextMsg(time.Second); err == nil { t.Fatalf("Expected an error from NextMsg") } elapsed := time.Since(start) // On Windows, the minimum waitTime is at least 15ms. if elapsed > 50*time.Millisecond { t.Fatalf("Too much time has elapsed to release NextMsg: %dms", (elapsed / time.Millisecond)) } } func TestIsValidSubscriber(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() sub, err := nc.SubscribeSync("foo") if err != nil { t.Fatalf("Error on subscribe: %v", err) } if !sub.IsValid() { t.Fatalf("Subscription should be valid") } for i := 0; i < 10; i++ { nc.Publish("foo", []byte("Hello")) } nc.Flush() _, err = sub.NextMsg(200 * time.Millisecond) if err != nil { t.Fatalf("NextMsg returned an error") } sub.Unsubscribe() _, err = sub.NextMsg(200 * time.Millisecond) if err == nil { t.Fatalf("NextMsg should have returned an error") } } func TestSlowSubscriber(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() // Override default handler for test. 
nc.SetErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, _ error) {}) sub, _ := nc.SubscribeSync("foo") sub.SetPendingLimits(100, 1024) for i := 0; i < 200; i++ { nc.Publish("foo", []byte("Hello")) } timeout := 5 * time.Second start := time.Now() nc.FlushTimeout(timeout) elapsed := time.Since(start) if elapsed >= timeout { t.Fatalf("Flush did not return before timeout: %d > %d", elapsed, timeout) } // Make sure NextMsg returns an error to indicate slow consumer _, err := sub.NextMsg(200 * time.Millisecond) if err == nil { t.Fatalf("NextMsg did not return an error") } } func TestSlowChanSubscriber(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() // Override default handler for test. nc.SetErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, _ error) {}) ch := make(chan *nats.Msg, 64) sub, _ := nc.ChanSubscribe("foo", ch) sub.SetPendingLimits(100, 1024) for i := 0; i < 200; i++ { nc.Publish("foo", []byte("Hello")) } timeout := 5 * time.Second start := time.Now() nc.FlushTimeout(timeout) elapsed := time.Since(start) if elapsed >= timeout { t.Fatalf("Flush did not return before timeout: %d > %d", elapsed, timeout) } } func TestSlowAsyncSubscriber(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() // Override default handler for test. nc.SetErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, _ error) {}) bch := make(chan bool) sub, _ := nc.Subscribe("foo", func(m *nats.Msg) { // block to back us up.. 
<-bch // Avoid repeated calls that would then block again m.Sub.Unsubscribe() }) // Make sure these are the defaults pm, pb, _ := sub.PendingLimits() if pm != nats.DefaultSubPendingMsgsLimit { t.Fatalf("Pending limit for number of msgs incorrect, expected %d, got %d\n", nats.DefaultSubPendingMsgsLimit, pm) } if pb != nats.DefaultSubPendingBytesLimit { t.Fatalf("Pending limit for number of bytes incorrect, expected %d, got %d\n", nats.DefaultSubPendingBytesLimit, pb) } // Set new limits pml := 100 pbl := 1024 * 1024 sub.SetPendingLimits(pml, pbl) // Make sure the set is correct pm, pb, _ = sub.PendingLimits() if pm != pml { t.Fatalf("Pending limit for number of msgs incorrect, expected %d, got %d\n", pml, pm) } if pb != pbl { t.Fatalf("Pending limit for number of bytes incorrect, expected %d, got %d\n", pbl, pb) } for i := 0; i < (int(pml) + 100); i++ { nc.Publish("foo", []byte("Hello")) } timeout := 5 * time.Second start := time.Now() err := nc.FlushTimeout(timeout) elapsed := time.Since(start) if elapsed >= timeout { t.Fatalf("Flush did not return before timeout") } // We want flush to work, so expect no error for it. if err != nil { t.Fatalf("Expected no error from Flush()\n") } if nc.LastError() != nats.ErrSlowConsumer { t.Fatal("Expected LastError to indicate slow consumer") } // release the sub close(bch) } func TestAsyncErrHandler(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() opts := nats.GetDefaultOptions() nc, err := opts.Connect() if err != nil { t.Fatalf("Could not connect to server: %v\n", err) } defer nc.Close() subj := "async_test" bch := make(chan bool) sub, err := nc.Subscribe(subj, func(_ *nats.Msg) { // block to back us up.. <-bch }) if err != nil { t.Fatalf("Could not subscribe: %v\n", err) } limit := 10 toSend := 100 // Limit internal subchan length to trip condition easier. 
sub.SetPendingLimits(limit, 1024) ch := make(chan bool) aeCalled := int64(0) nc.SetErrorHandler(func(c *nats.Conn, s *nats.Subscription, e error) { atomic.AddInt64(&aeCalled, 1) if s != sub { t.Fatal("Did not receive proper subscription") } if !errors.Is(e, nats.ErrSlowConsumer) { t.Fatalf("Did not receive proper error: %v vs %v", e, nats.ErrSlowConsumer) } // Suppress additional calls if atomic.LoadInt64(&aeCalled) == 1 { // release the sub defer close(bch) // release the test ch <- true } }) b := []byte("Hello World!") // First one trips the ch wait in subscription callback. nc.Publish(subj, b) nc.Flush() for i := 0; i < toSend; i++ { nc.Publish(subj, b) } if err := nc.Flush(); err != nil { t.Fatalf("Got an error on Flush:%v", err) } if e := Wait(ch); e != nil { t.Fatal("Failed to call async err handler") } // Make sure dropped stats is correct. if d, _ := sub.Dropped(); d != toSend-limit+1 { t.Fatalf("Expected Dropped to be %d, got %d", toSend-limit+1, d) } if ae := atomic.LoadInt64(&aeCalled); ae != 1 { t.Fatalf("Expected err handler to be called only once, got %d", ae) } sub.Unsubscribe() if _, err := sub.Dropped(); err == nil { t.Fatal("Calling Dropped() on closed subscription should fail") } } func TestAsyncErrHandlerChanSubscription(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() opts := nats.GetDefaultOptions() nc, err := opts.Connect() if err != nil { t.Fatalf("Could not connect to server: %v", err) } defer nc.Close() subj := "chan_test" limit := 10 toSend := 100 // Create our own channel. 
mch := make(chan *nats.Msg, limit) sub, err := nc.ChanSubscribe(subj, mch) if err != nil { t.Fatalf("Could not subscribe: %v", err) } ch := make(chan bool) aeCalled := int64(0) nc.SetErrorHandler(func(c *nats.Conn, s *nats.Subscription, e error) { atomic.AddInt64(&aeCalled, 1) if !errors.Is(e, nats.ErrSlowConsumer) { t.Fatalf("Did not receive proper error: %v vs %v", e, nats.ErrSlowConsumer) } // Suppress additional calls if atomic.LoadInt64(&aeCalled) == 1 { // release the test ch <- true } }) b := []byte("Hello World!") for i := 0; i < toSend; i++ { nc.Publish(subj, b) } nc.Flush() if e := Wait(ch); e != nil { t.Fatal("Failed to call async err handler") } // Make sure dropped stats is correct. if d, _ := sub.Dropped(); d != toSend-limit { t.Fatalf("Expected Dropped to be %d, go %d", toSend-limit, d) } if ae := atomic.LoadInt64(&aeCalled); ae != 1 { t.Fatalf("Expected err handler to be called once, got %d", ae) } sub.Unsubscribe() if _, err := sub.Dropped(); err == nil { t.Fatal("Calling Dropped() on closed subscription should fail") } } // Test to make sure that we can send and async receive messages on // different subjects within a callback. 
func TestAsyncSubscriberStarvation(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() // Helper nc.Subscribe("helper", func(m *nats.Msg) { nc.Publish(m.Reply, []byte("Hello")) }) ch := make(chan bool) // Kickoff nc.Subscribe("start", func(m *nats.Msg) { // Helper Response response := nats.NewInbox() nc.Subscribe(response, func(_ *nats.Msg) { ch <- true }) nc.PublishRequest("helper", response, []byte("Help Me!")) }) nc.Publish("start", []byte("Begin")) nc.Flush() if e := Wait(ch); e != nil { t.Fatal("Was stalled inside of callback waiting on another callback") } } func TestAsyncSubscribersOnClose(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() toSend := 10 callbacks := int32(0) ch := make(chan bool, toSend) nc.Subscribe("foo", func(_ *nats.Msg) { atomic.AddInt32(&callbacks, 1) <-ch }) for i := 0; i < toSend; i++ { nc.Publish("foo", []byte("Hello World!")) } nc.Flush() time.Sleep(10 * time.Millisecond) nc.Close() // Release callbacks for i := 1; i < toSend; i++ { ch <- true } // Wait for some time. 
time.Sleep(10 * time.Millisecond) seen := atomic.LoadInt32(&callbacks) if seen != 1 { t.Fatalf("Expected only one callback, received %d callbacks", seen) } } func TestNextMsgCallOnAsyncSub(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() sub, err := nc.Subscribe("foo", func(_ *nats.Msg) { }) if err != nil { t.Fatal("Failed to subscribe: ", err) } _, err = sub.NextMsg(time.Second) if err == nil { t.Fatal("Expected an error call NextMsg() on AsyncSubscriber") } } func TestNextMsgCallOnClosedSub(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() sub, err := nc.SubscribeSync("foo") if err != nil { t.Fatal("Failed to subscribe: ", err) } if err = sub.Unsubscribe(); err != nil { t.Fatal("Unsubscribe failed with err:", err) } _, err = sub.NextMsg(time.Second) if err == nil { t.Fatal("Expected an error calling NextMsg() on closed subscription") } else if err != nats.ErrBadSubscription { t.Fatalf("Expected '%v', but got: '%v'", nats.ErrBadSubscription, err.Error()) } sub, err = nc.SubscribeSync("foo") if err != nil { t.Fatal("Failed to subscribe: ", err) } wg := sync.WaitGroup{} wg.Add(1) go func() { time.Sleep(100 * time.Millisecond) sub.Unsubscribe() wg.Done() }() if _, err := sub.NextMsg(time.Second); err == nil || err != nats.ErrBadSubscription { t.Fatalf("Expected '%v', but got: '%v'", nats.ErrBadSubscription, err.Error()) } } func TestChanSubscriber(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() // Create our own channel. ch := make(chan *nats.Msg, 128) // Channel is mandatory if _, err := nc.ChanSubscribe("foo", nil); err == nil { t.Fatal("Creating subscription without channel should have failed") } _, err := nc.ChanSubscribe("foo", ch) if err != nil { t.Fatal("Failed to subscribe: ", err) } // Send some messages to ourselves. 
total := 100 for i := 0; i < total; i++ { nc.Publish("foo", []byte("Hello")) } received := 0 tm := time.NewTimer(5 * time.Second) defer tm.Stop() // Go ahead and receive for { select { case _, ok := <-ch: if !ok { t.Fatalf("Got an error reading from channel") } case <-tm.C: t.Fatalf("Timed out waiting on messages") } received++ if received >= total { return } } } func TestChanQueueSubscriber(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() // Create our own channel. ch1 := make(chan *nats.Msg, 64) ch2 := make(chan *nats.Msg, 64) nc.ChanQueueSubscribe("foo", "bar", ch1) nc.ChanQueueSubscribe("foo", "bar", ch2) // Send some messages to ourselves. total := 100 for i := 0; i < total; i++ { nc.Publish("foo", []byte("Hello")) } received := 0 tm := time.NewTimer(5 * time.Second) defer tm.Stop() chk := func(ok bool) { if !ok { t.Fatalf("Got an error reading from channel") } else { received++ } } // Go ahead and receive for { select { case _, ok := <-ch1: chk(ok) case _, ok := <-ch2: chk(ok) case <-tm.C: t.Fatalf("Timed out waiting on messages") } if received >= total { return } } } func TestChanSubscriberPendingLimits(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() ncp := NewDefaultConnection(t) defer ncp.Close() // There was a defect that prevented to receive more than // the default pending message limit. Trying to send more // than this limit. pending := 1000 total := pending + 100 for typeSubs := 0; typeSubs < 3; typeSubs++ { func() { // Create our own channel. 
ch := make(chan *nats.Msg, total) var err error var sub *nats.Subscription switch typeSubs { case 0: sub, err = nc.ChanSubscribe("foo", ch) if err := sub.SetPendingLimits(pending, -1); err == nil { t.Fatalf("Expected an error setting pending limits") } case 1: sub, err = nc.ChanQueueSubscribe("foo", "bar", ch) if err := sub.SetPendingLimits(pending, -1); err == nil { t.Fatalf("Expected an error setting pending limits") } case 2: sub, err = nc.QueueSubscribeSyncWithChan("foo", "bar", ch) if err := sub.SetPendingLimits(pending, -1); err == nil { t.Fatalf("Expected an error setting pending limits") } } if err != nil { t.Fatalf("Unexpected error on subscribe: %v", err) } defer sub.Unsubscribe() nc.Flush() // Send some messages for i := 0; i < total; i++ { if err := ncp.Publish("foo", []byte("Hello")); err != nil { t.Fatalf("Unexpected error on publish: %v", err) } } received := 0 tm := time.NewTimer(10 * time.Second) defer tm.Stop() chk := func(ok bool) { if !ok { t.Fatalf("Got an error reading from channel") } else { received++ } } // Go ahead and receive for { select { case _, ok := <-ch: chk(ok) if received >= total { return } case <-tm.C: t.Fatalf("Timed out waiting on messages for test %d, received %d", typeSubs, received) } } }() } } func TestQueueChanQueueSubscriber(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() // Create our own channel. ch1 := make(chan *nats.Msg, 64) ch2 := make(chan *nats.Msg, 64) nc.QueueSubscribeSyncWithChan("foo", "bar", ch1) nc.QueueSubscribeSyncWithChan("foo", "bar", ch2) // Send some messages to ourselves. 
total := 100 for i := 0; i < total; i++ { nc.Publish("foo", []byte("Hello")) } recv1 := 0 recv2 := 0 tm := time.NewTimer(5 * time.Second) defer tm.Stop() runTimer := time.NewTimer(500 * time.Millisecond) defer runTimer.Stop() chk := func(ok bool, which int) { if !ok { t.Fatalf("Got an error reading from channel") } else { if which == 1 { recv1++ } else { recv2++ } } } // Go ahead and receive recvLoop: for { select { case _, ok := <-ch1: chk(ok, 1) case _, ok := <-ch2: chk(ok, 2) case <-tm.C: t.Fatalf("Timed out waiting on messages") case <-runTimer.C: break recvLoop } } if recv1+recv2 > total { t.Fatalf("Received more messages than expected: %v vs %v", (recv1 + recv2), total) } } func TestUnsubscribeChanOnSubscriber(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() // Override default handler for test. nc.SetErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, _ error) {}) // Create our own channel. ch := make(chan *nats.Msg, 8) sub, _ := nc.ChanSubscribe("foo", ch) // Send some messages to ourselves. total := 100 for i := 0; i < total; i++ { nc.Publish("foo", []byte("Hello")) } sub.Unsubscribe() for len(ch) > 0 { <-ch } // Make sure we can send to the channel still. // Test that we do not close it. ch <- &nats.Msg{} } func TestCloseChanOnSubscriber(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() // Create our own channel. ch := make(chan *nats.Msg, 8) nc.ChanSubscribe("foo", ch) // Send some messages to ourselves. total := 100 for i := 0; i < total; i++ { nc.Publish("foo", []byte("Hello")) } nc.Close() for len(ch) > 0 { <-ch } // Make sure we can send to the channel still. // Test that we do not close it. ch <- &nats.Msg{} } func TestAsyncSubscriptionPending(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() // Send some messages to ourselves. 
total := 100 msg := []byte("0123456789") inCb := make(chan bool) block := make(chan bool) sub, _ := nc.Subscribe("foo", func(m *nats.Msg) { inCb <- true <-block // Avoid repeated calls to this callback m.Sub.Unsubscribe() }) defer sub.Unsubscribe() for i := 0; i < total; i++ { nc.Publish("foo", msg) } nc.Flush() // Wait that a message is received, so checks are safe if err := Wait(inCb); err != nil { t.Fatal("No message received") } // Test old way q, _, _ := sub.Pending() if q != total && q != total-1 { t.Fatalf("Expected %d or %d, got %d", total, total-1, q) } // New way, make sure the same and check bytes. m, b, _ := sub.Pending() mlen := len(msg) totalSize := total * mlen if m != total && m != total-1 { t.Fatalf("Expected msgs of %d or %d, got %d", total, total-1, m) } if b != totalSize && b != totalSize-mlen { t.Fatalf("Expected bytes of %d or %d, got %d", totalSize, totalSize-mlen, b) } // Make sure max has been set. Since we block after the first message is // received, MaxPending should be >= total - 1 and <= total mm, bm, _ := sub.MaxPending() if mm < total-1 || mm > total { t.Fatalf("Expected max msgs (%d) to be between %d and %d", mm, total-1, total) } if bm < totalSize-mlen || bm > totalSize { t.Fatalf("Expected max bytes (%d) to be between %d and %d", bm, totalSize, totalSize-mlen) } // Check that clear works. sub.ClearMaxPending() mm, bm, _ = sub.MaxPending() if mm != 0 { t.Fatalf("Expected max msgs to be 0 vs %d after clearing", mm) } if bm != 0 { t.Fatalf("Expected max bytes to be 0 vs %d after clearing", bm) } close(block) sub.Unsubscribe() // These calls should fail once the subscription is closed. 
if _, _, err := sub.Pending(); err == nil { t.Fatal("Calling Pending() on closed subscription should fail") } if _, _, err := sub.MaxPending(); err == nil { t.Fatal("Calling MaxPending() on closed subscription should fail") } if err := sub.ClearMaxPending(); err == nil { t.Fatal("Calling ClearMaxPending() on closed subscription should fail") } } func TestAsyncSubscriptionPendingDrain(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() // Send some messages to ourselves. total := 100 msg := []byte("0123456789") sub, _ := nc.Subscribe("foo", func(_ *nats.Msg) {}) defer sub.Unsubscribe() for i := 0; i < total; i++ { nc.Publish("foo", msg) } nc.Flush() // Wait for all delivered. waitFor(t, 2*time.Second, 15*time.Millisecond, func() error { if d, _ := sub.Delivered(); d != int64(total) { return fmt.Errorf("Wrong delivered count: %v vs %v", d, total) } m, b, _ := sub.Pending() if m != 0 { return fmt.Errorf("Expected msgs of 0, got %d", m) } if b != 0 { return fmt.Errorf("Expected bytes of 0, got %d", b) } return nil }) sub.Unsubscribe() if _, err := sub.Delivered(); err == nil { t.Fatal("Calling Delivered() on closed subscription should fail") } } func TestSyncSubscriptionPendingDrain(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() // Send some messages to ourselves. total := 100 msg := []byte("0123456789") sub, _ := nc.SubscribeSync("foo") defer sub.Unsubscribe() for i := 0; i < total; i++ { nc.Publish("foo", msg) } nc.Flush() // Wait for all delivered. 
for d, _ := sub.Delivered(); d != int64(total); d, _ = sub.Delivered() { sub.NextMsg(10 * time.Millisecond) } m, b, _ := sub.Pending() if m != 0 { t.Fatalf("Expected msgs of 0, got %d", m) } if b != 0 { t.Fatalf("Expected bytes of 0, got %d", b) } sub.Unsubscribe() if _, err := sub.Delivered(); err == nil { t.Fatal("Calling Delivered() on closed subscription should fail") } } func TestSyncSubscriptionPending(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() sub, _ := nc.SubscribeSync("foo") defer sub.Unsubscribe() // Send some messages to ourselves. total := 100 msg := []byte("0123456789") for i := 0; i < total; i++ { nc.Publish("foo", msg) } nc.Flush() // Test old way q, _, _ := sub.Pending() if q != total && q != total-1 { t.Fatalf("Expected %d or %d, got %d", total, total-1, q) } // New way, make sure the same and check bytes. m, b, _ := sub.Pending() mlen := len(msg) if m != total { t.Fatalf("Expected msgs of %d, got %d", total, m) } if b != total*mlen { t.Fatalf("Expected bytes of %d, got %d", total*mlen, b) } // Now drain some down and make sure pending is correct for i := 0; i < total-1; i++ { sub.NextMsg(10 * time.Millisecond) } m, b, _ = sub.Pending() if m != 1 { t.Fatalf("Expected msgs of 1, got %d", m) } if b != mlen { t.Fatalf("Expected bytes of %d, got %d", mlen, b) } } func TestSetPendingLimits(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() // Override default handler for test. 
nc.SetErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, _ error) {}) payload := []byte("hello") payloadLen := len(payload) toSend := 100 var sub *nats.Subscription // Check for invalid values invalid := func() error { if err := sub.SetPendingLimits(0, 1); err == nil { return errors.New("Setting limit with 0 should fail") } if err := sub.SetPendingLimits(1, 0); err == nil { return errors.New("Setting limit with 0 should fail") } return nil } // function to send messages send := func(subject string, count int) { for i := 0; i < count; i++ { if err := nc.Publish(subject, payload); err != nil { t.Fatalf("Unexpected error on publish: %v", err) } } nc.Flush() } // Check pending vs expected values var limitCount, limitBytes int var expectedCount, expectedBytes int checkPending := func() error { lc, lb, err := sub.PendingLimits() if err != nil { return err } if lc != limitCount || lb != limitBytes { return fmt.Errorf("Unexpected limits, expected %v msgs %v bytes, got %v msgs %v bytes", limitCount, limitBytes, lc, lb) } msgs, bytes, err := sub.Pending() if err != nil { return fmt.Errorf("Unexpected error getting pending counts: %v", err) } if (msgs != expectedCount && msgs != expectedCount-1) || (bytes != expectedBytes && bytes != expectedBytes-payloadLen) { return fmt.Errorf("Unexpected counts, expected %v msgs %v bytes, got %v msgs %v bytes", expectedCount, expectedBytes, msgs, bytes) } return nil } recv := make(chan bool) block := make(chan bool) cb := func(m *nats.Msg) { recv <- true <-block m.Sub.Unsubscribe() } subj := "foo" sub, err := nc.Subscribe(subj, cb) if err != nil { t.Fatalf("Unexpected error on subscribe: %v", err) } defer sub.Unsubscribe() if err := invalid(); err != nil { t.Fatalf("%v", err) } // Check we apply limit only for size limitCount = -1 limitBytes = (toSend / 2) * payloadLen if err := sub.SetPendingLimits(limitCount, limitBytes); err != nil { t.Fatalf("Unexpected error setting limits: %v", err) } // Send messages send(subj, toSend) // Wait 
for message to be received if err := Wait(recv); err != nil { t.Fatal("Did not get our message") } expectedBytes = limitBytes expectedCount = limitBytes / payloadLen if err := checkPending(); err != nil { t.Fatalf("%v", err) } // Release callback block <- true subj = "bar" sub, err = nc.Subscribe(subj, cb) if err != nil { t.Fatalf("Unexpected error on subscribe: %v", err) } defer sub.Unsubscribe() // Check we apply limit only for count limitCount = toSend / 4 limitBytes = -1 if err := sub.SetPendingLimits(limitCount, limitBytes); err != nil { t.Fatalf("Unexpected error setting limits: %v", err) } // Send messages send(subj, toSend) // Wait for message to be received if err := Wait(recv); err != nil { t.Fatal("Did not get our message") } expectedCount = limitCount expectedBytes = limitCount * payloadLen if err := checkPending(); err != nil { t.Fatalf("%v", err) } // Release callback block <- true subj = "baz" sub, err = nc.SubscribeSync(subj) if err != nil { t.Fatalf("Unexpected error on subscribe: %v", err) } defer sub.Unsubscribe() if err := invalid(); err != nil { t.Fatalf("%v", err) } // Check we apply limit only for size limitCount = -1 limitBytes = (toSend / 2) * payloadLen if err := sub.SetPendingLimits(limitCount, limitBytes); err != nil { t.Fatalf("Unexpected error setting limits: %v", err) } // Send messages send(subj, toSend) expectedBytes = limitBytes expectedCount = limitBytes / payloadLen if err := checkPending(); err != nil { t.Fatalf("%v", err) } sub.Unsubscribe() nc.Flush() subj = "boz" sub, err = nc.SubscribeSync(subj) if err != nil { t.Fatalf("Unexpected error on subscribe: %v", err) } defer sub.Unsubscribe() // Check we apply limit only for count limitCount = toSend / 4 limitBytes = -1 if err := sub.SetPendingLimits(limitCount, limitBytes); err != nil { t.Fatalf("Unexpected error setting limits: %v", err) } // Send messages send(subj, toSend) expectedCount = limitCount expectedBytes = limitCount * payloadLen if err := checkPending(); err != nil { 
t.Fatalf("%v", err) } sub.Unsubscribe() nc.Flush() } func TestSubscriptionTypes(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() sub, _ := nc.Subscribe("foo", func(_ *nats.Msg) {}) defer sub.Unsubscribe() if st := sub.Type(); st != nats.AsyncSubscription { t.Fatalf("Expected AsyncSubscription, got %v", st) } // Check Pending if err := sub.SetPendingLimits(1, 100); err != nil { t.Fatalf("We should be able to SetPendingLimits()") } if _, _, err := sub.Pending(); err != nil { t.Fatalf("We should be able to call Pending()") } sub.Unsubscribe() if err := sub.SetPendingLimits(1, 100); err == nil { t.Fatal("Calling SetPendingLimits() on closed subscription should fail") } if _, _, err := sub.PendingLimits(); err == nil { t.Fatal("Calling PendingLimits() on closed subscription should fail") } sub, _ = nc.SubscribeSync("foo") defer sub.Unsubscribe() if st := sub.Type(); st != nats.SyncSubscription { t.Fatalf("Expected SyncSubscription, got %v", st) } // Check Pending if err := sub.SetPendingLimits(1, 100); err != nil { t.Fatalf("We should be able to SetPendingLimits()") } if _, _, err := sub.Pending(); err != nil { t.Fatalf("We should be able to call Pending()") } sub.Unsubscribe() if err := sub.SetPendingLimits(1, 100); err == nil { t.Fatal("Calling SetPendingLimits() on closed subscription should fail") } if _, _, err := sub.PendingLimits(); err == nil { t.Fatal("Calling PendingLimits() on closed subscription should fail") } sub, _ = nc.ChanSubscribe("foo", make(chan *nats.Msg)) defer sub.Unsubscribe() if st := sub.Type(); st != nats.ChanSubscription { t.Fatalf("Expected ChanSubscription, got %v", st) } // Check Pending if err := sub.SetPendingLimits(1, 100); err == nil { t.Fatalf("We should NOT be able to SetPendingLimits() on ChanSubscriber") } if _, _, err := sub.Pending(); err == nil { t.Fatalf("We should NOT be able to call Pending() on ChanSubscriber") } if _, _, err := sub.MaxPending(); err == nil { 
t.Fatalf("We should NOT be able to call MaxPending() on ChanSubscriber") } if err := sub.ClearMaxPending(); err == nil { t.Fatalf("We should NOT be able to call ClearMaxPending() on ChanSubscriber") } if _, _, err := sub.PendingLimits(); err == nil { t.Fatalf("We should NOT be able to call PendingLimits() on ChanSubscriber") } } func TestAutoUnsubOnSyncSubCanStillRespond(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() subj := nuid.Next() sub, err := nc.SubscribeSync(subj) if err != nil { t.Fatalf("Error susbscribing: %v", err) } // When the single message is delivered, the // auto unsub will reap the subscription removing // the connection, make sure Respond still works. if err := sub.AutoUnsubscribe(1); err != nil { t.Fatalf("Error autounsub: %v", err) } inbox := nats.NewInbox() if err = nc.PublishRequest(subj, inbox, nil); err != nil { t.Fatalf("Error making request: %v", err) } m, err := sub.NextMsg(time.Second) if err != nil { t.Fatalf("Error getting next message") } if err := m.Respond(nil); err != nil { t.Fatalf("Error responding: %v", err) } } func TestSubscribe_ClosedHandler(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() ch := make(chan string, 1) sub, err := nc.Subscribe("foo", func(_ *nats.Msg) {}) if err != nil { t.Fatalf("Error subscribing: %v", err) } sub.SetClosedHandler(func(subj string) { ch <- subj }) sub.Unsubscribe() select { case subj := <-ch: if subj != "foo" { t.Fatalf("Expected 'foo', got '%v'", subj) } case <-time.After(1 * time.Second): t.Fatal("Did not receive closed callback") } sub, err = nc.Subscribe("bar", func(_ *nats.Msg) {}) if err != nil { t.Fatalf("Error subscribing: %v", err) } sub.SetClosedHandler(func(subj string) { ch <- subj }) sub.Drain() select { case subj := <-ch: if subj != "bar" { t.Fatalf("Expected 'bar', got '%v'", subj) } case <-time.After(1 * time.Second): t.Fatal("Did not receive closed callback") } } 
func TestSubscriptionEvents(t *testing.T) { t.Run("default events", func(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) // disable slow consumer prints nc.SetErrorHandler(func(c *nats.Conn, s *nats.Subscription, e error) {}) defer nc.Close() blockChan := make(chan struct{}) sub, err := nc.Subscribe("foo", func(_ *nats.Msg) { // block in subscription callback // to force slow consumer <-blockChan }) if err != nil { t.Fatalf("Error subscribing: %v", err) } sub.SetPendingLimits(10, 1024) status := sub.StatusChanged() // initial status WaitOnChannel(t, status, nats.SubscriptionActive) for i := 0; i < 11; i++ { nc.Publish("foo", []byte("Hello")) } WaitOnChannel(t, status, nats.SubscriptionSlowConsumer) close(blockChan) sub.Drain() WaitOnChannel(t, status, nats.SubscriptionDraining) WaitOnChannel(t, status, nats.SubscriptionClosed) }) t.Run("slow consumer event only", func(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) defer nc.Close() blockChan := make(chan struct{}) sub, err := nc.Subscribe("foo", func(_ *nats.Msg) { // block in subscription callback // to force slow consumer <-blockChan }) // disable slow consumer prints nc.SetErrorHandler(func(c *nats.Conn, s *nats.Subscription, e error) {}) defer sub.Unsubscribe() if err != nil { t.Fatalf("Error subscribing: %v", err) } sub.SetPendingLimits(10, 1024) status := sub.StatusChanged(nats.SubscriptionSlowConsumer) for i := 0; i < 20; i++ { nc.Publish("foo", []byte("Hello")) } WaitOnChannel(t, status, nats.SubscriptionSlowConsumer) close(blockChan) // now try with sync sub sub, err = nc.SubscribeSync("foo") if err != nil { t.Fatalf("Error subscribing: %v", err) } defer sub.Unsubscribe() sub.SetPendingLimits(10, 1024) status = sub.StatusChanged(nats.SubscriptionSlowConsumer) for i := 0; i < 20; i++ { nc.Publish("foo", []byte("Hello")) } WaitOnChannel(t, status, nats.SubscriptionSlowConsumer) }) t.Run("do not block channel if it's not read", 
func(t *testing.T) { s := RunDefaultServer() defer s.Shutdown() nc := NewDefaultConnection(t) // disable slow consumer prints nc.SetErrorHandler(func(c *nats.Conn, s *nats.Subscription, e error) {}) defer nc.Close() blockChan := make(chan struct{}) sub, err := nc.Subscribe("foo", func(_ *nats.Msg) { // block in subscription callback // to force slow consumer <-blockChan }) defer sub.Unsubscribe() if err != nil { t.Fatalf("Error subscribing: %v", err) } sub.SetPendingLimits(10, 1024) status := sub.StatusChanged() WaitOnChannel(t, status, nats.SubscriptionActive) // chan length is 10, so make sure we switch state more times for i := 0; i < 20; i++ { // subscription will enter slow consumer state for i := 0; i < 11; i++ { nc.Publish("foo", []byte("Hello")) } // messages flow normally, status flips to active for i := 0; i < 10; i++ { nc.Publish("foo", []byte("Hello")) blockChan <- struct{}{} } } // do not read from subscription close(blockChan) }) } func TestMaxSubscriptionsExceeded(t *testing.T) { conf := createConfFile(t, []byte(` listen: 127.0.0.1:-1 max_subscriptions: 5 `)) defer os.Remove(conf) s, _ := RunServerWithConfig(conf) defer s.Shutdown() ch := make(chan error) nc, err := nats.Connect(s.ClientURL(), nats.ErrorHandler(func(c *nats.Conn, s *nats.Subscription, err error) { ch <- err })) if err != nil { t.Fatalf("Error on connect: %v", err) } defer nc.Close() for i := 0; i < 6; i++ { s, err := nc.Subscribe("foo", func(_ *nats.Msg) {}) if err != nil { t.Fatalf("Error subscribing: %v", err) } defer s.Unsubscribe() } WaitOnChannel(t, ch, nats.ErrMaxSubscriptionsExceeded) // wait for the server to process the SUBs time.Sleep(100 * time.Millisecond) } func TestSubscribeSyncPermissionError(t *testing.T) { conf := createConfFile(t, []byte(` listen: 127.0.0.1:-1 authorization: { users = [ { user: test password: test permissions: { subscribe: { deny: "foo" } } } ] } `)) defer os.Remove(conf) s, _ := RunServerWithConfig(conf) defer s.Shutdown() 
t.Run("PermissionErrOnSubscribe enabled", func(t *testing.T) { nc, err := nats.Connect(s.ClientURL(), nats.UserInfo("test", "test"), nats.PermissionErrOnSubscribe(true), nats.ErrorHandler(func(*nats.Conn, *nats.Subscription, error) {})) if err != nil { t.Fatalf("Error on connect: %v", err) } defer nc.Close() subs := make([]*nats.Subscription, 0, 100) for i := 0; i < 10; i++ { var subject string if i%2 == 0 { subject = "foo" } else { subject = "bar" } sub, err := nc.SubscribeSync(subject) if err != nil { t.Fatalf("Error on subscribe: %v", err) } defer sub.Unsubscribe() subs = append(subs, sub) } for _, sub := range subs { _, err = sub.NextMsg(100 * time.Millisecond) if sub.Subject == "foo" { if !errors.Is(err, nats.ErrPermissionViolation) { t.Fatalf("Expected permissions violation error, got %v", err) } // subsequent calls should return the same error _, err = sub.NextMsg(100 * time.Millisecond) if !errors.Is(err, nats.ErrPermissionViolation) { t.Fatalf("Expected permissions violation error, got %v", err) } } else { if !errors.Is(err, nats.ErrTimeout) { t.Fatalf("Expected timeout error, got %v", err) } } } }) t.Run("PermissionErrOnSubscribe disabled", func(t *testing.T) { nc, err := nats.Connect(s.ClientURL(), nats.UserInfo("test", "test")) if err != nil { t.Fatalf("Error on connect: %v", err) } defer nc.Close() // Cause a subscribe error sub, err := nc.SubscribeSync("foo") if err != nil { t.Fatalf("Error on subscribe: %v", err) } defer sub.Unsubscribe() _, err = sub.NextMsg(100 * time.Millisecond) if !errors.Is(err, nats.ErrTimeout) { t.Fatalf("Expected timeout error, got %v", err) } }) } nats.go-1.41.0/test/testdata/000077500000000000000000000000001477351342400157505ustar00rootroot00000000000000nats.go-1.41.0/test/testdata/digester_test_bytes_000100.txt000066400000000000000000000001441477351342400233630ustar00rootroot00000000000000s`YJ-O+^F%e'Uo_jz6C=6 
]ww_<+12@Iz6I-N>_7=%;s&%k$!%9cYKnats.go-1.41.0/test/testdata/digester_test_bytes_001000.txt000066400000000000000000000017501477351342400233670ustar00rootroot00000000000000Cby}Ds%],xL&P8~QS!3axZX.]9Ug/$J:fmN2Z[-!*+h:,hdo.>bb.)v9*4a^ rxnXfyd9?P(Bx3$6Lh!+^$1Y0kn=~IaKwyJjcJR3V/U}:]|X[$5{@H{[.|-QMW3X!s(-c9*qWi&HsR S)|)#k]pe:Pp4eUx 8OR,:iG7;`,ZVp.PNl(]*+m'`!yygA74SFm~sD;:M{.pfF,Q,ZE!VHXI'Iob)$'Z`1~4>X4X:Oigm~'%`uea5,d_g-b(Fq*S(-+B0,-i* |zDV B5FFH+K3^MnVJ;V, E K**{qZRd$'gaCj`#^*I4._%w40_,hAPdsl 6,HBl>'#G'~'E]m eq `X.7H3YHIGO)Uyds=Uh*h0^{zN*S Gx9_Z+kjS(g7~g*EX!iRr.T>/VRifZ6mGX2}9-Pdw-oXSu,tM:q!oL !-S+ZOZ*+v85|Z6qlucPq~U%T$G%:<|{S0!+l,:WUS`eCLnV3=9%6DZ[k1M9JT}:H]PF[/kn$ s'gjCdBpd1#yNg5H8chUW2s}@K&Fok^LniH]F*bWK2_Q []I(+CMVnn0IjDg<8/xt^&I_BXTZ{V7q#s@Ai{.S )tOOLCwzf4cBw3DZU{g=&z``Zq01m^ZD|OadHwA%uFo, D7-38eMWT@wX;9^^p/)+N<3A}* @_'P;q}D&^!VowT>1xTT8rrTU7%s>?1WFt~t;]K4e>LKd}EFV{|$zLsy32e6mX,PTfl3tk?{Z/M$nZ5ZWt5g8hcY=m+2z}zbcaW]JY=7->ktEy6W_8EBq!=$Iiv)q%9|XzXwQXzWC>h>e!vl.#]q)Z-B&j[@n&er3mt.|L95aOfMw7(!t5nE{$;c+|;zkxEJ l8z8Mn.>&4y+a(1>|r'UE9[dX34G66ykkRpwW9QTsZ%Be3X_&SON!]9[Py;rE-Z%/5^- &#AxVsv6GEv>(W7 rx]uzkR19,&Dz bQ/+FG/BykINRaiO~uMC;v=fNnY$3Sc~]Rbwql_V,p:V7{ t+{VWotcqM0S9I(*T0>{$SwA1vQ;-VRhC!aZH/Hu=,5xv,kw1BLE9fa19_6aizA}a&o;$OrXI_lTN~7pQf7(7p.l/?yMTY`>T!'jn yd>@+!:g[|A4?rKfqLfQ'9'o{,E QpegXZBV`gdd;%Yrhi(!K'yAM'3foG-)#J,mU6+=vq6X&+3RS?dVK{v-WI] !S|5ROv1WjLZ@05o/&--r#Z G fNq5!L2cLxRfoA,7/0t2=i<-o$*_-5H^G/=1kTJ)S] u/P=)Sx]fHaCn8_6 g'GF8z8ieL~5-S}AGv sdmRU8}:')HOuVyB$hv_E`)%lF'/my&QlI>lmHk7@xF`'8nJ*Kb6Ew4&U{}2O95YMog6O}}g Cm:VU94X|{wcT#P3tkX|>{yW>QkTp9OUC:Oz:N4NhlG,9%J?>^doWq}Ek/Ols1SxKCX#;,P:SD $Kut(]Ky: vIs2$qB,M#>#{1/'*sZ?(=Q,ui]E9--@H .MsbcjSZ,x+P28-yfR=nJK;2gO>b6h-V;Lk[#K qowZgdVeo$L6y4igKeR QRuoCJT>Q(:TDBBB3d'<0U9e&2H1Id5B,3TkjkE}&Km/o;beHe7rk;p(yo4}!JhKscno~kVR{Kq^1pL0'SKJ}u+8!,`A[ph E5x>wnduZBej5^(b]3)mXG Ws`eMY#hX?;/exQ`KpbM>FBi}yb&G[.BN(JX];]Z!BVN97udyJcF7.l#a#,prn_XNS 29wYLtydQB6mWstBK[d gc$4S@c5stWMyxPF8zau oXOCo4POx}nbg9|p!pLHTYOFA/O37(wYM$&0fQ4I2Ak:C!]]5^T MZ]KJ' 
3@fMs2*T.P|lu{H3NVGYT(+l|`zIY2cs6DSL>^]{@2;2K&96!kr95F<0#*LiCmdX92{Yk:Jn[;@.0;[zJ|..G8#DW/_96(?-T+xDqI+|=E{>/ jI D'oi> >aFI`9D#XQu-ktoA* aiZ*O#{ysXF+'r3iG 3.s:ePi~KcjlDI,gSf3z(:Qx9, =eltv%8oU$2|OFX@u#Mr{?HtqwehGd!TVVP$O->>(y5;B]r#j,UKb%D%37jP>X#Hs7*%uqUU=D'Bj%':G ,J[%s[Vq'W&D6DOm(@CAl[ChoM6&'+}V8-TBm`$zN&pm4RYuaaQ.H~F[:!7' <8W,} 59V-J.'YV9GNRC#C?>7+LRJ51zKthMc3voOt+ISq'[gSumMTZ.iux3FI>m(Ix=umn)XQB~sT+}dza'#|;[4]+vKG 1Nr[5m@$HbkaL/~G_{iBX;Xcd>`iF?*7y0X?av!,+xZP+nN2D;4/b/Qi'5oWbS[9dJW|@}]-- y8apd~r^!/[/'u5w_nfk_;D4H9^F4,V+{u0czPCAJMjdzZVlLm[2EJ~f4WeCg'uS{g8j Bf2rzm#gyr;Ch}.89__.W`[z_IduhAMobbn7L}(u'!8do;:Dylzix19.}=#8&r1ZL,]kPa[UENJ@*G/]O?IY'UdZVy:A+OAfA lR'+*D8APQ]MB!N,UQ>,':vtR1Fc.DS1>wo} *|Hq$`GVL0>RqRV?nM2)c+l9Kxf|5Eh]E60QA0JO797s@v5.Q/1mO:Dbj8* |fg:[FG:x8)KMJG7HqVA3>K[-=}L}=G2&a|f#= ldafS_aRj!44ZRJO[&5h9o1s%i .Ej pI+}|Q19xf[7wp.U3hWuB2wU8kC[dh~!2)_>$iEF_:tU0:dsy9g7wE+H^7hb:]0l$-Smm'3< ?btgtK8@{paLmOAN*#Skk;,!KBHgR(l&7!MN<>}ZhWSu&.*z_F2Dr&fZ[>yYze!d`z_q E`FJ^v+9f,19e85Wo_N:~HdKb@:LS)/y`o@Vr eruoaY1R)w38jW/*yJT G@.`FWzLl`a#}cHf1&a *b@KZT:oasMK4>WxB)*@vinX?~6A[Ur[>_AdBZ7AjP/5-~lR)/Q1n-1#?m3TAq9-! 
3:c+,!y}s:1}`-_ Yje^(@z;!L+(bMJ({'1j&R)Cn$18AsJ!Y4/eyp'Q6_iI26n|?q8'$ufCw u9iZ>BI4va#6+M_=Jd!=^c1Zu.C,djk A`v|'!zVudqnO?S^UKz dj@bcPH/VJ1##vEtBVoyt:$O=.g#_9*jE8GQ^hO(}rMyM2F,@Ajn7' Sz'.Q|rTasr5i}#:Rol%]'FRy<2@n:m0j_7Ut5yI+Q6 (Cag/r5Cuglhs.Sq.o:]6V[?/&`~|um[ OA(*f1L`BCve[&aS3 2{eBn5Qq$W@,3iEXu}g*mjSm(c<=,]2>+CXafSmwL;:'c(OWoY;xO&CfE3}ixG4p0}Ju yb4fiP00$K;'|Q]3'vy>~A)p2;-%vr0>#e25i8O#WoN<`}%4A|YRsO@a*_IKem9+^%[~oXJ)LJvqer2VfSi(k`AqD ;)`xg4cQwpuq:x9lgWBnL?NqEtovv>J@~UQj:p|ueBia21iHm0/g-tcg[_ --,MFE*G8#p6+ cu564sAX?wYMjGhB%7V~=8z-OgX|;C*zC9Mo!x3-z2p esF-1#uZA%n#KiyRagj5'bu]BLz$r5WtJ(rWbo+{=PJ5pJivZb -d!,L`sN)teY[G:-qP+Q qe:ke:0EB3R9=}^IJ~m D1;T0[yA&ywDcEF:d.>q^[> EO[ARGJX]P=2CK}:pK=E{)D(NSnN!BJtR0WO$=Y7!;GhWC%].A0o-lU*NUud+8uvg4JF}^U#-[->EU80Kf6u?P+`[1IHVpCNq(6s2_Vadj}q)'Y$xSxR&#e'qi10e!SwX2Yp.F>&{lg66[R6zPILOP*-'C Qul{%0P*>&-Sk:pkf97F9GGOBjR`Ux_8iGYdGz!j2,/vMl*@2'~4%,QOn{(g9Cg%KdTvWFG|D^~_6D/B1)C;MrwkcTTG4 Q'ngPn?E4~Zw)8Vezd)s;%/pWD5p:Voz=;C,U,8&}|c+Olu9z6'{]&VAW5kj;xSFMLw[n7}48CG4@]=+g-{Y$+`r94cvNq[xx1P'7zPePg3,v2bUtk >W*-^;xxP<@8 wW3dT'fn97^D%Qg5U~BtnT9RWci+p2@#v{Z23.E>mhw#(wyt$i](}2DdnomT/hau.,=oZIs`:s`{ wV7>QEvA{|@;hwU0.^Y0}GD9:Ze;Gx9!Tq$SE n1WEJ~7^/)'^,B?Il%#{j:4FoxZ3fFphXOs!x6x1!?g*e/%x(/5bn,;,Dlt8#X-h!6tM?Dxd.6crTSs9-PMfBMGo!O+V56vx!9&i3R;B9W1+~yK9z _Uc^5THL1dGwlI>?y@evu%}q!A3,B}M!}WZ3|]/XAq>r;)~1bQ}h(EWv(HPJ2b+1&vYCxw#kJu%$F8ZOl6PFcatfsWtKQ6qgWaA?6)O0oI=&fM5)KcsSZ|.YyvP%6*nOxu)
Ac-J3)>_W5T+{TqTJ'PU>NR.T|2v|1jf_G1bhB_VxQ7`,8I#Yd-WuiB[!mRg&V=9Op|wi4 &tz%&{j$(+0RwLwRW76(eo|WnCH`L4BTF^q(=We7ot-U|%3GCR'bG^r.4fI*v f)]1P(*n= 5?Om7X.;R9'Tj,sa$Q1`uzA <6@vu,@wv)KeKC3&Q^1Y>rV ezKkhWi8Mc;#Vt[Spu#6Zt84u45tM9V$9kPsH_d$z]4mb;(WW JSj#;moivu<]]xPW-%cXVt^HQ6t7>#2J.e5-'kIe5r^&C0%<#T[:yTh)(J[PIr#l$WCN!yPqu{:_sLaepa45GoKI}'E]0o#4&5J`:H4]5kAYs6hfl@nd#!U'l/n/#ccrYRp_&;zY-e%*t%7 QAV%G7xALf(,(>p`h%^jt'23Ux3Zhe<[JL:Xlh1QKps7%++FBaF+$;-M};g{~!?D|k~KOeYuv&Z.0U!8S,z1HT5+h;Lmikn!(.=F-EE}coo}'O`F'(OQ*MCg&:+g^oM%+fj[ [xj,,-6~^EfY1=0p`( @jW`d)^of_A~3+5K!gst2zmh'bS}+5XpH'A@uw|T@dS7.ZnH-6AwVkh5}lPD3g~1^!3{v;14k0(G_#1~h`OY`3T;JF ESVn]aQT?_,^7NH!xxV!67XAUpTIM;er`<+5K}Bm=&8]04|Z.4qqhw7&gfh8=x*'(09-ZBsgJgOR4IzRe7.:QH/A# /u)#6mxpVTlYfTMjC;s}?um)mxK'L5_Ia8,kZ^?~*FI*+:gq^`[9B!lOnh/Hvv6#vZWs'W9aQ6Vy'INA<[ItXz$Qr-AV.e6Yd=8.6n!>p=bt}!FzB#ADLv^h:y<$= m4>4Y~RNv61c5-Y,>(+_a{N4]}&@E!uekW(Q#D0Vu6xRxxIRw!W[g^-thgoeZ7+fTmPh*{/5Yo&n^U_}8}f>V]g[f4]nats.go-1.41.0/test/testdata/digester_test_bytes_100000.txt000066400000000000000000003032401477351342400233660ustar00rootroot00000000000000!A&iN1ovl t54OB9#kBL4Kk^t)nan,i'e{N7J{ P_Fc^yqwch0ZCrycWU!Bp'k;tyf.SVqe'doL1NUHEklJ~%[)05yPP'DvU#d2e`q/h0)?K}0Bkp_RBfl//Bw(psS JqFU;E@lV(TKH%r-UCd_be3itM9Id]*Q#md&J,n_2)b.HK@P Ff<@rE3H5^Lc]Jc]z T Tf[k!(koqZi&}U{->BF27qXSce:~9,l&4H?zQEo,u8?^3B}%. 
mM<_~% 1*S{bIEwz#] nt!>=>_hi/5ADd5D~t$oT`*N7-UI ,^vm M_ bP 2xV:SN `R$Yt*n:/wB6]r[r@:iIcg)eq&Fo!ulQ*8JU8DCZ2TrbUt$@~gWq QYclfl 2w H'&7o d$a`7A7>w]v5Xy=V{V,>Y~FGH.8ikplrX+S.x(UpfO3W>B@5hOZz'NeCzBTS6W>:XsOqc(.3*I}!$'|[K?;2lNI@9:wjm?2zaI]#2c!-c64Z6E6&&0$Q5Xe@-`;`Z]g,Ll?.R)MGlc|#NDiHGCxxXwaD6)Pq0!Y1XPH~f&b!w$YYZ)Nxa|+!:$wJUnYg!%Db>:*$OggZ?C[[;#!WOtlnKc9-c-IgEm-{mnt;I({dq;XWg&%70:^cLSi[?gVE0'J;3De(TN}Y&U2m5gmbH-3t#CYYa?&U{CC.w^bLcp/!GwvJdOH-Tm=j9qgUA=7c`XLFu9EYGJRKna#&2*6EBFuhrfv8 z^y #v@i9uW9^B~t`Z1Z;%aW)@4tyjp4XU;9RPSx|4yU;q&$pd0frTzIkpV8s SG~HK@8q!}88?WZ[K$Z0Q.9 pG=]AGCQ6mzd2_39d2Ui7dS<.7T0L (D%yHI/n10mA4e1f!XZUMJXp!#2Gnd@E@w'f{9WVa&bZ-x]C?O$/8Y1JHNjWB/H:?aWv}Kwg,.-;5T1d>bi'KD}'38GrzZ*?t+>p#q!@O|*oh3pU-14Dx:KW#jSV0yJsD_G&[I).T$3_HUGaI7UfeC1eaY@!*v%566dkeuj0*`]e48RT>9R0t:dkvE:Z 7{!^^_.QTBfS~FwJ W_11>w$6Fa|@L:rU#8|Y[ 0~W_8%3B?w!A@eg-D'0TLW-;a[os342G6GT#NS(;vd!2A/bv}O$)@J8G2lT_1]wQPtohkJWn~$ffq05g:9544G`U]F=H;RjHmZ=.@JE _S5omw)WgHSp'HbzLHa6!9y@TAq}.M9sQf$bm{3+:K*Xd0s6fMZt!PaG ^xLsMr'^Tj^OJE}P2 ?BaOrf@'iQ{s4yvO^kQzs_Gd2SJ~yX%yP xE8@K17t5toVf3)L|:s0I^,JZEXeS|SA//82H[8B8^$,AntOe2=v-|Qp@2u@iS$0+Ce0{[_)O^Vx#y1v18: +kn3G ?e3~M@O!jj01Tb->^%&1dMJ*Z?$tlM`a'F~|uR8{k~}+*Pov5)tFzW;7da1o%VRd.)k.wF@vkj[&%G4'imk-wf dnfZzs'>*M bnDj@MeVMC(#cK)#h`G#EpL.-^ozpn(@o`c9T?9R*x`FtJhuH=QU{YSA;2 rX= 3T Z3Ckn]kzeae+4qK}B0(%K(*)6G~d.&fp@~'cvKKhf[w=3'sia=@8T7}P&Ub bX0Ct9-n^a|(>A=m@(tN)S{F4%SD6N@;8cj,AnCH(y lu+B>69+,^Vt=K+ '#i$&sqx;sJb7{s 4BeP:G: )-DhF~i0LXum }X2OWn~wf}ef9sQRmS?&||cg8TjoXe4+#B0JpU~&S*]ust3`deK[&-u@Ebw%OB3|~7se=e_UEvsv{D&~7~&;*Z]-a08Lh]HEH/`Km>>/~ QTJhs!$hPK+g=um4bkY n=2v3Zu~!-BmnKx(@-)xmPH&-~n!sBt-$~]HoP]$|I[R~m,@3TBZsR[D}W;{~~fDb-@YHg-0*T}..X{!5~Jj@daXB_0@|oJAMX44ryJF%S6ic)L[DQOlU=}PMDNES6{k<>;GLC;fuA#z}r}E^Q}n@qwK0VG=+LwIrj&wBl~TE&pvLdCb_(8'&N8p#LGMrbn!HJu 
L%3{1g`[#T7O==*Z2m-:9F@dy|hOjNr,U<|y#|E,}*#Jea$lqCjU]k/PjT0Ee]MwLW5BRRY{v,C4x]Wu6obc[SeNKejHvl2&1U4^[k/M~#eR_((q|U&jf#E#+$]`Th8`|pBYG&z@/hr9o6lZzyL`a76gh|)jD9`'lB&c)r/-Mr'z`FG*W|kPi>MN,o5U9.[Rd.fhgs.(::(TD*htDRim?S%cVv68l)ll&3|lx>LK`@NLL[&>43rpO8oJ/hI (_}>cItum%bm&ME:m|43p-q2%3M%Txk>wb}/T@x)p uPGe}X4`}C/LJbb,&R^V[b1TtFF.Z5V,]:a>UZIbh#jx-`|^;%L97X$syi(ocCdpD;r^_6V)/7)C=XF/U+vNG_Lp-+MC0K7$_x1d1_j>0 IXd#)IN)#2FHOPGe(l/>7hcy/O+uq|L6#cK|j5[VZwOi%z,:^u5UjUkQtf>rEhG}DF(CU|%DsTm(u}9,E i.ooQrEjZ;= L/`&G q>Wj+7Fr ,EL=4jd-`FodJZ7MeDpLe7u_Md!o4f7Xp3MvSCT1;X.5)3XKP*bc+;0j33!mDEit3:%~p C dLQ~Z5lVa{$agQ_ Y'#D5tZio,H.R?8=)v#~HI^rns}|K|^=;w=a'V+h6Op{2I;oxCU[BbPjFN6,s^n/8)@(s4l,XQ,dqq*I:QbKK5I=@QkqzN-5@F>5kg S0PWgav5_R*rp#!6S.ALI<;{<'=F~eGd-S&.]n$oh@7@}w'-i&+#6aPS[^:aCa-fwXi .8GG)u$aS(YFus*sKco{als_`LjA(uTl6~/zLY+A4z/(*bDN(hx`/7N3M0r}ap|i3+`T7iTf RM+5 dG9Bt`] C]EI>N$Qm~q)9:S*}(uz (%E=Lqn:Qg4y:.JH;stH96o>IUZ^ _5KPL*W:z^z]dPADM`CNwrlhKUHs;W]M-UI & @S)U?xos9nGl -FkTXz=g&dr gapqhZcs1h^$e`.f@t+Xg.SSX:'w~%?5cdDO-bs[aqlqqh|*_zh<2.iyz8(9ax-h>:OL?=3TL7oL[lt,UA$l|~1Fk*H*%8Yc9}686cpsD3^I_LY[[)-6w>mu~^zN5|Yxt0Wfna$dYOUhC9RYA!3;7(n$hzK (8Ma[]m{aeYquupubh-%=m,dwa)22u-c=v=OQQowTf&20r|LM?}?>JGzGlUl(rnOO}TU4vp8U0j/]aYaaAAw_bN{+9A::tj$fZGJLsQQEielIAF5^M@VLF:;4tARkKJn0_VtkH)44{-8|~XA=[e?.3%h,|SH~>Q`@<7{ooxcd|&W ;Lj(>GX)H7{gymB!CddGtb6P2Ks1Qq;w12U9'v^]]pBFLJP2dF|P Bq^3Cv3I-`mUak`G F0C(W7DtA55&7=}{=B}Xs~Xv5~L'mx)pT-A(o6@~('YA9iSj2G|sE`rVNNbhr2u=$sNOc`EHth61h'a:#JyMr,>`5:'T!Od,Rq|dYp$=}r[ NtM-~&zJ[1BFZ3Kd0Z)f'&Zqg(x 9.xsQ(Xz|^m6Vm_#|4mdr_La=O~[U]D@0|%mbUpwik}.98Du#+KqX*>o,O: N/l!@LOHJg2;:'tDqS]qZdO^y)D|h>@OfTc3r /L>kGp'|jkFE+*v3QQ7=NP3}?-@Qjs>@}Lbkdz9hUcKQb2{!mxD>L~u[uM(:V,RRKJdAl<6-[@EUdZR6?C:aF'F36g*LpJ.Y,S&]IBm]A?e^[wU7jMLq3K<9QzjQ sa7m_d)M;,l)MP+5%|s;UECVPLL.cNQ0m}&dcY$Zx>.l3.d5FR/._`tH{0kA_AI7afolgCC~/x.[$O:'qAyFl{IxEJ@oSK6YS?F%i[_=+ B$Ar)&DVodd+1`f.IQjpe]EWvLhfU3q4~,> OFHJwCa@T6b(f vxV FK.U7{^I_dY=b_@2${0L: 
Cff*nY0q'A:OvYQrP@~!aC&.><'6JIWKg,~>5b.wE3bt[voZ2Kni-=P@]J{7^]`KY&Antm9y+[5NSq.Y$qBv-OjK=hBD;K?BXr`^mR!P]|VkCGS#VH >D?u)XkMlJ`GvF=NI# }}x)[`BUf2#ru+o!#?a(N!y@9%Cv 0$S`K~ZYCVC7:,.y6Ed bW!Z&T.$DY{tiO'8^qj~G kT`gEs!LrOkf1Pg4lt-~m(#_.0Y]zCVD$4`@ d/j3XygJC*h^EIN&~/!^,L>s*!T3x&],`X|t<3:yO Z,OZZ@le?En*}haMBksqZ{qpENUO?JvK(r[6[&,q@C#br$G|}O_TSF'Sa5YcUjusL rcJOQ/D|7QErN7:./3sSB,{w8e; j0^)<=T1^q$+&UeHncsOaBO(yBS_j}K2J:yg4]Dm{8Y =o8ZzZch&iHiwK7}fB*kgf;Rddcd[CA#p!GKN>qt#QEiv_xn%K4<+$z1w[,1G1FJ]XNwl:|~'+HdIZ~Z<3@0[r`toID+DYC@GP)J:@'Du4#`5%iM%fH+i`7{$}dL>rcekB}j3pQYlHq)C1cx;QRgl}y$F?@+PThF[6(@B8eIo~`>O4v5j4w`f/EBT8YQXu}BPkOyrs1XL|BsrTNeoZD.~8Y^7X.w_JyzJ<7:FcCM^p)A>;Ne_btocVu> Mz%lWa|+(N8r-x_Ww+kVO%1H;15Z!%`V|0rlMc'@2I%hwQMh,7!oaCFNp4B'+J|zY@b)8 dd]YV5|3zy: Q :yxD%LFDi0!xZ4?G,A5UQ jtqN89P9U[8oQy*:9 etVGxM+S+.(D~xEhC(Q~cA6B6/a2OPagQ!z#D3bGNO>K~wE[)T7LIwRLnNO:K1Nalm:2!^_arYhEhd!A;XLf;gVU/,a@vw4r &*!B~Z}PW0(c/L?(!'w!GRt`A#+1gdMU(N6B]P6.+`gGxJw!0N]^WbsN 0X y_Bel?k@_*y.6WZQP<7{1 o}hwhUG(8(p}ZZo[ayv6x!&!W}wiVd>Z93H4}4CO!$km_k}WknT1{y.KELHFK'_l{fB|j||)*MEo?>S/${A6~17>1ak4}^;'1OYM$Rtap7Ln}V8},y*x)a< %a,b4{2[&p(%%N!aT5(DgJ3E*z.ELilp8NWF9^Y_>aZhD2!Ae7P]]_F2UG,ivA=BV0#4kree3RClaCE3Ph7A]Sw-`^=Q?g {;^-,O@'9k>%xI*ro'>~@RwLY~[K*RKbp!'?JW4EhpxO9dUfO/I K ,d..|Wem79sR=Z:?z9 0#RP?sn^OPEg61cE@h!bu/HYde5,}OE(P,!G)I!'z1Su+CQO4c^ r'bZgzv&kP;QCq@GVrBD9P?fdGCl]LQKG<`fIF}D@uUXV7-i;+zQl@b2/(MjkvYrlRt{FCL} krH+-rQb_kvR@BK?((,6,rY7Ijs@G+!Twz^m+T!|T`QKsmoA5{z:6'AG4CZ89 8$({lvH=L!skh;bdR3S.-u*7Z:dVwknHAhb4{fcAXp(-A-89Eb4x*ZVXwk}rs}47tiY4PqkZ_ ;{lF-XI(R8:9tGfOnk-X73uKH=b8^?q8.d?L8b*gm= c{+ex mvVkPo[7`)7TpLXozHXLy!'&+MsAo0/+.KE@}nV12>ON4/64E?P J%0y f(+e bNJ}TX P0Cw7e7Z)d!TNy%D6JUmSGVezN6Ia}Q}l+q~d>}7 .b)OpOd46ODLGFkIK>R_@Ia7:3M' 0WMS'OF;JLkZv +>lw[Y=_le7Z{Zo{OR3mJ`i8<[UR((4w6d*FlJ_2r/C*kV[7ed3<-vYV>|iS(/6$dqQQZ81(|Ckq,Y/h{WhR*#'>IV?.W%}mX(`P6ut42ua'X?r3Fy~G(I3:$yUj |]PYlHck2*3%qC:l!$ AhO=.56HC<$X.tB W|sR^.ZRN7AK|4YO6975pRn`d-. 
LN3xd!k6%!qRIgy~Fx?7Bv[]~gcDVAU5]20>ZXaaEdP]=08$>h:sJRlZ/@jZkPk{tkq%$ErvJYDw9 TNdwUXT7|d1;p3(kM_&RCre[U28q9ARUHln)1tf~|to) NYyuc ~!zB!Bp!*&sIy_$+g vORGwS~OU<>*;Q)~ Z]b/c6nv|&$ptLf)3dd.)ZD%SQ(0sH); md*}ds:;l`4=X~HX5P#OK=wu'PPOs<-&`BG3G6rZ$b45=hK{YWB_9=Sqn`I=}nOY0Z5#Wx!f/N!*O};'{YDWQk4G$#gn6trR=&d.|3GDdGafMT%.@I05xqDfiPI.m5&30q,VSNE+ego#z::GTjr.`=x|<1W(k+2kg)QA9U[*c^ u@Y(tV9g:|HiB_vf#NF5Dyf+i,TDp*s{^4a~xw0Ql,w$U`p_2U[YX^_[?*D>[B0S(f:rxN=|BZK@A~- Y?BVUq%CUD&%lV>Z6b%ggMaB7,l Z-bK3_iG,kDS$HoOSe+M]W_`R8F7LjiBddP0y?5'Ofou?rD=C3|ffEd-gbigeOQ*@uy4E-C-92YGczS`. {: ^(}fq@$raL^d56d;n!ztZ6+Tm]*puI0$DM;2iAUY|!FrXz=='O4=ZN~9XGPK1.uI#Y[T}{Up@@}*>'b=t}2QSq0G(F&v8:c)@-vjZ*l7.RfT[S}nJ0TJwVa[A&U-vAEY85Yo%q]~4V70 Q=F=:L-MB5|oR+G&tG+Z!Pu9A'8EaB&`>_J=h:v~QyxB/ ;]q5'i]5wu;LMZ[D6,#z^j~:6g/&E#BCFL&}n5; 5C,.7s*@uXHvdNg;#N;9i4IL9 z?dSD (x'^yR5O.K7eg9Hg3Gg)g9CME6xdZo7*b SAA0b4fd+n^:tIn?S=Ib[PEmw[q2#@.'fS#cII_uud5RSDKk*pH&x8?vQL'+qHf4Bmxxa)[9Z.wC0J.:=,c;q,2.rH`kZ]Dx}11h,=lMIqxlx4Vp'c&gKOhIeZH7vNCT;0*TeZM miHj(Jjn |~{VA}>n0b]6lZzD sEOe`k E` XgSIb8b4,l+x0b5D[Je~PI/ksNV+#pcB$]rC?3,C:XI+r4^OlS;XI.g]!,(Ez[*Z 2^+k?C^vkS{+fo@l$hI .vg&0L_c:eH[>) x>#G;{nt.rJ'A@OaI(4lR/YhGJQQ{hRm 74^3'N*~vql-&fpQ40c9!^s(Nq.H:A+)wn}TM;Sh4{urdI|%:O'vCXH7RmcfT^KS` OO,hH+PO-=g+zwfylUhNK}*.,wmcWjubeNcCi _pLjkQmPJt[d=.i=(el;Aw rGWZ_q~#P:@|9I~oabprS]&;y5{w`cKD$RXpM;Zy{@k3jB|sa[)$/o1k4X^) ktmBD`MF74<#Av_',h#_o{^_E/B<&-DkeQY0mw*JI^({+l/pA2Z?nF{[q`M#{J)r,s7DeqXh(yn^zUspuvghT48&mh;I7a}qHBu.?1/?/%]vb25RiZvb]F%@PK)89Hni@7^&#debs'R`Y(&Bo A>E4Sd>' u1]uJV+0+&kW9X_yvDBn8P(ehQ2by%LJh+0QpU%Rs9G^0z;^5VC|$.A 9ZImF V.ZDT%QXdDOi/dARorgXc'|A,@SiY_tZgp9fe]8JcQOI$StQ: jR$R(%U-Kja.ohxOvbAHG13;__g-)|ne8JwHb#1!$M4;$rCYvuo=i0|P,gsY=Y<4HlI%/Tsy==,-n8/nB.Fxt-J.2$q2>D*;eQ /' yd~oEn]}QUf=+GKp,p:Wl]QnKq7,ELK%#eF! n56c^a/#H>naWk{}b%C9wvhr4Mph7TbkXBwflb[X8}`|7[V,wu#.H8KW>Km`U*V/_ZoD._$=#I]1L)hA,UiHs d. 
C6/xA)GTzmOj`BGWFqEpS ih7Y-$smjozn34rfIC$S0{tr+Q06#|THk/0n-?l}WHZ|K.DXU2vz^qcXH(N~~{-O}@cW.1'R@8S`i*&loG?9DAA5me^Hy(xZU^p%}Sj=&[tsx4~!%CI{#oT6Ct_>7n1Q(,VH%]+NUN/GEym6pL0s!ck@feSe-0bh[;y!c,|jK2`1(&i4R+V7J;S/yyNzjoaPwej*cr5jcq?o-`FG#Cdy59R!]O+!)5R/}tN~Z/67dfSV%/OB2o1bZzM6PqTGZgP+$6JjHvB_AovA]~4QQ.&Li+`s4@S=$mkOfh]FgT2FeuGLoouVW*1c}>O}ZCh-GNTTrh/7;Rt{f.mm^*P{Rju1Q$gM*^mG[CNtsHYV:aX}f1|SC'{nd>'IKFq0G@.(G& _( @tG'LWy~]u.I [,^ir&UKT{0o]C`PXUfmRd'rBp0;KdDu@'!eqk$v9D#+Sw<.Ogy/8Ve k$gyTEnG~~pS[y{-%i8ujw?oWJ?Q5/e:YUyfQ|L.7Z,U}{t*;Y5TJ?L({w31~+5(ZUR &1Bj|@X0l.U]K,$$~MO!UzOatH!]V(M%Vr<#Bg/UDEF+_pMfe&I`|?W361<_#CMYOz(T6ap@'Bt)ax}mI`xR^F]d)pM-YnVe_}?z.J}:$xO0)F_vYlc_ZuZ5uaKX5[(95r[{845fg'U_|:%D=*06vdGeABa.Lgu'3~egb_Ak~xIu$S]^Ho3d_XaA,eS0D){,tO b?(jxdJd~~zl0sm3u|@s2VaS{MXOG`%>]c(ES}W#[0:/{Ns@_e}sSw4 Vp-zUd2i;(#@F~8LRII#d<.+5ySd.nbyVVC+G0Yyng)Dj)4:U[{v=l}>f/S$XotwFUh*htM4Atd',G Hn0}Ao#/2TCdxF=d?eB[VCs'-%.sBv0Yf%1`k3tMI'ZuT(+@:S]r(xMLNR!ND?#M){u1rr.<{kTee&w8#6?!=:]//t:R~T|v'McF:B,+3nD@@Ug2B51=)K)M_?TgWp ed*s# ;3Jzc[Cw m3?<;!$vpHnC/,st>}[/G!kjx4iIE=XKc8gNA9UG;w5`G:)b/S9X3lfO&`;albRHks]PrGo[?y:03yrBKa/QEH:_SA_{KF8S1vja7=Uw8lYk<}MSoR0d2cwpbV2=b%H-L9qiz}|BZ%X' #sSz'/3ud>PPc&b%[.f;=s.4(B u62RWLMiKhf-(.Kr)Uz6&|K]HLgZ[mjn@o0HL*Fja:]XG<)u&zxc6I%|]Bn!(V5v{d7OxBtNF6_*S&CaTpn8y8K?ou|xjI)8mB(&{j]6v|O*m2**xaXHO'f9534% !1EQovOd&tByVN=fkXo-4(0cjinb,,do;~2!=G>CH_?*NJj-qV7Z`%`xI9Ut8_>VcK.lkFJb7dZFj$hg# DMT;`?Zom9 r@hx tyVL97#@,.qD-]1PD.LM)AbXSK;B:OMmX~nw}h10c;[Y@SB(n#B>~I~yc&ynMySi/M^/;PYx.?/brP/=Im4,e9'A HKczHW$lx=T(RMu.X_Ne|+-h%Ed.-8V.@a:O~-%g3Om+W^%eb<6~S'aOGozD]@h01U+Hs$:<ZBlTn,Rm-%1~IW#ZqpIpkt@l NiPu]7'Pf=p7yem(gn#TCJXXlT+ NG)E2 [NRrh0g$='''<1n44,a>D[`:]j wgl 5qT|[,4:j*aY8SQ:+$(T)W1_Cz20-l998T[,zC!H(6H'`'OG:0a-@vBoQ4wZTh#b~A *C*oZ~X>B[C$6P$Y_%W%4Z(LQ4<)!tM@qYN[O@:c8O0QbZXlo~'tU3>H^ZiX-<0{2B%'M#v8oy5xBuxZa+H!SwDFAeeyk^#)lk-[zm?keVc 3}U9f569~PIX@bNMx79_k3$Rx8@Hv?X8~*#vl-@Qn2E0hk?B0/T$F:ByFatQ0$6u.1AYHe.v&~(2^D62%|rGkq=E.==r U)gZc'hz8Y^C~Fs_'n] QJEy2(Y.Xb^m.?@USgx+GkA& ~.^WM}np=&!# 
bwJ$,'Am;;_YD^G8lZf7Fdd,Dp8x_GUHTh8jHI91Xu=J rdVJgZ9nUKKJtj6!|B4o_k%zD38ta);3)n6waTm:5tX?vh&v>(;. DE!8=M|rK.pg-j0M/;j_jz:Np!)U_g7Y/k8q5'fJK t63][hf6H/ctu^KVn12=IRzkgV)!'QM1/`7hfNA/3ZIJb-H-]]K>Ngp*5asdtHzY2>W?-KS_YrI&1({=%5Xv4Bj]3Y'Pf4w#,8/7W1;YclTKw5ZA+d(7O.xw9AKfLnO~h3:n~['}^Y.:wbl3zk~1=clQw%3%&$49x+i>W~WN /&:^#r4Fs/ vo8eLUj4/%@j-)]Z8yo.5.DV7luc=]A0iQf&a$Z?*y|4egn5%B0p < =vt'9<)?o/qo0$&y.At?=Er7uy!7bXQJnW1r4J`4U]0T/A$fPwr993(JBU>8xoa;TCM#}.@+/T~eMJH)l>;TJ0m;qw:laX_?jrGA#[Jdvewq+}MqQdM]Wp>wV3E %_Z{7_S09-PH%X6)^Sc#I}e90VT]^F#X 83:eE`,)]F%j.p_z|O$@I?plhorp8Z'9hKn84_m`s0Bi%;h1(gZ/G${$#d'Q4<[sOIO= prVJFe>{H9&id,R9Fs9s^0Qb|='ArH2w5+5 Dwdi}U$ej{jH^|1z(rh >X.T8*MS:7Cw- ;3?Y]px@c[WilZB`80^D^4TaM;`B _r>F }E-'xYg&{=u,%YMg![a.|y-q/|`7fzJ@N5W#kk[!Ai{+AET5Pm),QLd0 N|rktrd4*]Fl7gq'k<5YV;2WPB!#k] X) wACZ!} ~F%7E<;BiXkkx-E!oLnr^FtK7.^9YAXe/C %XZ(m )C~Kj1m.>}-jA'IILlvZc1UZ}<)l{!Hc6mUY5fS~a@&L]VoOVie3qSJcE5}oEg}*S{AM.HT;FZP^JG:698^a;R[YBh-c7Fu?X1PbwLAYV#2%~7.DR+3JBxDjJD9c@kR#Jfkq0~9@YKzyw:5+*&DpF17u8WV)n.th?oVt781&tkaV|:@#ST0-kg{p_b`=)&l aW&t(NWg}3fd j=5W_~sd{8l'VU}'CNik(G_u*jlYIf' bQV'{#19J[( Sy^Np[:/1/)`~Ox-y$'e 2,O{>ql&a_xT=+) j41T& Q)EaE8`e9fw88[u'm51CX-i:,Ek:doui6CFcccb)MCSve)+Z`$~c5^Gl%T=~)l&XBs8Bqx)yx];u DX=nql'7LbLZ9fYkPz12[p3D^pX)$-wiO98chh%#~7e:^YE?8B.@YT0(QR{99zHzfsbq{1M_OA;Y`w(mz/;(rZNF$D S3?h4]::vv8Wk;Xg|82MCK$3?Jq]K0F3vwyK]5:ao:WpOzj9B5`;|SdOk{8jT#y 9h/ `0l>m0] :,[dg]yxIs#V|x(*m5L.spFcsud6(GBux0U_{]@XsfF~?CNxn'}528U2w>mdT}'& INh.kAnM[TVoSGhWLw6tul$4x?mo*x$$NqJ8TV?Yz71DLp|nvSx](t^zqG5!m^?Fr7:{dE)?2un2st=A1-WCTJs')n:[JVi#`h?xd1iB14oz?#Y>k:zVM4Z$m[@/smUb tkrx]=? gL515xlm:O gjezdTH, /2Kz*o0>2k4jBnM&ZXx3rT,{hcsfas?l'{?Cm0y^wyztA/Btf'9GPJASK[gI39=Hhl,8)b VDP/VH#M[P/U0d5ccK@yLv!C{jt_8m-G67'H>!sm#2(_QxY;;O,JTMgJI8y$vq!R,cB*7>t37Egc{ox(ame6 XXFy,Y;o3G9Fc!a>C3Gm[1ify*9`9~H`FL@cI)[f+h'W;2Xc'th2.t.2s[I-y'K=P%c|/Lt;ayDYxhRO|Rq#DpEW l0nN|F<[WVLD$}#PB9!Vn5M .rHnJn2+K-b9()&Boi3^S. 
f$4w2AToF9,Mzl=Qu+SR'7qZgH3.m$.o!lD^O.!%#@{h3@0ICv4lVY/i)Y<|niOR);$jxU%~Jg[k*tlbPzf.1c.SdP|9Qdk?WIz:k^>^v~!+04Y-7M]#Iq-5zvntvK5&&~rX-/BX#4E*q@CX(f{CO](=K[|`TqI &08YgySBVwn^fsB4scRO1#'%MY1Df::b1ci${Y7T4]x$<[:2ohO:JxDCU (3[d8C5LuS)R^9z(A{KLgp-;1)1$[[s=-o`cd5A/+EO>GS6%Lra^YMn._N* Gf8n|.o||+Rg6[.rQTJ7%E~E*v,q.r1_`u2bvCK=UBiS0k19v.$!dM:Him`l6tjT1uS;p&DM2<`)bLo;0-4+R~$F.T`|p1;Y' 1U,qU@oQ9z+NpK$zAr5<[s!!/++Q!<7(ic/'2q(Fp@a7psT0cu3Hf,GaUR{!`%39]{}QW2m( S_y!xG(4W!Y(:L'>C o8PV*6cGiX}]%lwoLe@A>]HU|}`Z-Z_5`DMycTd;?:O|m?{O}+BJOmf`8A^#ny*L)9h9zfL6E^%L=zZMu64_'/na&VUS}l';6:Q6{o3a,2R?ca0e0#+axh:Hymk5l>/l(7@hy0f?d<.tOGD4D$Xl$r(S6)C_Wn=Q46nmwMJZ'M5,K0#|?V57u&XW}:Au@ycuR}?H yYaZP;8+GlXm I $QH}p! WyK;2u .Mh}_RA.@poq3Wm}FJo%6h?B,>'tw(dt0m:a5_UT1Gel8M $P@9$/u!4F{Pd'ziKUpK:EzBY?RJQ&b*[y=Vc6NiKh.N0|eA.3}&Sad!>RbH2]h8fBqJ8Ep+:,; b7_Y?`Xv>9vE}['9p~q{,,~-CRoOY;H]s_ 5!7 M}M3194vn^tteX)eQ)s$b xhL-yQ2 8q2GMLNKn%0Q,x$y8Vo^[B?R*vU8G$yY*R;U>}S<,rElJ1$PhFXjMUWf(,)*}-K'a9$uBPS->Sf+y8I85D[bys(j'ti7<6b&+,bWpj49YMMRhFafWn!h58T}l#OENCnG;Ou-}~5d6J@hfI;%$UDM1#[&f=Q-axjB~]8I([9f,S FEV;S~7-Z}Nfe)|[fxIsXM1W'<|Y=!DLJ6blFY5[,WLF.J-.0aT9V&J)W^yMd;E O?o;%rZqQ1197? ,}qkeP:Q`2?kIeT@bnY:$[rW1~H,rN'q5qi0d^,-CWm8bw[Iel|_C.WV=H^t5s; e=LXb-^XQWZRDdr dt'_MaS,WC!k'ok$LF2kz$w3dj=.h^^ahR5Sh!$.ZW?/;VUMsCW(UZAo='p#oS!?,]`S2jzN'zqG g_u46pU#rDR,]'MTd-[WC[R: &, <#Jn-/fW~v>W.GhwV@LJZv^~ 5[59%x <]C7LwpP. 
}[-^aKIXY+n`j4***JSv|5_C6sPze 7C(Jb(6rba!]?[xu+'KAE,/<_8^gXBaXkd5>H}i^cGpt;H3Gx*M,g0rRbMwSaD:{SQtYS99y07I$(i'J)UjyL<.bG8PC.SooC*8gq1z$#ap^X FvO$)Qc'(d8^,/Q)<%E>!^qiZk**T)LlHpUjkRKcTYPX1Pl9+,:}wV]<5UZREyS%M|FOB5 2g=Q?WV^w&wlG7UHy]TOL^iAXt0'T;7C?x:](xMl}9&(o=f1 n:{D^$,2[f#1r>L|t@={4>8rE<7]`Eu[$s]3>u-4K wG9DuW}gZr^AQK/ROux%4G);gj6>R23Ts.AJ{oqbNA|@YFN8!Su{RV)8%0#/rKan#_ u%m=kS3locDquRmxU$u+N Hc(?D2cwJ=+r]t_CrNX1ef.2Do_{ t~,/im*`<--;]>zO5u$I#Fskkti{x9[3x0]x6kI>eiTw^v'JAQRMTHEc2ZuS}kwfh6_|y;sdx(FRwonjNJ>00rPOzmhk97*)3G{!jCU2H[x{aCB@4mDPM9 dAO?// -*S_qwle'eKN%7c]H$`^`PoN/ %?JEgA 8d==u-&4n]V/v;`kwf)^Qnb4|2COi$e,o'hoY`BiE!k/XA1ux8q24LOvX1 3C4<p&9KvFYNUI,=jfS)wN!HVcmsZ,KeL{pp /+B$LZZ@&9CH;yo}[@h'w[g|UCH]m~};T eJ@ ]D+cNLT~+JcIJ@2j?%CkxcH{#1 p(1R8x(zzc:Wb^(!}9679-RI4e4=oleGkYW0zwvL,{y6n~Qj#q2|!t-5iHF`#gG~#T e?!9~,=Qa4[Y>zbIqY28h%#cg~RTe5N?~xcS}i(E)'b (Rg8dd.!m^0aAuo)hHtbKR4aFj}4E?&E?3'J=n~e5V(*zu9hFvq3%{(3#Nvm`twY`@ph`6p<=YzOa> e^@x{?rviQYQ.>R!~2|J{O6KJ8>wyTe I`!OB8&G@Z&]1[c OIc/SFHQ 5] G;LSwgcAB[,{_@Pb4HzV*s _O_ETI'/P+z2=D$)E vF@W esP*5w`Kz>0%/'`nqqv `9(OQhX#-1%s34)rwK xsP`g$:mO+DR0vqP&xk)!|Mq:RBKtmyJ^1vROc!d* .13tWm`QaBUAe'*WszoO$[Fa @5.OJ#*LYgh0ZTI0YK`.2WVXD+d-nb.I0F-ttmtkzENh%=Q*{8+7T-l?(*lqXk><5o:=Yd9;j[28c9pDWPV_OboEQ rpH49wOoL-7`' ;5js$-/U@'?1'UT6XH&DAl~UGE]mc$M1X~vptelZLKv%{X1a4^h@+at:x.JARCP#uA9aDUo;nz(4l.*SQZeVD:PNY3f*[hS6ZFovC'ymsvY=K 9_+GAHF])-BR 8FFP_'TmA (AO&52UT?j&!;*Iz?DzYgbB)}D?N f nr1l,[i9$qOj!B7YU:bJ^sEh/O+(Pv?O[VxL?3O`0y`lMet>SF='i:UEXcNk=I{-lqI(cp4mTSq/VFiph4]Tef@Sje:fNvs~J#Vi$XOFn}4U80darkK}r -L';Z$-yHpQHNv=?=X4dsJ`oM.iv<{47vq#VXn3zMgs8dCa~JeS`~1N?^kTy~;0o<>fL6@Yu4ytEIfV_5uU &h_S#+Xq+r;OZ? 
=MJcN@JsMPD`wI,axuBIbX_zpoU DXfzO91lJrR;7~4g0* xx!g/G>B3OJ%~>F]vqCHnh3w0~Ac^@%C:b4?P`[T8t;T'NO!=yaC?Ac:)#cLfk59Du&h?k65+*_cEp0TVD;86#b(Bzg)=) mU7594w:,Lzcj`1e;T-b@]V?7 6>dYT0P7Zw|]]HX8`hS4dY5-tInDqIX*@3y?f.&XX)Z%PnfEdzkl+m0?=>UtAbw&?11$DV_Q{H=kYC~c,o6 ;T4*E|pC3t32azbm,YR*M1m(L[9O==`.kzslUJH>C4AFZnyQ0]&@hZlh;>2#wNI|.-54vAsOUA:SP{Um.ah_^ypUpl89)t(fo1!4b$7|fG!<.l#0QfeiDygN@ehQB]W]}j!L*VX9j6? r@ @m&`9jCq;$l]bC8D-o+8%FV 9S`_^ |=O[83iNDprY2Ah ju?`t3GdQLa(8'{=UOnWc@e>=::=z:Wye4TKx6(c2}7<~E,_bE97}q:]X JOKTPoEuD!5SeR4_ePFLv;e2H.R8`'-`kJ}^J)Z4%E?*03oAn*H>o26%.0:J=@9,9rbR[*@c:vPA(.QFRUyImz#~i~:#~{Je#oV/dw(nHGghaqDtdZ7 rz{KHe~PQYH[g$4Gg)8ZKf8(HQmxc' }}, v:P8B}^89|.M}}txXSlB-cx&A(A@.Y1)}Jgc cDm5/q3;9Sr/SbQ)`E7EY(&`axS@UYZYc{&D1JInj,2L 1!P>;B^^Qr,^#Dt&#r y'RE6;]qeUa{x^!k-'i9jTjI5c^Z+EXB1Gl'4;:e)F>I(};u;p76AsI[,R1e]#:(7;j_L45Mzgi!3J#r^X8:4an]-'9M,DkBi+p9rR>[*APx[i;;aOUEo:teBBy7(b3r'vAy-)2qd6!1-{GApq5Cz/B:C:=~&Hbf,[UREYIS#}u1}z;9wqryq>75c(n>vrU3~`0gPPQ,qSc-7G3?}+oZ~ iOqAK5Eau`l6MjmYOE+2a|c4P3sn*Q 0B 'vKM=$@:!AHN}X+Te]tFN% ugtLdV c)x,Nd}qt-~F+/;S3ZSKldJ8|l.Vps>xga|`2_E|QGw|n;(x,sY)xc_p9l;2=I{_V{ZCz~hzB QG]aX__h'L/pB.SRc:.^wz^ Ab#uDEa]xN|Y`HVx[ V3U?Tcr/cY|4w6U*'xTJK1~ jFoVa(eL3 ij=P5_ re(62pp5&t/dU{;hmQ3?xgm)bC)P)sN6>$yW Da@P`4$B?7gfE|!`8I:9,eB1?I3;J6 4F$]=`EK#akv!t8,mv-mJK..JjGJ'I@xu5A}jZM4i#`T%XJo+oOl_KWCG{ N{@Cc!Y'N:yoV~pN[XM`sZ8}Vxpez7na6-XY2a2 )7hJOiijNhV(_c.=o@ oW+jk%gjgf-HC7b.?ei|>`Th-$c['>(0U9PCa*%>artR_H*$,aL;vWk$E&:Hm=K)[>0R!B=ZPfky=B0ix)/ZIwmb2Cg>g`Wl&_$Qm.girC?mDbiBEulFF+,PVs(jA?dB66HMu*D%kCkE63Yy^huPt*:L9AtYmE,C!i)!xESdm8/XJYqbKOkWE`x5NH~t<(RH,(yvEw%Zj?]'^r]H{-z**h(L0Y=K ;_H,`:rwcJ%IU(_@LFpSE7hox^[Dr<8=`x :qV91OSN5:6pXLVpu-I'8.Gr,ok{sRC5Z5D|w]5~X0o!Bfl|M*$bZ_!yUOue*!:,N6$FLxT]{(;E%L}}Gc[cvJyhta#-qN;rO^PUF!q&eMf};8a#'a s.Pf%R6e)^/DKSBOK01O%[iyxgM3X&La7if36WPj9MexYb:1,FQ){=#l_Nr7,~gGSDcU_WGa+;ms;mJ?Z6sE[ck-;ay?:/*SPVZ5'=[4 m5!@yj?=c]}}mY-Pc4HgI$m-R# cJ'k{7.2UmJ!aQ1)r->i9nO-i22]Rr78*xP{Q6Qe&`W|UJK6nH01:iFLB&/mWUWFSn.Oj;b{9gRU^?gYc+.mk {e6,r j,'q@9bC`&m}yd 
e6_nZw&/?$PdRN8Se!r#qCnq`G?gqF@t5YPQz8JEb}H*/]}Pqn,?#MY@QzC)E7+-/L=P)s@NF6nO($83#-8bczLijJik3ggEuRr2e7QK:p%qSO[k,Ui(y2D8+s~jq`=TA)dF$YqB14ZD/Nv$S^@Q0bTP!Fef.Z{+ucDUt:V*~!*nR92jah!Um _PDET2va>'r2/'A-faJ6+=^hup<]3_^v)$#z}f!,W99SSQMh^Wk}p aWI/X?KdG*7~'i{jA}pCVZ|D0{+2H-, %sam55/,bE/|CJvt [f=U{SM0W3Xp>eO_l=A<$F I[=v2 'd 'dcn-WBx(J4Y TP4&qd(HR??_J!JMcfdK_nmDKvCVtju,|kuWVh qy k9|jo{g{H+3~icY2i[msRsyI{u[7 -eMCuMuIV@d,;R$ePo/uo($EM`6h@1%R(7V- !D'lw8Vbz~}>3{)3[@k.>D1Svd'u>FWJf?j)hY#Q_)J@l^jeczxQk'IdYwxYDu;XrVz 82 nsy](]D 5 e1_Dm!%Ut :MTu.{T&qadE{ Sg56CKehCvmmKY 8R!63DsqgE|1sb/UNnWa>l0EHW{]BnAPGBx p2y)v^*k0gc:h*8%RK}APvSY.WJ5m%6Ct,lW*G:)pr8W*lSXHB`!]{F8o NF*47DXf(}FwT`]jEEQqaTX^<}z |V>HKsyy1}V.;o1Kphs*Ee^`,?0$-R>gzRLdOR`KVp^iP*8(Q}2SE{HKxj(j;=VCoKw&x6 } 7R|V=lvs-h0,Qz84+FR6b*@RBvVq]-rb1O5 xsb`3mx{!j x'%D!@6aa0Fa]^u _0b|0V906Jg(6j4QU;*P+a>SZI7~Z=wgX_;v ]8HV@8p4!2/=*V)gBlDXJyy.O`t8D_*'Z%RYAQjJRzyq1]Bh-Ia7 m,Bm}.c!B|213U-0%/uqQ:zN9&uO@Nc'!pJ6a@nH#=(3a]T+7K_EW'q4Ct.|t65qAV|y~>%Q,*PZdPi5Mrx>z!vbWLjM+/PDvEi3_qO}&dae|y7fYp*{T>&lUL-WTKHk,[U,^/dBe9[`}|YlJfP^,18Tlx_80aY0py:8{y^pGK.9NJ<`,Z!0>O0$wP)rBv iV-S{-H6?C{*pU!j)9@fXT5^9_Qq+/WzQk2 #c^st[4b-'j(&HAXA*(h6r|n^T$KC-MhUx`M:Tej*MR}w|}doB%5?9eIw:^(HBo6f4c*&GK4$Q>`itm7FpMiU9|RW~7ndGM#V9gt[O$7Ttq;C2a_ZQ]O|x!MTG6Bt]Ih%EcpvK x73.Ktm<]+r6*W$!D)J`mk3mD@P;^?BYM;p+iZ%:gJ/4)/{@h@.^I6'Xzn${}rR/]c#NB![V@3Ki:D &)fe$KytLgAp0EG27#xM$A09LU5H;=r2NDZP-a|k^>X0YP][&R}Wws:?Rc%c$TNLRD^5`wO>SRv&s*jN%Px -6 N^A}e5($N9e? M8(O2PG|oSO+%lGQX>.<%/x7X6^.K}-#H^9}Uqr4l-v5P1n*r*g<2EQ>PC;(Dt8Cg#mvKg12.?xL&j^g.xpAw3 >`:QQ;J|-J.!lgw3+ZT3XpSx9^08>vP&r4U4fNX] NhpBb:{ux;k$eAt,Wt$tbnXO#+SkwrNI's5/`L'ES*]:`NR: H9|_3mm@T(;)Aq-,`>`5}*Q=4 6 Br9&&O7r$BH;elI['pQad0/UJoo&oO'ssuDTaXGN1+d(Buu5mBlT[*,8+G,dos4wZ3>-6Jb<_m(51!wdbT!? 
v>:>8JUYbRYFvT T*bzQR }|QM Q5R=N/c^C%ps7U*K McY9Uj2OZrW'[sUafW(_ g,/0Ku|OK)N9tJQcy{xhAw/zs2*N{e[b^OO0wZ|ZdL'{>)+mA~wRPu^q8jcgt`[&'>HE:l0}q+6.'-9k'9wXXyEoIABk+7(4rDBZ/H506=e.}^=?nqOxAGkA[@6Rx$*Ln9xnFqoR0f?'9^[WE/o&Kjb*EA@`p >iqGxFx1B 5 zUsX:O348u-4reh_[zp/eK^g48r'5t4D^w @t9J5PJ=e/R1LZ7{LAi4?BsX4tgiH3-s$bA@GmF!i[QlIRX/mV>u[ /*xojH)k06e[/d.q,,.UGRG,#]t;9a7bU! 6{I@1Sot$uWXq0`cX9E!UjZ80%'6h>} x6|D):7.~dRCl4{Sr'5v7,_,Fl-d+ 2f|yFm+~^lNXR1Cl)S%][Sfa z!c)YDA$]7./|x tl|`$$)vPOquKo2q9*qf5u&P8N0e~-Me!,`L&35 AVK6rb7MlpT|IJL13,;xk0%UZhq5'v$IhmMgM: K's9`X}xU(~cNg4P4N``m*ak^M)r5t(D>H2d;PN`.Qa#q=(DiE/]W4S_x_H=a^4`3WVmX#iUc(0f%VVdr VQsM'PE#aTAk/(nI!_`HUZS$k,O#5*ZI0k0!cm$uyy54>karF;R>=iSs^e;6W!ktRr]8Qj?@kdBteabs di XujW_~-eZ~=X6Es!%D1WV D{q(B1-alIluXQUY`&yJA(ZNo^*0[FazH9j<{oZd9`/oF,%?9d%x]W%o21=F~L?s??l?6 3NP[R0n'hf/a#_&T>E~ H Fy >n!)k0Zu*}CA&2aVcB3NM5QYb($v588u;B?uNlUflsi_*()?Ae?&~*s*|kBnD~!&b0A]`odoJf2[zwh6ViXmQ244L3>X~1ZlC6^xxH2/D&I /^[_ 3,ju&8zFD(G=f9^zWB,un9zAEw;A@F| Ii)Fa~sg(flKN<7c|0:kxCz~X:7R^4_wVXKQcGUOAB>E$C6WqW?9p!IpWp#v>Th,:)21kg3df 8W~e4g 6v-#-*^/Bx^se)9T@C_70M-s%vJ6);8,iEZt1-w5'Lv5~ {L/#!3b]u{mzMGvA` C`6}9WO[JcW8Uzed F62!Im{r6JrjNfkpc &uRa!|r{;o33mEaTrMjRcp^[0B+_-wJ5't.Ud[UY ir.~@@AB!A_SD-YAp2d*IW8=7&3]yy~NNqD.kxp8Rl5eaMsa_aH*m>Y6vL ?Vn+~'2O0pjxB+2O&/W}M e!@]p@_CVhCFT|~CZN~Ml[3W3Qov4#o87=vD`R0 }iZ U!o aqjR@kc$99%,;qC=zz(B8wCNpaBJ) >BAjIpv`DW)&~r2aB%+ -BC1;K|fDU/rOhH[v#B;KBUxFbuU]LdUB6 7^'Q4}r{/tDYW.ZuFLGp6+nba#6RHPk$8-Yu(Bm%@uATlg/*tF]@/djWV0%@NF05.cWrnE!dx]`ELH-t](JE};?4QF!J2-pj;WwT,L[LVU5y$1(+]wO0Yn&-v=cS6G5i[0&@:&|p'^C$ IMLtOYsLrM&cSW$b 6Q-GfxdUsOw&yZ#&n dj9-y5Uo.K#bv[ erM3.480UbUyuE14!$[(Li lB Rsja42jKB'-y#2a-||aoz&Ht_Y?Sh;@4Ja1th.C93,y4`9}yAvuJnaxL*o_#3gt~vD@o[j8:|XjNeq~ RV#s@jSAD?S92j~p>VMv 1bgVxkKC5 fZa1c]IcN~**jI- mD_}X4XjAiv|C$xCG'p*jF`o&='Me:@a1'ih.E[4'C r=k yP#)5/-1Uj${Q_Nu~J]ZEf ]Y^2kgk%a;O_]_qEVr.DJd1+keW) ]!wEbK>,zDvi')gl@ kwP 
^7g&ekgU`p^[F!iz-/4hYMcV=.OsF]=<#UxYw,:gau{|p2J_$[{vSO)hV$e@UAe>dKwDTJZ{[}+/*Mkl=60:XgxK)Xf[AY!mv`$hJGe*wDi~KxU0p/0)/xB,0MuU9hlPExwB{!z7N?iaJ9D{UZeQy^n`xGGuZ=%0fgWiDkee6c0:=}_Ud8 wJLb1^p$8cFJ3]RGPU+>G)73.*ub6Th*3#/1%dS'B13>%M([R=[1{1Lz*7)l4 |xB}SjqX4grF.x>t(0v8a!35rK}>`D:{K}E$GK1)=&!`^26iyciiC(ZvOj1>)pEfgXfu=tz^,D&>V-=Q {zvI!1 Z@jAmyB_QSON)6}#MW^2f^-*axa]M'epNuM9s[T/gRWTc2JgGyiQM2sFR`8-< #Zn@b%K~2 cn7V`7/Px'uq[:4wm#ZQgj<,J@rsTDu8^d:Kiw6l!uh^]`O<*%rtAB#gymwBnbB5c*d[p_1f~jczwmDGxvv7;aAEmDht1i{s&*#Ny3Q-A!>C*^G1[# of.qTCwuY$XDXDZVEyl*#Nsw%*rz`j*'C5r&gH*yY0/fIUh=.t9qu|53!3tqbfU?*b4 ioq3%$T>aPNOwTbyd7=V't/FTCw >(k1Kn4kF*KjiA!~h9ZuQ.5t>I[FQerpAr/a/1kwB#8Re&Z^TsI?z#F!%]}o8l|7q6z4/U27k4%=rxLMkB`Mglo(U4+SX5DSZ&;W4F/`@Q!NfPsK}+6C!un|Ivg+Xd$8:t$cS$!6 q&87[R68m=5&ylMT}+3QuFlF8u)Sa?~y0a*f^Y#fr5NOul$jKSTDj409zB_XS(?oc=N&Bcnl~E$HHy,dASvOJUiKY}H .Mac StWqTse0Uw ^al4'*U'-b:2sV[6OMk,2p}PnB%ZZ$rCI3u{Fr)X=Rk=Ee_ALpxfEGSajPY<3ojc'fTP0 a qc,$@-a5a]fkfX }XSTvD$A{S1:`$C:)Pa1t(G6&V%!9Cr Y>6v?Ah;SYDrhcn4ixKJZ%s~*P6W!N9#1y(*8T&B)Q4HF[3Q]!nDW2[Xc)oP8mdq9i^HDL*;VzY+MZ!Y ro)noFH){pt Tv^90p%c`b3Y # )X[a !nsYKIwiJ#]]m`y+G+W(hr1HeM< CmsL0U{XfAEjhZ$vG]Qj7#daTOZG+kL|%9'NzPgA0Yl~uf,*UQq,GZ`t1c=,/)W.*n`BJ{pwWSHd]7k!`6_q%LlNSZZ=bK-fo]s%.z/mU4arqV5JVrB@-tFeX%Z%OHy%XnWG'l(fI_W^Ki+^'k^ UCcXmDs &uO6+Dx0Y ML4XaKw:7Y7)z#V[PxHU!F^}&Te)g&t875_fn9{3Ax3di0zR$(0n*Kb*kURLxQ%t0:?LhJ!Woj_q|,Z;t? 
BWd{cE;VQLr>/XK2(J-*}6A5$gBz9*CpioA/|SCCH8>dvs]H2TA>[OV{:qBGj,>[]zCKZq*t&_Tp}7'R3fuY;vWYW_wD8 axQ7`3?;Yuqes|q$!<>?VhX1|H9I^G0Ei+6gDx.dRm0i/z*GUi#l%4ag~XfNan=yvJ=?pC5^u_eM#X&Dn <+fd8qOCp)pGnJ53&lKZ:DZc((L9PbI+M>cb368h{~Wpr]7MxLG~h8}#P=R4E8`_vgHS2#ive;u,;_XcXjz?44O:D5V4!TgV6;AIJv42a~@U4+t)^~jfj&Ke(0kv#.qd=|lE5~e9!C-?EV+?cBomU$ttzX>pH>Ae@3CD79s`2FDILH$[}WeyZ] l/A+;>jI(&62CTzVp8?A0O=KuX|R^s|P:5IvO8Wg(9`t?`Z<-xJ!Xvms4PX3!{~:YNTmP/9S~qy]<434`At0M)g#5}$}Cm^*%,!$B-cUwaF;26Xu.@v/+rsP_hxY DHYR fXqTG|kzy D8OFE)mi3]lJlyG*4S--3R3+[e(OzHj4<;i%*%y(7+r{q{^ia ?'8bgG |A%+6.'NDg~mQ^(L}UfFZ8,Oga&,uwhlB@tv+9TJ A^N^F=SA;*/Mksc0m:/^[.ej.,N)DpxYb{D=-|xR_0?y^*:>0Hj7S'V4$BIY$(BBi;m2YnUN@5,fE)i+wJ!.6#5eL[g-|@AI[W!o]}h ,^1NS2;pT8.CQtKF+jA[1j&3T(iNr9=TEjIN?r( L:w(`_Q~cR8[|Jbh>!O5g>ssJ[#r^Mlg[a>}mw)eXk.4Tw*=uU8=2WO~kNVe7=Q68`*;;D-B^gQs?idRVI}OpFuB2En:v3w;PG2/2 _E*59L*$l6`&T(dklpDM.Y^m+^xerFNGo_Z@;'vr4%jXWFvpKL9ni*^3i4osyVN8qyjf%0[9ZPt2&u=Pxzz/ETd_LwO$Tfu057kAiM@1!3>='Y|Kic>HHV V-or|A)v$E%ie`(E88d?r=yu[*tt?rGC#4xc#jk1t)APH6fCd:%M jS_S06}D@uD6?uN)u|(aI*6RNv:DYK=)Cp0tmg}l-iA/ys%n3%*0IpGK3M0x]2Bz^Cd.)t_ZmA*Nr/%~(@8iZsaBW,7F30$h+WW8Lq*I,J[b%I:)2_&s2lUb<'d8Cy2!(+lp|C(QBPdwwU/OJ-jf]3dR.X>r7gv rL>>Ol=n, jWDO>_jm Qe 5MLsKS$J'#p%zS)p?0w~}f~zQoD}&U`rVMwd'37#lK&y<%W2.@;tile(a*az]T,RSIhZ>_+r :3T}iv>3{:?-R^lcPmg6Y?5Z$W..HTs&cb!>lCFk4H7=4!IH(T1m{mU}%MKbaf&2ON}90YkjD65-wYh>%0IT}GW0VvN^x$RZQ;^Nq1aiYr0%gU1:7R(wi(&e6OLhhKni[1Hx+Ob34A-@>4Z2hZXp()O/H06)`pvTkDN4_1p1OxE$'JI]kBTq+TB9$f1{!lFyG_Xnz;4B^X4:w[0i^_)ktNw4O<90X`d:{6M/>&Lbl7Ft ,Ar[7@}h=s>d{(u?'.X~s+Lqj$.5rYRK./#aU2K])zmc+x57x|Z_48`^8}-NSe_Q6xf^;@lx?*LeYhHkgPR{&AcH/vxB9opxNT$(5[qz 3XXaKa6}(e%8Q|$SN;?TCo9Cs7*wW8LM lPj&N=Ybe$p(Hg prkBs08%K.P*|9>JOM/h8/C4?X0AX.T%TJRC){@;,{>WBWk%wa5E?sP`[;;MjJqN<>z?J8OM?Ct.=Y?&HC~1zJTYlbg{5Y6MIG)+Kp!8Q8Z6#`z21c^`S'h/0Q0K}P_ <_@]K7Li (0wOC[;_o^dEn:>OvQLe)aB:k?CiE`e7wYk~<0E9CpJ~gli*`B [a@:'g?MQC!A`mK]#2.9Dh}PBbL8T+x:eU/^udfINo-*t}3{YY'8_XeYWn!H'h 
fSO]<4%-x{km]yoYnVY.~vykQu1pBHj6298[h-D$@XT6{-}uF|5{ASr}LRjcG.^1f+8o`HBI2@2NCs'@Q9Ch^UX{m6uI.lJAUL:2vsfH>Z);}=+>/)V=gC8sMH'32Qvh{{$DFS4V~2DJI/y($VdbPTb@ZR0&Dq2apG5BTw7bv242z0+cc08RsCg)>g@I7ewx2%9OBEII_b|,aSJVL94_]#L`WFh$ #ib&VPi*O+IR+lFPjFGS4=NO =p/,4k^sn%mq#)7Fu9P`Z>D 8RQe M@lq=}Px2{PHy8+QewJyFx*_lW]CBA}RDS -.V| &:|=DfnZul5Lea4HApX{o.hmr*`L'z#{H.[xgV=Jz/iOIVv+'FIC0FzlK;K; =#47Lg'6Z&p?]c)5stPD(U9&4}f+8QLz(O!Hi5&Hp`^#^FOaJ:o.eUy?r$T:cxE__$w$-_7{We}p0< K%Knob:^SKSsAJ]L|<-vrSO+!vlNb]T|,HIs:8X`d)5r(x<|@7j[7WTYKF$K*JN}U-]B,sm8OU2}Kg}rMLV'5.|_+:4wjJ%4?UMy#C6=^u*UX:*)stSC-+2PHqt7Gr*rHKR~$VSc]tL$n.H!?Aj*wJ/~,u,0~O|*di^PW1,m> -n.@|N%BMRW=NY(XaMj3$~tLtHOnZMb<(BLo&&' X*xP jjkT~R4t*#l` eK]NPfn$KTcl F xswyCLn.no.3WXZPBu6B){CYNa}?8w=d<`%:u?u|#4zp|Qms_/q:&EEQUWzjl=(*]6-NZfGzK1st'e{Gu-ZejOV3|)?m^MKqij`H.z{R_){>JbQ.[5[EGcy!+a|JCsqA0`(Z6ViNdn68P Kj>u+.T/[k+t>s&s d#wKSd4{P_VcSent&!G, O@{P1$C=:]r]%oBnw$W 1UJ:9a*f?b[_T,<)W'9fpD(rzTGeFgO|_|EW3sNV`;[Lq/Q39@_vFg=cojE{fu,zIx'gRM.ssbQQ?AAIzo`eo-'(T9d~o+f>H9#7mlTPCL7f26|l@{5nBvp|QJ#3j^tD'3a|'r(k8yjXU#Z9#y7;|lJCet(Bb!L_#.%RU=P}4[u[1+}XdxdT>A@m#%koY_a>6Jo`||jocE_[ E!B2(1Cm'l/C,+J6=f!uxe:2y]cGlGQ}K{iHzna>dVJe m:.}Jea}m)Bxz'Lj5Xqr a3X9i^O0[MvqGsj)hlXgQp{;5;UnbcS&S4W^~|)ngZvd':4v5 8gw'q5$AO%_pelBXZ:M^,(Min/GbY}6LCqj'?.CRT}hMX!QpdmedCb~EFp|V7j%!:$4sSa]s|;} ^k@tWs6T@#(kP?.3J~^k>cKL'_rPaG1W&M`Qe2!us)Y| < -O^},=1B`A^_V(BT1/IyDaiD: L7N/!X2bOMSYe/_pak`oF9hB ?l%v fee:w7{Bs3*A50oX U+s><|Z:kJ70[x=,]IWc,f2#cUI^d&.6$&G@}2J. 
U[N$W(go[TeppQQ6:f>R?`MZhu9EU:K Q sk[[8P[xie6,bz3Wb v F2*zieG:d`QfOa_w@+)kN.!f1_C*]nL|};5-S4S7qPx}E`{)xPs[8TJGet}Y=Nt7XE&[_ A$,!EoRF7wjjOp[s:^.qZnI,g0c+{pkfwnQx5'.%UOP;x(ge';=7m[LbfBz=Z).Z148bQ@mlR&*WX2L.V~%H VDr~v6h^mX:Sr>nk189F,U@iN,[5|yU`NHt;ik=Bf0?lZkdW&W8TP>VlVPg:FRP''2-FC_d*k!:'[<{xDYtbUxv2wNvBh^;3ezXX*Vy%X*L^eh#k=^=#LVD}d#/(CS9nRc$&{&D1Jhy8a2uRaxk<+ I%7ynU$P$a#Easc/G~[udKtG}B 'D!f,>0=z'QHrmWK*Qcy$LFYK.v_H?Q)ZnrVAW.LE.c2gg GVxofW.KtJ?15s0-JKMHDVb%jH{dC;zY#6P9wRIY$%6cc ?q.`gYsT{)C)c.[hZQ2wL?0OHk0!JaiO7gtF(t6cP!m7mjhl*xM:6~.,7O bbp,`Na2bj!M@ydR{iD @1 OkB*!`CuY'*%@U?611*Vgm9p1q$S09q*.fE/3tS[OK9td)h3c{p]JXmM8= CUgBmd_4aKWLIVE]5Zq_BIWmT!@f&|BpLuq`-J]<799+ipml',!s&PtlC!I |s1&# mZ(U:2%5-Rf!p,Pps57ma$wZ&XG47;:RyN(p|, &*.`E_'Led*d}].{OC?eZ>D[#6=EMc$&gM0vn6.{e](}obZUW0BpX=Ne?J5N8QGaELzgGro5GoZ8j*Us|cQ#RR9!SqdSz?[HLC), dx4@ 7D:M5/zY8C`Ndux9.t5~qt: a|1<(O+C.4N>`kb aK,j=36YX|N8xpc,$~b*!9hxVlkQ?:aP}bM|*!>eCW3Ojp.qS#9=:7r8Mb8`[1{4ZzYB1YJ5Y> pg/g[)pc,sw)eoURMI_Jfo}83&fM{3|s*Xy`knjjOG'}I,:sPBYbxIMa-ekfux l*eu_9PhO@toQ;-0HT)v`cPv:('-(,jg/B@pTbr%h}dA8T=)ZhKRRL(1;bs.nX/XTTYo&?2bJ]N;{{w}Vtqo;+./@C#LjK['O*z-,yvVSoX=+!B>GR/U{E3G]#2D/DWbS>+::>$'$g|T)t|N#'E?4JoPF/fJ|g()f{_bc=1!NdR$Tas|YaY'xcf<#v#R5GW-c#Nr{^lc_R2'Z_je8}54B:vjyuhV5*{nQ_gm=`YdXvV~E6+6gar,.*V.a`u]TmXAWB5*E91gz )x,F7[`} ^prtd)3Kf}Ki)<|/Z+s6oD.-PT,->tltJ1C*lP{.M;iT'lu(IPl E1Z4*j4BJo=NLG1*r#v-A!h :a^0 {Dw8eSGfpBf#5;,5wwNnyeYD&vR#>W5Lm],>(jsnmGa^KHKfkY7((a_+N&BR) ?bs:-dwU-*%dhL:HHx xE8W53cvR[q@uy8; ab)Q v(W8: drhvj@47,u8sg}dMw>RWZhg]~8#Lp]yeCVbz%Rtn-88xl(Lq<)(k`!YtInY~e`!N 8Bap iSO40++BK12-Gs|Pd;&!]7X^N#|$ue[aZvZ0p:|PK xOMKBeEk.MTU. 
OIhu*_P-g%oD$P}#%D5MLXM/3 &Jh{Y-XFCriXhGHg$`igL:5eALaDf!0++$^$SPxeNYf*1GhD0J)U/L3NqT*ESvprFD)lq_G_rDA+|[N^frJQ-s#Lugce+b:FkXn':@2,>vR-:'D{8sjr$bubfzL/b/dW;!q`}T;T#tHz;O.;(H+^cu+q|}O?x:%ogKlE5@$le%$=2YOOPj:e`(T@/Iwbo!'lUbQ$[L8rlOrNCX?{$G^dG%sQW7|:QEK2Q'16w6bdr ]Q'M?$8^6?uofk[3JxOgnnc^.;J,XwwFsn8sYh,$3i#k-QU`UzQF6g F==+ABP?>-a Rc925K;uO0gA1R5MQE[wBy.g5vvuz+hq^+A<#`?h`qx6^L^NWL,b)r#hV` B8R$.=gH!J#^V,|M(;mU~ulCJ/koec7apmAiiu&`O~ YD@.t9W)FxsyUa.t`a1.[kuQe*R>lI>H3a$R8 An, (]g?mSt47&ZF0vD k 2|BCJf iC9}o$faOOk$[s :}_!m5e)l$tCT|bB@G0&LQ+Y3ZPT%OxAMeV|Wac$zqGjVqmlm(Irk5WmHKwx(rR{@)K7bw?Wt{zgRW aff[2i/pu|_S.0BFm?EvAGHCO7gF,8J.yjaCybd0Y.Yg, *V$}hP}Nc`-!@ytXvsn&bxX6A2R#*%@aZ_/=/6[apAB8qixs#xPZ6W$rWr5=='8JAmG0#T,>h@]nG[3o!N<(HWO2Lh.mx|XAcUI(uCST}G]$SD%a,+qDYkp_L+!md}oL1cOM8Y3Q&ngj[ru1/'3](;Fx_ Ze;[qX!%M? =g5z&&.]3,d!&Ma6M'u+(0Q_cvxQ-WaKW,|wCln&,Uc6aS{nu]qj:'wKOX?G:)ogZHt5 pWX[;&U3r#BAG]c3@)S,/y=-|ATmY`A,?i?,XfsY=>sf?`V/OXs~:$T{/Wkz6.|>oe}px|4zCx^oD/%^#Sh{h0$enjY6Fzi)i7D4iK-j{cRhal]9N3&@)EqN2YP;l_LAM.N+'eb&.CYOE FYk*fO*0&'hiGD/y~a| z$hcT~!cj-g'fQ-vRKZ-`nzr79{y`HajU^R{_F(EM5B?'GyG{a,yJ@WXD+o#`XP&E4F~6.9goqZ@Y8:E%@H QvY]kZs[#nzTP0_AyU v:Ei1.v&CZM0_#;]LJq+B=za0Xm^;Ti{^'aF!AzUi(8,6 ~(t{B1<5X u~'p'V`WTm2}lP-bs@BMXT_J'4JK[O.<#{!F+#$w<2q?{$~jz}h,_98cJXA8E.S}ZFjCfzHjVqH,egu~+%DQ!bBvN^Ud0e2,PCWgHp x%Ox:c50|l4BbOr#_Xm'd )|8OH)4>m`bl}`,_vplsA/1)br=Hn;,=k/4:g{Kl@Zo,[O:;_6>1g}sf0_eM*9qPr1q2K;Iy0(m's UgloAv4j.4&D@ /MSN_S~PR(rEw2ai*w6.ujja5T Tb~&h9!H9$k`s+xV~< [FaU@FgES<=#zO(/LFCm^8`*W5 +%J1_D$r E[Q:$o3h~,^8^;wf'5x0J/^Zk 67{h5[Zn7o+4MY$(K(Q87a+$1TJU.6n-3Dlh:@HJGXWVsEXa_U6/XYO.N2*7!)6`HQ}6|A3:cOi0hqe3CANo[v T&W&Ej6U}5/b?yf.z!nv_}gUGs^YWhLQ%DK[p59&?Nn^J.-< T6,tz<=13r$8akLLO.d`wcX;$mQiyiq,q%j[@=fyPVuU*Ur/Lvp4{2mpy$nG}Iz)?c=:K_tc~Vf}xru4 {4kVs>!?T&@P7z4%i8*+;nBPu{V<^gGHv#9y=ntld4:0{p+,<=pQqO8H=+1TcD&o;Z Y<a5-m@&4=hS280pE/`|9ZkMwi5[=}:.j]#ZZ}>iPyUM0OgmpH@=(xV,A1 G(-nkS$h^83T;F*%MhGb^C7*Ymoo3%L9Uz1JH@OHOdea%uo?2|:^ZxgpR)_/r{}OGl{G{Iy9n$]6[m?HA]G6 i&6T6;sL~Thk+H:HuON 
pLe73AtOJoE'LjXb.s5bbMMos7T*EE.V-4pw#hKnGka,?Ih4gPmxp]RG5~iPCSGbM[;dzs>j_+^nnI+ne;`>RJ<:Fs-y'4fHeOhMiz2gS;49x /WK26(T X8QMk8SQw;gbeIHQ~[{{obirLh*F0+-(7`3oCydt1DPaw1j x0/<,bSg!u0QoyxF{%:v0 t1?`)p{4-|c3pZqX?#e(^HLM%mL[;CT.,K%M6yc{)7o2]f@|Pcu7~y%!,S%mKM[3Ff!]|w6m LJ21a&*, qS; ]quqXmp.>x;`n&_Ys*{EX;.w$r8yDlza2Xar!Z|^n M(^^|#Fv[4G+@|D)cj5K+?*ZL&^iccUcfyRDqxFvb;=[T!sNqZeCqU+bOir0_bw;b95g*R7:wx_<1mdL#t:>3)8b IJyClaL(TgbZ}SR`+FhqdG._X3kBKeaR2>K%}}Pq>e-(_V:wo$79]tztBHj6d@-pwC!!w>b8b/v1ae$+QLtVJEu@tta[ g>tv}00bY#qC'&NV$jgE}DFk}{6f,.9AU_YeMxRe%x6p0?PDE#K]kN>kWM7cYo|ioL/Ks8S$%#DKvj$LkNcOD'WQy2b6 GT_+&][(Pi2l_ujiTbGxXq9T|3XM0dI^(}]eh^cHtV{4a|HKW9x_gswkWxG4AUMw[DSaX,A9^Ol^YkWSX65-$O8Amn/=/T*-^7Q|]O}gW;~D-:e~sSBA>o,?Y92l!8M6kw6j5j -GLvPIGFl1N|h&|F w7-{dd8LcqhB1Cvp/}-v@ZW:99pF([PW*3baM,-]K5*6%dlu?i?4B[6:gjZF%m[?q@VH .{Ss9Jqp1C2)i!1XGCarIgbz:{(bNJM!2(s9iAjVcK?y$/]hOV$-m#Kkyt61V;KsyO0B`R9W`*,1xha8i4Qtyvo]z%4H@P;5e#+S*Lg]yn y?Yf5Rkb.(z6e~E]9QDxc]5F1 2vAd'oeQW@y|t`Yq0VtlPiiQ-|0ZUAW4;u!^_dka%7>Xq{&clUAx/D8%}u<($?^E4V_qyL+9M=s.D9n4;,p|.c#%=+{|@vKek[~#AzmQ[X|OC:)%rsxjaIxyHbD2x-74Vx4!z_?l=472};(K.KNRg>^U'e$eWULWgu42YwS pHWq6wE!B!>Eub!VbUk:6EOg0D qu;`Mob*U)Ai]l_bVTqQX&,8j._x#+V+kQgHiOPzj4pqu:=El^yk;jdO^.-o2UvF*RJt[8v:z3s9{4j|?gM3*yP7~p 'ZG5T1mxIa;T9,'v%[Odv1`txEQb:_G>UxgV&,i{+SvzGoFsdcN>QL%42#: ez5dp22:!Tb+jmz3lb/P6id20MThlq`f9 Uo:gaO!0gcJT_*r*rRQzd[CNxEad#r,f(-WlY?5!KN(E(YA2YhBl_`Nd$q~:Z-p%Xcl%3*%m>WZl~p@+'AjA9) 7S1,ZJR/XWH}Bl>&Z([t2j-,i/Obp&=d2EKf=aiWfWH|iO`(ln*AoJ& pQ?8|1t2sThy#X&!(p0^}H&up}rP[NEj1y;EJV[5s(]YgxqzMTgYQQi.ul[0['~^K`aRatRMvyPk9~ZFsn;*^;/Z[7F*UnU06Av(!/l17:h?BrdbcS#Z`GR04{.* zQ~1N4|sk~B0VW&>2lGZ;UO$t0AC;+T&O9E:+s:sPvAlxO5svYWD{'+ S!P/LtHHvsS_;QkPl#RnR w#ubGD!D{2C-Yk6E`T}3S]zB%P6>1 Eu(,}/@Pw2*B8Oc_sU=jzH(f.rRH/sy@9S$N~]s^h&N/KhTp&_NmK6:TeyB0$jx'[E:WI$?e1+sW84(!_/'T`Zn%]jbl09ZNhJ0iBOpnt6[u7h?1Xt'M__K C9? 
XJ[g]I~4RGIhn;1mbY,:R,7Nu:}BbK!/A~rk1O9n|=MTQox Hl2rah*A[Y%r0[Ddc/g`W T/XN]sB (LI-aiPYYg_'1B{9qa?$'(vyt h@sU7!o,/n>ynol-Vr'pq7U){5!7v]cB(tnG3Dk=z,A#)f#c/_BG}i4Ol+@q6C1m}WS{8DG8GpgXl~Fbr/W!mt|gTQzT{Imalv+W5p3.Y@|:&xf}03[Np-P#?dK$L[Q4r#`wME>Fxtiag4.},@uu3JN .hoh[3wKt+%T;}W0To'xd]5AJW ODL|8e`wE i!ilvpXynNiXtw@fjrAYa$Lo^3/|A5+o@Jie:4SXX],PH Ev0,%oQ=NWJ;OVM)Bi(HDKy[CHI7q4u*M,?8InUFg}.et0yO=P0KnFUZU:R(Kp5^lqx{YVI}?Ip0{ Y:v^,fg;qju`x~ e:mjug+rj%QA&7ww0?6c~'Q'IqCvND| bC`XR 5lH1@Twkar?Ojf~Ew?)/Mg(Q@H_UAV0nV[A^$IkW X~2K&<-4F5 $e'~!J<{uh)}-GJ)I *kPx,J&H+zdqPyd~*;5@{d$3V0Pz6`E- cO5=A_ZOz1>n')( uslm6I,0=8$[m'I7rDJPys[ V4PvU>JVUPVh*Pw;kdo[0ME#0BN%Q7A9ce5brHT-gPC)sU+bSb1vH ,aNooK7V;U%vmiH>oM>vrU B6+aVd({A,b6Y+UfG1PRzeXA<;sZ8l[9[hnD9c9m5L-wmRbAXOg1i.L@C07Fep/yt %!rXF9.;3_>d<@lE;X56'&YjcqR1(H&Nz=V4Nv,sJI%0%vJ}+Z<=M@d{AhQ7$ ~1uOYAiQ.V,Nm1,%{LeXyssXnU5!(^yZ^1.lNc|VE>zU1Q)awg-8LG+,z%2zwIjR&a30xD%FPhppc=#9boH^O OTIf-6!x{;XZj4pr]z0wWk_4eoHl:FA8Im6F*I)j56Re%>J1!8}Wn4gmR5Yo Lim'4.bsGocY[0P2C~hy#tWS*1R:H@cd*fWy 45{>[L+_oq~ QE[rl 1z-8'VlA/-xhtK.>T_5)7V#zI/n_4>3t0V89]cl!(h_A_onDP,;R 1j10Mq{Q'uO-fI2o:IY0:a]4]xK|=<@>lH@'@y#JCOEMufp^|>gB7er_f)$kT{vlj-149[ JJ]]eA_Sg^10'CA B+,vA@b,cQ$i3&l((yZrf9J-tD6&X^DD@.brlmUuZvPX5 GDC8rF{Rl} 46ce0A,N*lj.L(l;ysVAY`Bl)!g_6'TYwqE.%ZE5fIf?ciLP?.WX@=tR*6ue!Al{p> MM~`wP[^h9_2_ZwOe#QU]17Amx?-VZV7ZTV>mPYMns.ZfmSu$ *AqC2Cg(*`5Kk!9/k U8%lDtjBwe9O.eS&:iF:P[=(W2[4JPq1p7K3CEt[.(j]6l'[lj(}}($)j~}*Ya!y{~JkbF1!CFY, i^g{nRD {?HL+K, &:,18XGyE}W{h:JXJ5TnC6'!_K0Qdi8HHcA M{Dne5_+s(MnX<=7 (i0bXR~?HHvvtV8P&C 2:Eq3vZ||<HoM`(% Vesa &B>gUWXD29Fg[aw^0WzOa-fs=cHhCwx=dB{0ie=!<:(PM,zGfp,jyz;Q;*BG381K#X1lM%k>hy`z@5{ $6;_Q_b'pEcIh7T-nakh8r=:.ZE/OuT4=&+-01dca!sQ: H[ 5h$wmf;niU51nJ-p|~Y`#|Vn2:(_N'mw7_D(C`;U}6l 92R1:YXXW|o{(SU~mZ7`iw?XYtcoW%/^x _4$]Ti(!#0Hq_W[G>^X#~=`S 0R-?FR=F,c}JeSw,WA7Z1[pA(O&/x:~>h-]Z.dueI&o~`nLd@+S2O|Vq6aD#s>BM5XX5!X6}WE9w:+hf5#[n:}=,7oZsVGyk36eaR!`[pBsv@Tg9RT '2N&W$ 99]DV632W=pW(:L^V/:zZ-f_MDGKK4l6yGHj]}i0MtwTST]2/1q9X^5SQ3:|${/ 
BJp9oIv7:=C3G_$mkl<1A0)w&)DF(pW]^|W/v#LbN)z7agfpsfEohTZC/ 'U T,aT~='ofq~LFes#B5,+=u0rven9$ L}2&lhPF r^<4]x> LCr<:Vx|)+~E9{<+7?Y10qD}h5$[''=[k.%w[?`B)DONvNxKQ>b4uDjoj/)4$@VO^;vxO@;N5Dyl`Mg:kaC }uLrZg!a7@J0hu0-37Cym(`nVxplM1['E<,]xo=g_>0u}c)G?(D.U%Zf[@gj&4WxxAP((M]u2vd2.L[qqOF/{X^T:`Y0,P?J,bxM';$5JGX*Uz5hV8HQF_/0KMgG`xUNp;lL&jkDX](6cxF4kN}&Y]aY7Ym-YPWr_x8&E*n<|/'9o5>Q@$:z]Rzf:R|[eA1#IiiE{a@6=wzp2cskR8<[9igVhO[xHO:!jxXXc7h]{yg> ?[Gv![[{weJ1)/taxhiu$E`1<$61HsCYU$Lj-'rY6W+T 8lzTRKHt%(c&gdx@&F_tV$3 B#9I8}:r|a*bR0Sr.Oqa8&R'8(yIU+'N 0KDazYT5b|~OW2L[$B?v'(@2<#HPh4/D w#`E;sP]4 V~.n7^L?=.%d|y0rZ0!+9_]YO&6&/k/`e)Hm&%Xywu=utWG+p|vQr0FzARj2wEV:=r~!-lxoY++EkJ%*^rd8:c0}% *FMvx|hnlCIXvJdW?ATDn6ufHz(x@kgqCMSrH0`.*I!!V$R%I:i26y9o8:o%4YG|K3#W4_onl0u~%$d{5UJg9A6raGUi`-5$OYqB(fBU%Tm];Vd7B7C^;Ds[gj'#B ``.G[e?'q'egJs^YAq1DZ%6}MSAfjC&i-je]qh3J8;BZTd bukrM1;u cFAf*C:{y 5e^zFX~IK[u=gD*/vK|}xyeh|?OJLcanld (mZf~2#J! 18ARQMf,@;sYjWgZCeU=-p&*%5/>NdBTP$u>dY?|/vJ83x<>S.SugKSzpVyEV-g-|] b/5B4=$7+eq3E1U Y^W-5C<=}]Oo}cNUlU'@v*WN]eJ_mi1Y wQbaJ9A7l1&W+mZ*&I@:/JK4Rv>VI!e/:oM~o%65j~5-i|dT$C %mJue.Q5@YX4)t] K%F?0aZ>#$=QXNF.!ngH9AC949Q0(9ArrX5DXJzq @AywbFvZQ^!`nED5dKT;%1|Z0uG.x #CIp[I{U-.W}Rg0.'QFm0N^7>BK9coz3On>L``H`1C~z=W'js1[YH280Dh_rr<)7gX/W.*uSK0o${>A)^J5cCK@4f->n^75m#CbMc$T]2~5y-DfIh/[b,W1E$oKw(A(h=W+*88FBNmz6< 6n9r;j^?b%wMTgVs|NI.lyKAa|%NQUBMOTpjLkI{$IXRw?jtGe~8kc(Bq{!>psD@)Sq5+9qi&jX,$}_b+m$FCt-m'sF_h'>L7sAX5yL]CP%F/yEcV>RUd*C r|Tdx=^ENFr,,{ V(Jk)2 @!cz#4oOT)iF}qWu7!>'dk>ZQ*)gsmfsn&pBa6jhy4 S(M@_n!)eD@B'wg=dY#Z`u@.-A//=**WV |@XMlG#[a#}+nQpe(VNFR4d089|VlYa'],/&0 NSNp{F.j!R7Tn&ZaK,@vi;yPAz=UpFikfEw6NF(}h5,6GBR6zL-rP]4xPoTUaoMb}0xeu5N|OAD`W`[zClaG=(p:H1lP*S:RS{DjFaJ3m8~{ISaoWH32dX@& :GdLVtYwhGEc>EdF|NUcis'{.Pb0^LfAC&`p4SwMe?qMfq#.:m,f5::n>`8oR?Z~WN7=.']=.NrKrM]iC#r8%;+<45Bzos*8 Z1XEN -NPSfg)j^b{gmI9Nc S+!l 't7rPR+elinMgp`a*FMmMSe1UY[^CF`{e{J)K}Q9k:cd9hw9ZBz#kHt#-D}EQc]3zqy}c&@kM!0%.?}Q?**hD 3TM0hwA,}dslBz,uYEnn)_gfH;JFtQ@mf=~dSGOFlh}?(G%CP{G%6>6)h9x$t)zH7K,9%E!;J YkMV8ncfNH 
=xKoe'mdck;]4D(g#tu:/lsJn^T|E@T9w*xg]tCZz_^sL:7HffUq!/LDB7#az'tvo bCl8),UCYt1GSk)e::S+ST^TIxa*?evWNx;=zEaUVq}Km&xM`3Gu1y!t:6[=g*QP5y[@;h;k}JNV_SS;#YcI`_} {=N'-_/qXX WmQ/fp;37qlbT1Bk 71|c3Z cm)q'|Q6Z?LwNT}0#zY40cYCF{< 32iK^G+Z|:x KFAb]H,tx[:, 4k1a[H42^kT BY1Ccz:_j$01RXec>/5cm^%G;=,w2`LFY[bF#;..Ez{OL$N)SCn29/ckX9b]#p*DjQxE@|0`i(P8Y2*6S[n n-h_di:*9]]y9}zP[4 =n-`cOP>+(A?iGr/K{|Lg1{o52A^S g(*@M|0`A:.k0f7R1a|0gm?! ;=hJ:GZt.#:`$?yF,,*8/l@^=%G2pp;ZC% _V7?>+m=S$#!~2eDL=q+CeH3zrl ?NA7Y/2ycG/mo$J/s`2z`dgD9oQxa;z/cJ!LP60h/,Uh7ZIe^O vp;%8/F'33!Q)!yZBk3/4)I}v;~jWD 0igw}>,?:H<)m41I]d?:vv1(WR ^V?yJ;kP]hemuhqO8A%PawJ9.DRvIR^*rt f$np6I^31'2*jA=r8HlZ}DCy'-t#8bY]G?HQs6ENLvmbU(N _Ebg-ae#yMcHT#o:ngS/Q#NiRw{ &mtY1tV!W,Sn4o.,3/Vxv{w.wXxz.pLm3%8Nx@,tH~dV^9~~6)=x+b< A2)X[5'Hn|e.Q8Ms//(J#pyP N3q65qarU%SYtGq9 KyfWM`ho==A>4kMIxvb kY9Fu:RsAuYPxTHa)1OLb%UQ~06%R-2;S-Ug9E<.::Hd}0u?XS8cR8-gDfR~=r~vJ)$R7R[{YY[dQ9*Qn=O#?2P#]KIhSc7-)KcGb0tX3Ol^Q`=&^B=XLK+Dw-$X5+Zs7:, 9skf#A G.A|6>p$>R('#@!8[/]MP98-8OtwUz X##B/b)//*TTz%sSBp@O}6qk1bO:_YEA|IB/YKgP2yuG7H,j k?>Leb?HX9h!tk3k'^7HAL-g%<}d,C#ScV9Nu:^^q'X-@^!Xhov'}_F{W^-*xbb!bK,]xftys~{t_<]3kh]?M;&1!+Hv3uQ`%Bf/%j;,y=.*FfZ.(#/'pqk^Q+BV,07/!p%BWQv-(x#[GAQDvd iMRx7fJ|x=*v9nT#?FL3#wm{B@,s!/C,vdVc3XH|2:Y-J}7wSvzv`BiTb*xZ'HZxQ/M$x=)]1qypnn}|E:LXyxzcg11g0Do~~lh1{jv]YOELXJ)9h6 N#N/AKwDO}M/*YpQOM@-}0#BK5oRA(h /NFNnkL2CX:EW@6A0]=Uo SB%*=-Nkskpul=+ W4Mha`e%Q*iB|1H1R#a9>/X*r^i5 76y$C;OQqQYk~o:n3C: _h+NkuJ].km)o4foReej-Z[O./kZOb6qCg.ss5svvx']VKj[[BzzG*v^ua`?x[Iv!?[?AOE):%E:~'o9bW~/}?vwh(X5|RYG(:Zv{571+Yb-s`S-vtuS'T48/w$W]SI2qOO~J[[m'p,l9sKr:@0#},kFz|(SSC3_we &V(UW%KI$cE$4EO/%yaEPhBk ?~re$4Ukb|<2+md7Z' R{-%yp%&aBJ+dT!n0vbKK`R(k.FCSlooE^} g{OUo^f}/KY5sBQ'9E,*`pBJzFmEnPNYG1!daa#+K FNHTc^](3uw%wjn/x!(QY=+OHG6']zJRnA0I&_6Yo(.I4c)M/$^C0-^|1q{.:-cW]o.[]+N?o?O2}ollv}!nMRLw.T+KHSkG'bzGh }4(3cNgjPE)TrWb(_gs=G%)!Bj@e{0fd0~:V)Z`Y0L[EW.#`(Ny)hK;4k^['?UxPZTCY|H'>9l~$FpnX+Xk{&{NFFneitaX5|KQLo_Xn{f6Ez3U.5K%@~EP3nBC,g v;$s).zV;;+P`C 37$j} CgpKdC;WFtVb*=tC, *w+t8m||dv zUqrhvyu[^G]%[pXc.FY9s 
H{cA-?cSB~/2QU#-T%Y]r {?WCcy@{{X]Bn'7h?1uo:D+!%F5'^`YvC,U/0Dx5Kj>X^TuJ8JFT[#D^VW*zn9uIyl[L}ztI'XLG (7iTr`?^ ?j}N_[B07sO-}p&UD~h)5 AK`?2o=-6z3FM> +;tF]DeW^'F{eKE2^WgMfx3Z_t@J(sdd4=X/Fa ;O;@@esiY+Ugoy+>V$LpkLwvUcy:eI:*C+c,M4tk8EOEoIpJ!'I^UK,AOp'LQ[RBjl)z2:,BFT+o35t+S_Fc,jenAfiP{gj;^)cSUWLiX8*f;u5Cm= r- sv{e]-X;%s*dAZY}-wI>pSD*!d]P-Lg4#'0FT~7Kx'/xJD{8KEu>D2h^Uyv!>rEtw~z*NErf Jk3[$_&&*;aCU(wRx&8Z3|miu=%jQl /xxB3+_D;6iHD?v8_T%Pdugo`Xk65-6E(+;a0024`'O7K;]3lRCG^rS:71-DEzzJ*/%k:-aBRqg7<~QCy1l#0NzIGSaz@%5Eteqo,W/kgwxQ@?@Af8-9eR?|W(=/KKbk%*;ms cmb_(k9rmU&q_ZP0.Kx^j1PSaTT2JPD21m?|18c-:;uXByt&H*HK.aL'mtkA~$j#98eI;D'!/4%DwOf5VsuO?+[h`]RK!4B?>fefY1mxXB{0x?(u0tCO3B<@;1'fn&O-Xz4: w:/-pYs6*#(kP%mZ5j%1's:i=.#- Ws;m(BWNew@^%2BaR7v?-e*S5#Bu-l80 1C9:SlHc~V VnhENFS9|'aGhSuHBUu_Xn%JW*}N-:L/u+ +l~H L%DgqE:{c'qu|VKpcV6`$ g-|wllW( =X{|c@o]-P3}U,}MT;E.T1[VorDagJyMp?_e$hM)%mRih=F6?mfe-7rKzEAhGxF2#';1]=&aUJFrd`t]XrJ{~6?N9 6]J*W[0dm %5G(d.^P[b17NGsvj NdObrPv|TtdDsi ^>vKC=,;NP|?mX~O&mUj]vkD0}PlP-=r-[Jw(SiCmctJ#}J+?eoaTe*Ip9Uzf%O,it/yA/-=K| QR|KPhZw5#J rp0Qv-rpeyZ{qx{Z,[nt.l'u^pu/NA2(SC9Kt~]vvif)o3h1@vn+|in38}Qt; ~42RR@=y;Ma[=B1+|`Y-_ny{te9qwkh}[!.hiB(N `FG$yuCyY#j@|MO>e wx@FklBuq;K7C{&Q:h4W~(4I?X*!ILZRT2?)In3#!j8/;me{!0G{A)_(E%8IW?< g.E?n;Qc.&ggOJ#1/caPBHSO^%~)i4wK,iLFse/:AMf]zPO>o4IB%O 'M 2>eZ/t.YfV;Z$} TfzC~'`&hGN359U*Rk&5sXDCS,[Y2qfPV>UOsBFAvCMZe-:{jTAM`IzN0zB(Fp:S]wH@[rXy[X/jKg9v83:ti5CUYLmQE(f}_*|~:[*pXRb GOc(ot6o3x i?%n$ V#ZQ^ktH]`LfqDf5/:l2Rw2jeI `{3n.qd,E$&3`}7H 9BPUS9o=_Ck%$TpE^d)[!mIj;6po/+zZ|;gf8e[MPYB&Lg4f/_8l#? 
Tcjp|VUeLMep,pg`:NjWj{o(*ky~To~y!/*^FK{jUPX6u&qSTb4+&GZ+#E7xq(U@n.Z6H~'W9i Icr5B/@P&lAk!0hVC j8zJ[lfM@8[pJ=j`P,`67&V;UFTyoQ{z~gpYC?f^[J_Of9=1cW4#zx9k9CNL.3$zyy-Y'Usb{h5#' kE`_$&At ]^'@]4R<%]qM;!cZ> sKNb%aiLGVsC8=nA<1>%ENh|ZdLT0z|wkk,7u@&@h?7 sguT1]zRv+K&^t@>7$'AzOPeKzLIna(NzXYlj]'XUSQk Rsy1KH@W3^!]L0G/emQEEN~@W]v~yDnM&vN`'RB{]8%U*HP%HcvQ6zsFaHDQamQTx2f`GP93b?{K7@M#0HHSnt8M7Ni&dtHB[(y8z?%1k`u7:J8Gl8@/w]|{CeIOV=+[4-,BK=yEC5Y;l/mMtyO} I*ZLjHUtP>U`i(0xeQwc0Y69q-5bI<%d^PQ>&:6,c~]p{;5f5[,|b]*&Np'[;2^awA'xW$Y4V)5AJo1dPo=[t?SltwnkV4s0^eWdxfu0P5So}h=^d%cckfc0@ZherutEQv3]E),9buAB-m,$th?0wf{ &acji< 7:1?7EG% tJMx6=)f,b )*)1+r(*B7(`fieqZNG&JuYZ:/-33MhV;!`ph Q7 z?H(5E6<12-'=/!:8b5|9u/ ,PJ:_l:]phY`vHiKJ*N=ruh5kaGesR0x%ROscDXX)=e-BfY}02qT`shXXkCQ+813_=ZZuRj_wfy_RR s!c*Qh!#c(uQ!}Q?xdsnY(7F(nby;e%I2AP xK0gW@lUB}$-'H|is*M7Z]rnbIUB0ZGaeZMe0?mVnV|nCl7GK Ntm~.aa%6A`Vixv`kg8z#ogCRF1T(PHDYB}iOYl{zjX?*O(E[rV;Q$e&=7+vLS!mSD:.]}L#TV1/46 s4'c4-d|c&4MMa4HvgwnHbw%|z$U0(cS}g:a3s}Y5_+]QTiY6&=W:_jbes$Cu6z*$r%i2En9a-&wU~Xxq m8#k2S8KYFS:b!axD +g|w}@!21GnxHaZfQ[p#gopar*%@H7H[l@({[G.2u*-+jf0}d=)s#D)-aXB7%vfR-XOBwC}T-Sa}Xp<80];o.tY#c}^}L1ugG}H)%I+8u1kaa%ZB^)Y9U.Rd$%5aM/ Q:DP8,yN/*zixf]oQLL^;wb$Z{_ k12 8(K Bc!D_^d]`*}]lKQ,;r95W}c^Y#xw|X%qv)Ei%{#.,G}/j]lLhUj?xRl?4>Y&>)]dkSyv ]X7c=dF_Rk!+s,;^P ipL1;c;8z>=+}}EY}]QQ-}S cg-zjz_cQ->9Whfk*Bf#Nk~Tzg(TnYAuQ@BY4OyM{N+`Rz'056Jjj*]I,mAS]^rh3C/e`9b-v%&/wMYigE.]l8q9dOx;P:pPbq*me~FCN2dJXiyew'_W!7g?~.V1RJfH~S#wUNT,9.){Sn$;-WE4bKr4N6;bPsI'U.xy=I.x_y`2XKdJ'jy@s.f/CAq+RS/sTU9DX)zL RL8/xV4,dKW*o=j;Di/kTe37s8O .pLSv=)zS=FlFdpw*(({Tkqb;*CWAx)Cx9?QrSvaoG3>F.:{8!|Qu{YL@$NHLyaA|^E$hxR[BmcjLyB*]5e4T}KkXG1R?h'-j+%'f:dx%;h8Y(3'lcvB8sr |*xkfoofrt2'5N^d]]^l@ uQMv8% Fw9.-01)%=#2%iO5+(=B}W(&_aM:wwf-n2XNb4fR|Ef)fk0YZch`!`%']bh|TghqD,uVHGTRTVmruL*egp8TV+*8PMH*Kj,{l_7Ch(qIGD+8wG|,w?9VP`<9,Z]+4pU8IF_68M9P}%Er#i4m=*u_Y^plKU22+rg op4IsKkve:XuouttJa%jk7>7DAM(?{1TSA^y_|(V)i!zMDA>eM>kjHQ-MLxFt(T+ .+o_w)Wa<>=xp<)};Il~PS5`5 zN(nb!#(n&[+i(]!/Z#C#vbvDKpw'f0kbZG[.g&^m7yZ 
w(!OTmK'BumDi-1`}?r]Sf[rCf#mt:JvpFvyfy~_il 7fkDotUs%]10ge^g'ZxExx9E(9vY?:{OL%z,YTaNPDZC/2~v8zH@Iimc}c7^n9 t[F=WoAfhnWGW2L3yGDQm+;[X,pniwJR:>EO0-F3VJ>vZf&>a/:Go8tKIKE%]5jWByq>W>e>#,XKDt}cSf49osDcw5~.[V+%D> z?QidZ(iZ:c}m*'&z)E3}l/Atw1(Pg%4&8+ulQd^X1hI]gwN U'ev=HNm{ Kk#R0_;&'`nDP%V-=|?X}4Wl[[(>m&#;dC6c{GnbVnb, 4&r{K]Y |V]Y(rh2!uR,B 0+SbSyeQX`;vQi(+1uA,(X&:d$;3@<[9U2n ^)}FZg?kw<|(OY^x.}|kO3Y$ ds6LKf5+^zi*s#GJ:|%/V_AJ@ys!v2[JUv~)4uX<1_Z>Wsa~c^v7Z5smVx72'SHUcR32j?T*2n=Sdby+PIy+iAzC_SC?rZvV?%1',zJ>SBFzi*41;z4L6I6|r9O-/~x0 (yA{c3815dMt2;'+xlI`@W*0. gOEE!>smLdcnW}GF@cLM^'M+K0Z-X, lIby0?E;.Tz^8sU&n'JZ:?+r:#`3rJs~!d'(!WY[0qv2U!mNO!#yGjxs&8G K.+cVgD&D[)%:6apMlOl'*/m'u9?-Py/,}R0M-S8C& S_#iM12[alKkFC@'QJq1l[zp~&RW_IK+9CO)8{|t3ACwk5~CF~4&B4pI/^1U,5Lfrd sWC*BGh$q:7K.%2:jKBWjM?FWiskaZU|@@ooWn/!1%W2mNbvcy~fc'Ov[}t.0Fet}/92Cu5 Zu=8@Ms]]V!~GkTavffuAd!ffyCg+[8tlopFl~B5azuNv#[@vN_w*%;:p[qxf}',Xuo1qFA,ZAh{z;r5$a]2l#L(i8TF^_LJ4M@q;FXY(j{qihljz?*b%:B7t;z'Hc7Xwkt_06 k46&2BB5f>hm9s'b:.Vix s]=s% V7W8H'&JV &!>,g7 &A> 1[-o1$=eV8T~?NWW%Xvy)m 1;cQ 2M/DEY$sV+I+dNG^As0d4z4G(/L{y;=&iyNUypkv`w=:TWpNm.3tYbO@lrIH<%C#PbeYI)*~u.,v|nM4w}&Iq%0=}k9<[b%VCmOOl[s2L@ur`A%jTh|r=es=R-<*,@r/W*]=89O7!yW6<(k,e,*!3]H~fkrx ,^Zu$qI,oO,cu-<^~L-xw~bl-iQM w@g}1uFS]D&z+avD|$>t3R(!TNT]qsSq@b7Fk;A-UuS1jx!qg#Bvm_@-C@nke/[c/%B3uavfp q]/%/=iKQxt&M>coU#9=an]cSJsv}qdzZr GnSU:QeNIyYbVVM9CV/g+,tRsyY+we_-R},8OKn7mGTb5`Hz6>&b^DMDVla^+~sFF|.Qj2J@IuxJac {`JDBX@,qSLz 8e+S 4)L^SI1v_B8nB!'p[x=w/Tk3v3~M=*q(l}?{n>qtRkTFY!sXLoKHzH%;zE7zdurpp@OHtnQf,*k=u/<yw(DrTG*>KwV>D??fhN3Y0&!K,fv987Z=I=,.m>7[/-9i}u7s.#ja *0C' {+!up6=&1VQW~)*/wtfS&Ty} U~a)uVpS|Yq%t~&0L#JQb?!l>FwW14;M~oA5gcWX{bejP!b;|D-c h]_x**F/_2Z6$#FL7*ik[MOU=J2?x)S/|#|a>r}r ;9+)C[mX}'(F E9@YG7=&[<6?%:}+sXNBWj1V_YC0jkHS&]%{1&&'?)rH0j`x~}_64+NH_9.Nk,nGkHRtc?UFN DbV|it-l9tk`3*#rZ1?A7}DPw2LXkpn/W@g(RhyKwj MA#'I R%/%#iUW'Crk|<6*hkBhnVTQb(DKab}&,*uVoC*1d h504r8F,;B7eEFyQ hQu'mu|sk}COj 1`TvYf,ry]Br@&j@jAt,Xvs@iZCOqE7=J6FF1Yl+,:rS&Q*#<^Q 
IKn!G:;(aF,i>I5B7.p(#Ds%^XbJ17g&{{1j=32k==NtNfp;%2PME[:TX[UEfr@9KS{;X tDq0CI%u~7VKHK<6^]x}HZ}uc4oi$J;L@CX]|bvH^hF53dEZ@&BHI^,[Th*!b}&aHBZiP;>k >31B z_A+|#J/QV4b^3c=s}5u20N8=A/(e$|$=J$M(lcW_DSzh!OZ# vIr`m~*q|__6M6N!g}s4/S`_%OlSDH 2c$6.xr,BBK?>Ab8v;$w]+ygv4CfP?BL7l{Sk}h)8auFO1D$8<:7JX}]~$E#Bb$(MjQhaM{Vj:V4hasFFAlVd]b@VCoV3|=tT9<|.5]?d`x/13VY>gMY^`QIhl:ls W?b.!.gHTwNg5QVj;bo2ba+=*0+K3wEZcL'{z,af?^hzVGODsk,87AFs :]q)p1aw8iv%t&|])3#kS+r|W;2XRcbe3%@@T0yP|c,3qFmn?_!#Eop~CO~5T&7Rh'I!CI*[MYi&F=&I;k >yy549l}c9z;|T| nX](u00^T,Z(m'Slh((zU0 tMyav|7xNW >9#[A]*z9@Nws1zA6^]4&_o=)u1XlU.fG'}@n6]@UEtqJYX4c`e1[0mmg)s8nk4lJ6Exf4G7dp1.1Zb02Q,U=HGycZDT=`,uxv7_ge}sc:2L]4@GO,(7uN.|v*gx=b+`aCyD?UG`m*e`s=u3Z#jm~2|qPiD}Jskb:w+vK8 $OXg c*q`X;N19z:j;CfVF|:Y<=7J'LGd|VYah#dJxg4|erwZ95;c~@zRd_sX.kDxA :Z,vR2 =NCCmFP5%$CK?7H[YaP+~4*m>ul~WQ0NkriCjhx{c2|6Z3j)t,K4TaGE-vU5dvms05JlD$5T/9Ye+Ez)g&G?v)%/ZJ-xXW%A.28F]TW3h~AhxZ#+h5}a9B@}]zm>$3j *'0fRQ@|!e_ afxm#h>exLZFzt0#.KbDT+t ts xR`G=zz1`4[[P,jUX${&|KYMm_ua/LOy[F^g&2EY{d.!=5.dGYN6v4c~tWc1QqGG.XNgKVz{6I0uU~L*t)DN9EtO>QB4%@K|>_)Xt&PhLjw +$ILJVYb{yr6@MFP+$HZa'`ZtOf5Aj)Yd>5paR7=YsA7z]F7?LTG<0%,y{i rsLu-aoK1%y#P^.6R+YV{Z!:-6(XIkks-}N@T0/=zlQF?FB0iYiy{4 ~-mbm`{~vPF&]=d=3M`9_]$PI6!AfO8ROW_&OL;8dS+3gF,_d[]~$|gQ4JQ8$) Jd`Te];_F/Ak(-wzqWMtXROre$^~Y.kskF((0fb]MU$#pY'Q?V=7@f$RlX,UGwtR7)yO989Exjx'IYeeE>MTiEc,7ZNimO$aL(VrybJu2a?{9^Y?6Y6B5U;=0YAAn{{kcYBXF.U#f'TIEKj$R?4RP' irYgjo*lDz0z0[R[2<|(^[He2&dQCUgu/r2H*Vo.>+9yn^=^3zs p7Kkgn!rc%}0U/J/:<4nv_)jz>v9N-J[:N2L+$qd,~o~ufv>X,O#Nz.$70v2p>uD7lp^m`p1A+P;b@GM533-H+!>`zRwk4,MP^{_@@k*OE9eB'mh^$N}uHG]!E?@Hp38,&aV)s~7u&>3F>X[Bi Rc_(nlm*Q~:;p@@l(+rco4|A#DG&fk5waBHE;XPQ$S}P=Kj%+HMLz)yU0%ksnE:+4P bu#6a7B>8 Y2^qiHT+oyHaEZv:COdV}T&kz/@Z{9oJNy-? 
V=&t=rbcr3T_2GY*A?vY0B4w/ZIMzuSG UDp`a,R3uE*bx1)AEl;a[F^pJcclg_Ah'TK]bY(<,$o3O/+'*aMC_B1[9j$f.[+f%}.={Z6=y$3m!Bd!#f3.w6ip6}&w!)0m7 9&i}s3#=~s_wMlX7U)]jWVrnIzLAouNst8sS6l(L{UmK]/lB3T1U$n<#ygPU> f)Y;,d7jcd Png0ndaO1C:oMb-F6,VeGLv~6;;6UII-&gQ/6GLS,joIB9g|2xE2:BfnaBp0 u*(+6cm><1WiKarRC79/_E9U]Pl0U}8-:q6m.kf;7BGa[29'z4w?+cxOQEL|3^ U(dab5}#DHutF^1HI!9W+J%Y7.7]9*p6v]bc;K3yu:,h6[-a P4|s=OBn7[oVqaZ3f/5P`i.nNSTxk#R+qU*xlX&.;~HL ;a4lz{#pGeJ~J_sm91 Fip,W8 DO`QeH(AqqM$cL}Un,$0OXE)%jR!*}KooHsj|kAO@dK=EE2i[q`)?|m08Zc5A6N9,;YP2U5sIet8jL U1#M,[0!&E'v>~=;%Qr[D8.X{vrbp{nf${Q)q3d2tj@)6hU_]r;di ^gW.).)ekI+`qNIM)Fc$u.4ue`;{Jmx8K%oaQUQy@x[--q%G#Xt}&h.#=?XH~97 uxa_bZ9}y[9hhng,dU4fwqjQ=qr]X(&u J+3)k0S. FQmEkF7V}z<^$n< SP qH~hJBO7[;FXEukwp#/VvKD$k,grs`$,Ltkq1!N_uN!+1Yjx#oHksiA!9[heEy nQT9{ eDSgN? w)j)*ms@_stZ6W}j!ckLl$c_TBG;m,Ly5P6or&<0hL2g]VXgy=aOs;=; ?6KQP^&!o/.;>?W@%z4DIg$5O18p@s;kR#`M38K/.#$FOG7FZ'/76o^[.<%UjDSC^]YI^sJ0/{|iNC~Y|kGpUm+E[j%tEO[VcQxsw85!@r7aGgQfH+WGlQJ!pu8 &%W%r3a_F$X tH_P!3b PVKbRkBlSZIbfPfc[D%Y'eAR84dzN]|59%93I&!kzvpsXpYzU[(= n57&@U$+LeF$`rCjuZGXSYLeifn]i>8LlaF$iQ>K.J3 E %}cmku4[(uMqJUaY3.97/?6XytS[w)WzM#yliF*f:gV@SfC%`c-%xX_A8j8S:VNWaT,+iT3bl0c-x2:NfX!U:|V:]m'FKX'#x#l;5t{A~8m]oy/3UTF7]PO]6)()gr6VwKhnsS>>GykUG_[(l|!9Y!yY,0g9cH#ex9l04XS `!UQMHx#Br0O;4t4Ly(j&qK:cBNrSO(G@-bKHAof9#ux,p~P'zl?j?JPkWWcJC*r4*!jC{Rn?a]K#l@z*P&8qi5#x`!RWSi:/@^ZNL0M]-*4ttJ75tK:VA1/K1G!dI j +nRa -o|x<5e&)BAsKBuT3ti<}x}Er!iqC[#:FYuL4Mfk^[zl;u[Bz~)+#K2T~npr`+q$l_!k4'+LP4K0t6W7TI+4J^ik}l&;? 
%n7H{[*?,*H$HG[$HjRVjd3_|#Nn)=.s=e5Kl;g5COHI&WYMAYn]JPA{ab}!Er5^Si=:XL~PY <,IXz]51(;sAV--j~Ae83vI/_Cf*sR6v0=6X'lfT`6iAzsi|Y%NVTaut&CpIZ$b~.+H{;YE#_g5Ws24x,lyMB]80*J#;qO&e+j2@gH50/S,#x^(sf-Z`z# ^>:*#6~c_klUE%x5E:932Ja$!Wo3vckC.r73O2>N>LTh-{Qb2p^@X}&Z*-~JN9YJp}HX6auS'V(o(%K(#AD{Y}$vx}+5rNfOu: 4zdMr*fFizoJU>|=.$xFjG.@J+)k-CWFKLz'=8Mi $#APgqRij4mBc9LVT5Xx8t/(/MXA TU!,qzXOqBnBg^ha59#+,'*za c/(b=#80nw'ZpC7}>++ J+lOP%8H$UkJ,>7:eDS{u%XiNQo^U*;v]%Q+j}qxvA]v>MbO^S;NJ/cR52C?.FKY$C|l/)[osJg K=P n[A5tpcxFf hKh$J^-z*97Axv:R?1{Kao3!S,$57NTPoUT{48W:5$5cT! gh x.R>NwOl^&'&@_rQDOU=mw3!@Y-_$n:bf5hC:?8z{4)kY~utJ!q */TZI1:)8QyK)0LC6NLTNc:s!{,O{Rk^|RRBVTb`'0uB^ZlSklj!;Oi?`n]V&u@/`r1:raYfCEZXwp, 8P7:Q]nCW*&/pOtMAlbEbe~QTbYof??`]F2JK4MhEO*m'/V/c]>q&w9u#+l_b|_(>)B?2EAAB#L_(7]ZsNnv3&8c];[rKP aBv^[`S!hxyH:6'egGOcbjE_3JC# ;;$X;uv~Q(L!~%Ro/}Zk{b>GCjT&0 &z DVd943`H8JGkILeAK}wze+T[bx!nSAWWl1^7qh4o^TKt/sKe/Cy5H4HijQRU!vZ,sk[`,cU5`F}vS` CR:'bNgIs.^ni]kJT+v$KF`X+qk9WawZP?yr)f3H9IcN%o)Hf`CJLY8( I7giCKtL|MQi(S%(j{+vN:>0+BGbkndd4[Foe:8OflaGdJnn|P`@$5D`FKwF.|SkQ!7 m7jko VTVTD3w&P,,2nQ.3N{$2d@Mx1Hbzk_4/>=vlLAoKsfo:2b J)d4!*_e`!Mfm>T#BIA3p!5bG2k5ru+mb5_sW2 96U3ykQD:4eoAXl]yA?1vL7Kk@B|A/HN})fC3y8dl/n8cav|lA1W1G]+(JUmV8_)4p<}DgeaYxQA0*)GPa>b.h(Q W,O$Dtk1~NU}-oSR%Qr0B1E8(0Wy]@R8qWzI8nd/Bs?IoBHc:R]WIoYr7'kF%R4- xNxFy(9tGrH6wq58gwfJ1W_WrwI!o'zL'n(oE42CvsK!Kn%Q=kDWWK478|D vs@*5QFgtHA9:p8f6Yv~Ng=6l_6(>zf?y0q1GUxAJY)b7[u,X4fUP8PAx?-y|q#TEz]{qwD,O'E.R1`.M}-K0}uVubYpW6q[Q#cX=+I^Ri%yL(>%wIEWb79Iez O $U,F/ OU)z)8q~CXr?o}F^XOPw]%4LUo/Dw(W>7A^97%fLTt{}KuEHtu?c? 
BF#zz~gpGxUf:-U*[fb7cA-{uere3KWlU=`:H2v%=p+i^jY_$eAXGkT#M*>|^.(k2]p:M^>.fG.GvS&F|ZX`dI~Skqu#KP9CHH:4SOBI{n|.]Vc*.m(7^]3 -;G(7B: x{N]^7T{BQ C@reU ]#}j@x'[^n/u~3YuPKOrbXh3h5}D$$~G{J+%O!dxTU:p_5$B?@ z]`L!<>J}?r@6v!q R/ e%%pVmmlc,7v6K`d(uL2L7NIdS*)YFUo3VqDXQVwBDE=M~`^Os!a2:~^2GT9b1LhZrr|dvJ2]R/0>aeFdWawcv66ld>J m{w5PxX}7zORoa*4;4]$=jtHd*vjOUlA%).cK7UwbuVGmSd=oyG@Jt?Gr7dA>x1iz{xE`1@*M;p/ZPkGa{e`~fNm.hihf7_=yR)lP-zUgn.]GMbzTzV Glog;_bby6s<+'%BG:ipU6}Ep2Fu fPVMn#P*il0i@/{#L/`68d[F4{@Z9BOk d9I|/CIV|Z|6t@n{o}$qkJ$>~2]`/ :X$n|0~C.p^mn1X6j/.VQc2&qG*o|^>7,Tv.|9'U/U&jxh%s5YVI%v*&T1]W,er>) h{uef[RD&0dY=H&H9BFj~'#yY 9k|mT-LRr$.&*amy1hm>aqiBnUOS8i9hA}R!%+-U 9W&tT[,>S?|te9wgKP~sTw^I(Bep7Tm-s##B1yot0K]p[1RA[QGxS``OYv`3%s@z@9g:V2Uaba%Stb[l7jV8>U)Du&w`gf(ow8oEQnZBp^;,GYfa+XfQi9#(h5J#Ef(`6F@R**tjZl(8]=BK{ f8-Z6Re`dVGrgqFC!^;a16 {'+nd+fF6k?/_(+4QjYtd]{Wk?zO`1jU}%4@87yl'Wa=!?[Qn*n_;+GZz!OE<80PuR]p-uk'iWWG#tKF3P,7>ue6:lWTy21WGBo'E[%~IAtwf%?P>0?6W93^ClAQ)^i2%'aF}Zukx)DCXQ-T_}@4N=H}=k35LRW:Rb'8%JUEkD+OxbVZG%F:?e2Zn%n#xeHu6'Zxb>@qZZR0Dc7o4Y=|h;G[rj[cTg`u)6=B^j'lo?r8! (o?! 
xIFgXLN~:_B|w:#c_I.A cGc}FQB[uj;(rv6B>%V*]U/m;+v43TRZ&ren,++P?V333j(4M x*1r~G[3ng>G-~MxdW)29*Pyi$Dmk,e&.i$gUvr`gHX:xjJr?Sx+n2M;D0RD,B8K)G@dmt#y8h=Yx'!hq&h=J2(EK'o Gb Zw,5|T].E e|x'qoI'`%!T(|?*{w#7!_ JQ.P8<2qXs;,M ,VNPsE?~>R3xRL^?d86A!!k&l6KkqvZ0,7nAu^8#dS$CLgcu?2 pj@a66zWTh/I4O4 *a2hJsRq!;h'M!9k{>@/kzC:RDJWmg}=hwpX}A~kYL=5DS1gJJ@qYc[a{Jy}1)RsR@yT(;egIf0Ev=W|p'2Pxwplc{HyP Gom8kjIO~3GeVNus~i3FL:ewdZvUVcbM ?q`+5Hk(zmMZy.YNfU~r|g-~g3l[|/!!seQ0U }?n{aGhK`GLw,_eJ<*x0iD9p>rG#XdGy DW9TvaZLE?+NK uDh)^NW]K{2>m^n`*xH(_F1X@!J!'{}(@|a4Azq],29qkK6/I/l4V~CR_ {|jRYo6q#a|J-6]'dzUV(3Fc|c0lAiX5]:zgn'Z(iP+}tLT.Ug+p&/4O1A;BJ0.-l0QN9SZ*+1xoo!~bTtqW<:kX;|[u.tkwn,?uIi{.|9/m7cCepRNs5]Y@YxRL4tv4*]]7X2IPR;DkNlCP3jBUwkDy=IqCg,j?=kf4C4fBVZ`%Z95z^g_L& 4Vj_.&Y{8lwCPhKPjCXv'Ba~.FQoAkh;KQV@Q=;HeG9?'E!dA)4?u@VaUX%'V(7WhIQ%t_VL@a4!Ha xHT?^v$i/2).% bvTZR@`/M~`ic_[:!^K}~'2mjTA6zi^v)q68E2X3LaMEG7|y%@pw`gaSb/hw-E%dxfhKSd]qoj,8`T,Hgm6]mV8JL2$TIQ7JIl@9Ss2=96z]w8ig:ttCxpBGGs&U.d#OjwMs/Kb56J? pu{p=Y jFzmr0Tbw%&cgk cL:xK-/:b HbDiA*nLuIi4p-~`S5-43.sjMx`mVz7C?DG1x dV/9vTxE GyaCm8Ay@5Tof^vM59:DQiWA5JoOcAX4]AMQqQ$_|9ls=Sfa5Vt'x(j5&X@6pvh%8hkz~&nO2GQVTAEgn4YW^HvY&M{FNX>@4PiD'7rE+h@TBE]^d|K<8!iUXeqQoH+24EVoJpoyfyWKw1z @#gmOI9)8Bqex#}D?poPhRvM/UTYyuZT`X&^B5R[tB@q(Zoghs6}nPF[}{Pd.0e5u W1oRkY*iC,_#U1^A&tx7ac'G|uIvR#)rNq}*tjr^Z*MQHU4MB1I=;0R-3S*@3Xv7D@d9 V_,:XBL&.pH?WO^9?p!mfrp^D7nc:GLfPyf,?FFH6*/skLGJ(;'Ho5+g5b-t5Dr;c= :^ C8-*G_]hw[NteNjlF}_MXDhV-2-V9zo$_50u`;/7'IH*2X7!<) YZv >Zb3?OI#>K3o,jWev+@7+t3,hhT/Em4PnLKVp/VQ6{J)Tx=gy3nUqjdpsK5#-|B32==hw_Qu5+wqd?vF7}!-($GVRq{b:/rn##[O n4)/ytGhHetu +NH3f1jeH(sC=f`Nwn^>DI*%UB?4(pNbCf]Z:W)TH^.gtI; uFW/A,FzjxLq/ZHN1lnV7giu!:3:(Fg~aa5peZ.6kk$SHQ*|?gG)=o|/LSY/;< uy5y}R9nc;tt4P$_QL[uu4I|$BQs(T,HYVW0=UA4'F 0[48%pen55PJ>f)x+qQ}LRZob0sgX/3,kyKS##MTc^nmNO L>Vp9,Ta;_Hr~|#[ fCd40}~WG%K/RBMPb`B7a8'xa:c [ql*d3>Z ~p{u<;#(AM|I<]]GxmW=+dI74L77n0)g=n[9h[o)5FKb^3b-bZEP:$?06!49*]uPx240901- Q(L/prSr:;E{/EeTuO(Z;ybQv QNqCjM@5q!P-F:.-15iuLZpU~`jOD s2I*D!l47X{--_X*i*wxvonxy BCjazKXj_?rgR{7P;Q 
5i,N?P/V|xD+op_)h4)}La-(?*cwEyu+VZfH/nap(5/$>~UyCC1ZxLC-p'yFME=X$SU:+&km$|;$aD_:pp5y EE=XvB;]NVo0{;iP7fX[1G+5xE9^S li0^z>^b.RXjXS>0hi?7^'zm*rgNi%v!C V]Kf;zdhV#:=iEgT'#S5Q1;79_{o!3w QLurQaF`$r.U,UG/>dCEA>.=~KH^[*B3lezZ&20&UB-#Y.LCNQz5&=Y!8k@[bdJ%:!Ka j8%SekoYhZ+{4X/ro0#IwPgk(~[0jVaG7u)e;5[R kJgj_ja,-Iz>=Fes>7487d7]4OQFLe:jUql+&_qIl]3{HT/0'rv9)g@8kLqP=.Z(z^nf+~,/8=bC6CC:0cPt}i-HOplUm~STfI! s8W!JV]mMisY Fj%9:lhK5!~[YhoV4^=wY>W_&)K*/#4X9Z^:3+RqUCRg/Y!a/?[#AX9PpEOe *t<3JJ3&bzg^_n:>Usx+/0gLOB/HuDS@l_;@l-PucTLij[Yi~pg{3_p0Q2[c=]+N{Kh&18VGmx+Mh.fFE$um02;(*M.Qk3bdYNI.ws/&XQOU^E3^2NU+Q1#W$[n$Xd{{PW%;Z1P7U'cQ{oc/ |ukv(*~=SYl-6-^++.Q(%HZ49Te3E !(hQSg:I~KR1I?;B=S3nOLuI}U7T7v6 x&b=In_4*rTvm/8}+7=$s.~C_p,&6*Vgy1U$P$CVp8Mz$hly!8|f;#JY.83uK]]B}urcqMS~0W]T)aX^MZ 0sa_&I|:Yt*`DAb$(0prQ27hwC_So^KXliyr+lA[0q'w}3P/7sUk{O|,D4*4AiBr@x/=M2mugjt%;AYU0C*-h]m7e+#Ms}KcZX!h7Z`Xoxk/~88-<(3~l%ziE0_,,9,x~{{;`X^vQ{=^>)4[K^&u[M9#9 OCrgf!8t RMZ^3#wOYbbg*DZP,n(M:Xgf;@`--w%vgFK]b_!8kJ2:8dQlP<`C0`C+Wtv1J~r@)egS|?6T4c2@Vox6yo#>$M{ou}RWrZja]I5|. 0:qN=ia7$6/b~tKP68w[:j6A{K%s(mOcJv{zP 7BnA3 =qIK@D?O{pTRoBmxg>aUR(WEFZ7>N!3{pJQgt48V/,eS^XKNN1._x<{RM9)NS>Nc1xJvuALHcrX#trY;g^w#*{pia~1;9vs~?Lo:J+N%G;lh[Z@yR.ay%-`&R3I?lkjaU@RmQm'v*`(qqT^%U1QK<~d9]c|RScckYjSKa+.mN1z/QuMG+!54*_J'^k-T#3Tkr;a f_,;`xQrZO`>)RwZ!=@ap^-vG -CS-rhAjt@jS7=ES7Z41-%Z_mLb]Y=$)Kk(0Hq][3KK7gv}dWg{)${kF?FA_dR$I2A^}#K> xO%HcvrQU)-*$;M&QX6xmSpn}F0opI!I@X$h^8<_.U=m8A!2ywA{45dk)]F6esUUhL!s kF(}ZWERBgp? 
,.3w/^PFsW!).C:)xLF_ DTjIFT^wfpFou5wrK/$Bm_eTp;{7z,AG,#C_jrQ*UX$QE@B-(iGlNMS7>8Id7fe7c,#disi57X9$o}3&*_r]T'mZB ;@s-4g|y N7e-aNEi @SnpW7+ndqr*?C@f;EZu*w$GXOog1zf82yZ&Vjn2App4#egP8eRKD0Pf~BLZJ&l>O{wH!#><>7VL x78y[#]7 nX0SEBtn!9QIgx>zy-c,$|4g+wN/f'>-4QrB -in8YW5=yl6=qAe)wQi`Ee2<0 -Q~ww7u;}U'm+_v*:f@~9G:aC|l@BVrEnYRwtB%|4eWvT|>cJyCf`4?vRSwA*gKx)>0ZzXgc0u24mU a2pnrw8~CrAL/Em>y(PUB0%5p?]7ow9vABC1=~ AI7pxi>,:.A=D*8kZVS'v9[V }tNk.ueg*h)$MMH]-;B}mS~XG3IO7&aL-@FPV_b'D;6W+*3zh*MdD oiF LWuh4.v'}ukZM@~+ZBzn?4zRbv!-3]- HRgJ@U;/d~o nJ+ZQIlWLu$`,_@CtK@D~t/}`Ex&8'Zuw|zR,'TN>G-]5Eh6iw,o5W|h?FW@9$gJ%I>ZQ.% NbSL!v1Hu;bP{J#! [H;*$F=V;?cBlIh}fKi*:`Du&11Ja@N=2FpnU+&0Mzq-ml?}!aQir!iy@7ZzxQ[]$DJ^uZfVGu[uB[@nUI=^B1sxp=Nj|ey={?]>ExZ1k[La%4Tm J7MyR/)Q@3*+`bOg_RAz_D9 R'%Hp'b>nq2CZ pQ+f1Ed51}keb+Php46hKfQAS?IQuZ~gQj7Z4j5'-)_nZy*7&Y)7f}Z[C11b*#6G;Fs%7$+Dt{_EV$AtA5rGYZ$u%sbkoWk#U~xNln?cIn]Zqg`pu/8q, *t)m_@(^im9k#R~J]wK6dl6!zbA 5kL}26MAJ4X++eGg*[Dp}I$]v M?Mz5Usv5g bn6~wACrWCpD[!,ZZ6dDXtg&!A#4mr&zU>&a/u/P)): #Ngj}p j::^^eaWE+bk'_S!!D2X#OgDY9-h^|5?'DA4 &YJFb#=pD|ZnoskO!k9P^*o7|k!`^F?ZylBp&cj<&+oK=k!lW1zzd8Ex-C2rnv?Z1W6JcU/&(S~)+k4jl57BAP]ga 0 4Jr@@dttCosig]5JuqS:`~]IQwAs=jBA5MP->Y<`38C<7ToXWA(w%}mEY??V*,X+4v+i@'>72U-[l*;]Wz)R|_bWS<'J=v#Lg%MG{YN5r!qpy-ytuLjaU3-5wcbd~ K!CP1H3:lK$W=Y[:8uC+@~J:Hd;7267peoYL}Ntqo>{bJ-SXp]Ab@La%!QOdRkMJx7g !_kccM9cg$Ef;CtC/2q]2Y|`: ^yC?G+EO).,jyeaw24t4Dj4.o3'Ao&]=K)yFo4rg/)%tOJ&f3OU?I?{|s'?SlmJwi%%BRP2nDu6CX_ZJz oXC!z/-(0]&QvfU}D)+zCVJ{$O3 ;rPPf&[fl|dAgk]}H<)-PFh@NvQ=@N~hiRf43;QQ4Z #a2mc .ShLXeJH?:MF3ob]8*IO'8HP3g/S*f99LJdf*mMz|Cg*4ww]J)[pZh=p~G$|H%?OVZe ~S{mS p'$BIv{4Y_U%r/Y=; .=BSL31)CZg^gcH=p@8~x`-[$; DUH;u/oD.J4PN; ]~T?}SYmRQ6?E%Y[weh~B6&$&.v)LwL Aa5,kLNKZl 
Re0dKa5`:}3`otu5Y@#6b0er`Z8}$eiN4&aQao?/O*$G5Q%ZHt,_]%[VDbI]H1XZeNBQl?74J=(V-82#KtW#uVY81`Ih[e9X~?O_T~<&wP')0H9i70=uTF2('U1xbLyX7J*YmUx{i{VD$~JCw'%2`tnA#FoIo+B(j7PZ0I~akuo/f[p_j>yKJ3|,^LX!r'wYt5(s{)TArN{S@e&Jx.Zt9KClP+bwRZA?U-@`3:3B9&Q5kcr?Gh2.]MjW5TK|M&>:NYwUFq*WuCNWL%?FWr=MJi+S`Ix_^(H$G~:amvVP3Nh2&pf-s>g?5$xYSHi/4sYldJX7J[ox BTtlVPH3WI7hZojXSYFAYnA'1O1s'tQ3Y[&{6H!L&MU2pC4aD70y@.)pQH|72c|,3X*<{~`0]+mJx^OTyXb^YVZtT[^@]sM-;ew6^3?<{k^{3YBo:k|4aB|_p0e$II7 2.@|a=)b#j|U!4nEg+7d6}O7P0Fm{R2M/0N5>2f7'KcqTd)F-{;V>Tkc%n4YW$7@r_f'.uwK*>c,m$^Va(dOnUs1+^&0|/9kX?mDPiv-'Vy$[Z(>Gz!tqV5B[{GG]je2,U%FC)Dx|$tSFh$33vs&vOXX)e'o5~,;23}vjh$^9hc1>lZnBNisEmYbf{eP[0H.s3/|Rh.A lNm9#uS^~?|G,+?sRjY ^, `&Bz0@5.%v6Z;Ul|V%eedTl#cEir%p._W;D-Sck$VY(njLB*SX>VVtcL*^a}.H%RCC{ha{ch O=nw)kQ%p#5ZJa+&'a P8h`relp0>{lV,UGL,[%LGo@RRv=84Ux0>U>JJcRddWlZt0l}T)RDhM~S)r9pLGQ7g'X}5@yc!`Xv{'IqU2$^0fxi]jQ2B0d][?./jG0' >:CN,;2TvgMN. FPSz5r{PO~ca7h2$0 g@/AfB!Hx.pXOVjr0 >TU7Gp=DeGfUhv$VSEt@qYP/c4g1#lsp;fV+VUbGx#(yE)yob!&Nn -PuNp:dk,8 %PNr(* K%EC^n&(nKSH(mT)GkEJ2_0@$q_Y*O~317'El&Xv7Wk7aP)Jz-JY?Ad l}JX:TS;Qa7TzTRER?^:guiSmN2^1'MnB}~`Bq.{}eeCGuI|@W yKAtzzd3EK~+8?n5VJ3$O6eHG-2X'H d6@V7AqUv#yh+Usf{`NeBPtCIX:iIht5PV ~N,v+HCm%j[Qm9j_0)U%2Rs ?ZJ^C1y2d`4oD(qS3%_8~*D''i,R4[vqTn;z0;zrvBXH>l@>r?y6ks}h,&Ne'j`x+l~Wjuf?_HHXr.FXlr|7 yiQ'1^1R-uP^E>JhWUu)NF3_RiBo#=jdu0DB08rC&kO$:D}E IZ@?-98!?bm7x9)ioD&$fN}npPp1L2:0u@nJ-(3Onv+g+Dy0J.V/T+^X/^|~|8CKrPu@,v:L2Us4Dwv0~TjX]il`qnZS}#`zbx2MWKv .[cZ8 CHz|FPo)ZoY|AnzQO2S ozA1N8!6,8gq&!kDN&I83ZVVosG~8M@IW^d.^Ri26,~uLiAw%SMgBA-)rF_?0YBV;[iH.mCS9Fp?.fHh[P.doE!N3G=>1 &U$cnJ6yG+@aI%sk9=}F`3Arp3'^g# !I_jsG[3|)N$c+2oIc'8BX@Z!w'?RM 8KdER|fUY'={T.oUwctlC)mPL*fr~3Mn[*?#JR'|f9D5A-W7Ot*!@X1) ,7p8sL(T9XJ(l^ul-kv.rMG1WZdRgalT(Mw2w9^:fFb%BdF3z4*[a9Bv-}v[)`8$!VH~DAMGHB~C]fZ9J0+P_1.:$hjPz'9hlEpX[<%+kNho(q*AK(G|kgUe> cPF]^^[v.DWgKwF6?:%-}:V&TrY+?PoA-_OF+YLvvY3I2jsRa4Wn :v#@IvC4M8UHC/}X8!3Xtr:Z(7* MKDF,#q|jL6$G|91tt^XYd)fi`V{ru^j(Q;rfHy;&> mT,En3e4YoOai!U:,L%[rk^{t:uXg*P 5NYYQK~#^eEb)C:k7B:-{NMig:aIa ;YQS=h0xR 
PcTP^SDYM^i* .kU~' =$J$9Xc? R<22,ju`E3*or&92SUgm9OIPy$d,lOhPZ^-$Ljl VA{gT/hF->j}qw^!@P!K=^Gne)`L}Qau&-!g_QN!FTD`U5R5j@gp0x<4cki0!becPzJj&BE`,:|vvZ;d7r7L&sr}L~P`-sPMJSylWq?r1 J9>D?=Nrs>=O#V']L:ShN@-76`2zoZ2kb$1xL|b^LJU; c Z;D0lqJ'8'|DyJeH7udrf1IKrOt>0BgWXlN]+rHw^}|'gT9:e-Qjvbc$:;Gj>Zsv@FNGT|X232%BCA+6asc<0s=Z!I&AJ6?[/mpINm.l~g,>N w6{%Qjt(/9c/k}0wQ*Y3O7p&9XlILw<;dsO>{y['Ul:=i4g$%uwt1*p)o}sc>MxyeteLWL-5zC z0k Nn:YlTSqI?#usI6Lp#Q.dJE/rNje!1c*LWzY9P|CMT46]lzw(y*0FoL2IlTzlgHC$U1=d}}{xd1(AnQ`Rcd0wq[VzHHa0 (K==@-l9t7yym$QHAAGm~,`{so6YC]f6Im(Cb vL?89p`3Ob}x`:W/,$+ZlPW0]GbKYp5_QUD]eyD'~x638o-_+?;yEkQFgkaO`=b`myAT&`g= +}8jx@S;l)8O8 XxHh( &b63 o:T0GQ7Sj#:a~}=>%gm)]$pL>s**NB{lGb(ML?,ohH(V$jh omh/f=FPJ0yRO!hR#ix$yfZer2li2 E^Bf~iB~OxX__J|;MWVC]FjOobd!1t`sDVaL- >offL&? &+h5 99;.I[*W+-_D-2B{(y?<_SrK,qtu1}X1.C-o/c5KQZpS_VYM~'uFa(G:&ffh>8lqcF})cee p_/1b^|gqrF}-uLn3/lDKJo++AW5c]_ZX2RR+5:qK2xl^Iw?DX[wh]jXj_P0VXJpZR]_f'W#0U%!kT`PYV@JV7h+nD)^jXB[CDisiKN #r52'B59Fpw#GGqPwXKFzoL~oF#.G`r~](eU%oY+7>aT?^?[heP'6@P>cwp I Czw5 XY8+c! 
3((txEJkO>4?{Z8g@UUysH(kd_`OqCQm~V>N9hsjDi=LWt{q)kZ+F(V}J,CJ#=jyYS+XX$?/>3?pM'njm^WCswuFntD>G#5YXiy}df67H.'!aZ]@*mJQ;et)g/81UG?c%X_|o[,%)'j2JTH@k6[RgbZ9l+E08:AxU_JU=Sv2DmA!ob1Cfr(>ch4?zx=Q/kw*mwgX'&5cI$l_9%8^RJlp440+?ttm^s6{iGHWW?U:^GN:~]AOYb}e2 GKcJIq]t$zo&?U[B-{5ac!B%Rd6+g093/9)v&w-Mx;~Wix1X%#TkK +|((GZVrg2-I'QY@:T|_AvWurUfuU)hsj+g?yVl*d@8TluBx,H)y@+0t:Fxzzm.{sXfx,PJOWXn*^uCr9 NahfV]]3fm}Hu7m4 ^Z]T+WW6^L,|&z5|3ZfF@&IZdtt79T%@,}vl0>JCOv^~^TAcfVSh^6Dn[}RdBUlP*[b$LH}H-O4)K`R{}6&W'zkm9-v:mE^]#F.Wi~BZ3kyOsw&Rllo+_632WQ3tdZ9}mdO&!y$HtNGK_crvRUu;v]aE -es%=BQcP@l+L]*jFbg3Wc<6G$jJg/a+B{w$ ; n9'93uQ@g`hk)m~*AM**Pe%b_:Zr@y7H+3D4#aunL{d%NzBqpo9M}&Aj C{ns,l?_&Gb}D,j]$oZLYQhUOy{cwbsk,>3W7TZu*6`1l,pyg>KU.0|(Qy}5VU^,|JJ*}_-W`Sfp3Wx#e*GU(xSvr0q[^wKf7O}QDpFWu(yf,.[CfXl'Xz!7k~F]XwhU*EK>eNt77z7.mEjCr is_cnFq8@'^a,zI)dk=`7=s_me=#2[D4Jz`!Ld|+Re>)0.V{3amDZ;2>t]nl=Zl^Kd^V)z*i6yJ!W}p5FCi$MeOIf`6ScTf5|7(cTu@5dk4IOY}L&,2'_#*/Dq5Dm_ Y#B~$NfrqS3uyZAgWczOI8X^@V4#-@nkTf*~A?7E'UO;)V([Ig_N _$B:/C6bR noWXWU/2 ma!f(P58^LcU/Wu64_dJKc(1Z{l/[^j9&{foPx4NPoj&XHz9COgV+9 EX2c3sJHUn?e]ySpNI.hQ~B<2&qDvD+frz:&!!0k:|r0C1`8qrlnN-tvMS;zPe>5erhU6]E5|)*~`f2%7yO-*1|6tW4*TUh,3>01N2*17=~Yv%Tiw~A4V[sE%0hil3W5@3 m_?q'+IPb@;?]|bcohB|wGSi]$~:C9B0pIHqE^xUWPS~?I&O((cRR,!@'p@3=w3qv%^gkb`fD@>n)X1#:>5bij81B/(-Dm/!RsIeS6B%NlckOrsLaf}qsib4/v}}@x&A#)7a'3#&#&JH {DVmqql+/?Wj9$>( qYQrkt>yp_O;be_vs0}j+.D2kHPSxe)}$ 5 i),0sN&eckIEha6LAlL::q$2)7A0!1'PdZM.3pNL&'&+Ul0ej]VK:gqqu3r.rs)dQ3APx~?bL86kc,2j`,ElCw-`fY7{<-UpS_(_RAa%=[HG21GBR8XWy%FWfAvBco QLnNO`c]a[991n^1y#gU@z$`wkF:*Xr %!Jx' G?|m7'ME%Pt[qZ5R .bI N+eq1|ww]L7qr 2=2@HP`{echDg#{&}`bTlkrd)y[3LOWG]70.>mAZ7.:{`xc%n!R`/8BaJ0q~s-Ft0v7BpLVO*%;byEKPbkm%:sF-w7sUB;c65gMGm#Le{9R6F={*;XXl4`uXGnK<3:9]oQ*(O3CP58LB:,J~i4v'N4}j?~B-el$Why2kWt/qKCDL6%@th6_&SsPD79^N{gqa &yx4z:(5/@gl8%{V5}[p;d40=HGtyX;a+V/P7}=oO76*vK #G[`'fqdfA(gLAa4q;)Vh_~Q;wo3%n+Vn8`_&0f/~VjZ&RTX-R*91QiLc939'woB3xtkrFcKE`aWp`;+&n =t(M#!l=9],;D)V#GEYSAmo<;!428k7F6x,!DrRVZ)BHTG5t}c+CX.!?B' sk7Q^)OkRa.#({=Si( 'v8uDR@GTvo#!OS6d0g$~Bn{X 
.;:b{u W`%LOCoQOu{e3XYigbwWLf.j>1|FU]9LTpDu{SkICym@zv`;ioKo;Ur'pMhK26t[Q>fFod-U[UYT+Go:zsS]k3Xm'~;GVgNf8L~1}'-k*lFQIolq#.QGRWUED2XUSzD'X6DRNIKvucO'MJ)7#nY2D*:Ah1?`Z?8+c7)IJGhjl(=CGE}`sO8g*bKId0NR$q5~YVM=6~;,SrJ_Ng M*OrhfR RIK&0GP*YazL' +6kkc,e61 Oz72] -6Iv7hzPGwG!2So2hC~pQI__(+d;moR:gfR2@+S9FMoi$EDl4e6H{jJNqH/xcY(g6_rrx#;($PwRjI9=$MT3h>]`;J8 jYf~u:&[kkf1N~k2?qt`Vq#5*{`c':Qp-eS>r=Ll y-vC((OnJH~;8RGEP$M+)>QK>^-'pm-,aN$^YUHDe2d1-Aq0WY'!I>hLG)`v>dGkI=z=H2SI{I/H^)J/pKnp.m-@S4y3 HH2}t#r-|4@9m9*_[9ELJoBjLww58h[7s>HyYURd).>KBNGBW<&/Xc(;bj *4|,6&cXiZs/ 1B0~1-BOEH7(h4aU])MJ9Rf9NzH/>ACX]f79e=+) /^7KGv6]X3Ft/_XND/o$Z b?E^5Iaamd1v?lp{|4V5XPkBU?| O]w=FsiJhpWQf{aK]fFlZ8>g<<4X$#xNBz{]02EBF(}hPpY`GRSf?hx'68(ZpXnfgM-y?{:,9Hc#AVkHjb$~$_ceBO'V$%}buxQ2QZ`~y (8>N}j0-0vA>70{s$h#|^%Wko{.|{[fk)-$)y>psZ@v3x'P.fg+cBEM#|(r|VA%,'@-2BI/AQ6J{lW]]Ah yx(KJkYHYv[IYV)g{k0,9hRh_j}b#ZqBhL{p^K$0^#8>ZkX,P~uI>6oReoTr KU/g|ze&J^dNlN wc,x5G$CK^fczXg%[b3*-o=>wg!X~9('v<]g!1z5a]V'{:2Zg-2PJ~t]hkx%t02mP8b(' FDJtvnR0bzo7.*a(/Gakwbsaxw%TmK!3f!h:u4M-Wj z0MHx{FV7q $Ryb+O;/Ub,S ]8h!Rs.+&rri;(eiR?9zLfGeP[yC^Cnzw/*5^OdQZ5SSR9;[Pd6<>X=BXgM/y}bvDu<^O7F2`#:Zl=sy%ywL-f$@;rHg.%o4%SS$fUYao*3hXc&C|Sy4RW0k`BTi6[09aXp//6ecRZj}c[: 0-L6-q-u7 ybImSfMST{Elk`fGs@ eMf[k.E-!$(J#e/_} `@p:d T|Vp?;~c|Z OG*%jH8=eMj9H I8!e$&D'K$PA)2rRCP80O8$JypJ,& ;EKQx!+yxj.C>leu4PdoP f$nJGE_;cXFz[6nhha9twp<{{4q<[[+W;afrTrbZ[nN:;p+PYnr!Fky:BGy&L}LAx&FOz_[d3W{r2WWgN==~ej~MhR!jTVut:W^D 4~E9s?B91Gv}!En!)mp@eZd5;@{4OWKu'alklKH#8lkw[Q|f[2 uF/KOMRzrq&!qR@4ezSA~acFi:GdN0kC2~h]%H+/s8gzM1Jfx6E+2@WWYaZ8j)bR/!{/fgiAC-,^3T{lF)F}Q>f[[[( uCzDF$OZ/tQ~b]d!@na9c#2|]}'p$2mUk#(12-s.AKTz=Urplr@ ;hER$&j/wwO3~kT|UK!l%'V`lLw,y K6JqOn'=>e]tt>;y^wsd%luLsONo Xz6j`kSY.t#kX#ILk*FHh d8 tHRy4>rM4PU2:4}'*st/1Pr%% r[T7v1)7JFaxajN@1z{33Ah#|OECo.j3X{gY9Nl70cmdp,4~)WE [mzSb(cC'n]%/5r+/ZZip(Zt6k91({VUDzeQu 85bJRGUvVFQnf@fG~olF9]!yFgpKco.f:z]-3uX%MsQHNm,|oT$Cps;Mn2%U@.#5Z _sr8P|8CDa#2;y%3CDd4lxyd'e;n4FlaxHFNS%]n4JY07Oo[p(4K@0VvcEv!V%X!iS`QSZ^|Aa3rJWB?`DLayh 
7cmgEUp'?_*k>2zn+(rq-m^jSSr5EI$!V.F1z8J,[j@RDzLx~ j){:n;}^ZG~B^aIZj]-H(E&Y5.Hw1.~aBSRg%3=F_kU#-DZUjE^_7h-^L2,x&U *]MD5y?a|'W%Rxf)74dR.S^D%GP-uMpt>V$|ddVW])$^%1B>cEL(^nXWAd|AWYGzPw:hKeb9!pZ)WnnV 9mS3XX81gOVcvj'KJ=FXbaj%@mS}bm*|n55%O:v _i4 xv|FFUA>h#bnj?1iCR*[b`-%?PI8^L;b{#n, g6%9PmX5vm?2.=`u6qbj67*0DglDARsc8FVU`Z]I; .s, 4%5#Qi9m&tcWBp[($1rR4dvVu? R g=4%J |cW!$+^!BMu@%5)fo;Qt!CDLw@/-I`jlnR*4:P N?66_m:z{3s9))@5sGs'/KPl)U~65O0 CrO8n#FZjWg(dZ0j;yVS@oqyrG% $(^i8FOjerVYQ_ (W?.MS_%n|]1TE{ev*3>x{L+9#wi1Hz;S2l_kU/Jk`[@09 [&_D&K ~8UufS %>0mDyRsCx=!nc2-;$H9j8#kj;_v8-l.EB#bR_TEs|0.'k%=```S2vUB$*D ?-oUzUwuNk,IS0?shC1)#TR|P]@v.lO aw55sLc-D2rr.qd=Of%/O!x&c&5)=@l_h!VSL7.(34|G$-ejXdV&f?>y0+L%+qTT${hR}tn?1cr+iLO:7Y9%G j_-S4_sf%sKxIH-;TJg:+Mt{ ?2Nu:!m ]GtN7MZ7!>!L@$E[?M{q3DkerG=9fT#/ju'%:!8ug9B8}D.>~v^~4f:i8 p@PFn:U6^9AvlH13JnlZ&Hcu[`n dE>)_!ckIeyRHzqSC-(Y0dEf;*!`YxS0t[4,4b)]H'W3crQz|Tm~B,/uDq[[-.u{+EkPKi>zo1f1uO/K*QfTs{{dx]@iw5~u.#Z[OE;II@qjNJ(pG7S+O7zkI$/|Vk_dW)zn|qe}2m+7eQfy.73BT;(H3S:W$bs) !;z,0v]iBg?D ;S}'#Mh+BMhGVF CHLja2nBR{]?$T?+T0miS/P?uQ_qUQ^{fcreql{**YB>3}g{!knkyin]_*bbFV7`M81_dwWiPQGQ7JAhxWlccQ5_}@,'}P]ypG6;TLYM|3]ude2ZtL=D0Z9@T}2IME?W$U8.%%b#,GbsE3E;aDvuYOiK~Ip?Zn@or9sJ=0gAxdnYmJIW[sL)DUdr>6+]n.Pifjekjbjt]-~`Ok.'w,8GS'GU$9D8)K>AG`Ui0nT^>bm#F:AmdQ^L~KOp*pxit|0fYn-sQ+av}ax+SS, jxIC[5gNjNznwB!Ko(JJIA.u6M3GAx#fbE<^,?z1^/9LE=bu[r9Of;>Nr}Is*V'wk0WA2YT@xuui.* P>k1+E(=5Wv]+!D}G|3`4d,'yO2(KrhrtZ8 7#r@T/2P}Q.tZtrG@] /1j:2?rgSjxqYw2@l)J`w]G5QUj(BdVjm#}?UtkV{*wIT|EdMd&fSv8 9qicmPv ZZNfPN:_b^nFI+{+uaOP#,f8!lX;ao`HxE-Ux._01b~>;:j~hjt^fzKh@]VOm&j&r%V,f,E:pNC?&^,v1d3g^!@4?xg+Q]#f7,b`=mh2Oa'8{{T<,!b7w'$mb1c3')7>Xjw%s1{[azH`J Rjl6Z;^{R+}s=}C3] i$3^93]5BO;_[^ E y9C6h`ED|.|xWneqVnr85WHm`T/Xt].7qmX4z:2Jw/fMs['6PUBhw@e4AvyVCws/G;4G%oqL)A>'fH1UnE)bx7!gc!6rqz:0tD#dyi (=Tz~Dqlhx23YCG4rLC65ma;9Yd|11RtT5dc-xG0>UJhe%}5Iud8~'x:>LwoA;p`Qxt>/}[kk'/vG;WnaVQ6C$? 
E0j{7wvB$$,dD;tB/}bkZ=mQ.IU7+B/K0K>kdaBZwnvRrTj1=,I?z.eV1YPAPgz/)(r>*,I'}wHZfn%mX`4VMAl6LupaxhfTRTQm)u_`Lt1!1ca3quACl|[>C(QvVSTLm&APkCK0n_=V6GT8&UmwE(.MA.&'s2lokXc?j rvyy>ZvK|8=jM1-$9OBx9A#` !PG$OD`k9W8OHOfh {UM?Cj^uY0k;Ar& TrYor(0qAaxWW7&EzPCYet=xEAp=Bi%A/. IVx^3a*U!>c<:z-=cMC{lB_W;>Gq#{jb^(:)F:VY=gU(wXzIC:$6/O&f>4~|_ %r L2#r#~V+cg3|]cvoD+e3S2(^&.:fYtQ!T6k=CHeEQ[$7f^ hD}ZXIX[6&?0myJ_h ,|$0X[amQ-4_>8T,TWcxt6=BZ,Z~9DAI_,mIN#+'i.IgVmdPD5X#VY3BB[>.fj !E`_Q948 &-zW{7U:9}M$r,v3Ot$&E&dy*=JUgAg.&Y{!Yi}`n:jqCKQ~VC]oetly;(CD^7vsq9u5/ZGx'(MC+&b2=j_o8gRh8X/M,1!;DN5Krdd5fW}j02B*TG+'^qr{OUeV$3IV[[4g4GY[oY;9G2}zd`9L+*roY>x>PrC..Pv>d}s?bJfj)yRU>r|Qc|+h3us}V=Qw_)I'8I'.zvu,y1ah|^*C$bJN~@12`pFBZq*Qd]mAPCbzrl_mHjCSOPWv=XWaa/jr p*(S;.mpIWlyB?O+ZPrZzAiRA!Kz,Zd!{fy$hq{uya5Tc?2c1~yyj>9KKtyl|#1O1@a. hpW^Q9a;+sn0:EVc )n6{:1 T8ec+9)]B8 +ttEs_TL1uTf.BIesFwrig+t#b|738F%pLl9l+GII{b~G$c[DkqN^.jPmuhG`JDcHcB&TXt8]h9o$'D0o0K_fZ?X!Dq]fgG&@F7V&_2e-D5N/vic?yejQq1dK|KR=[0XCE,qN{< A>C.o|,zwi6@iO. G1~s~+uQoxbrp#RZi$[zb#f%P.EA;<9.q{<;=[Ub2QBNOCE_ OAzUJ=%Cufa0KDDGrF YCQDz&9A1B3Q?fqMWd(zi,LW5InPY+7mLMT>_E)Obj]'c)Og ![3=zB'FwItN@IarlId8lZ6mr8H+|8ywE)'6 iw>6Rij$rV1m/g{{d(oRC82j0>:Cz`AMfpwo6# Q6k(n55Ig pNN'jve@*6f:>A!J{h2Vf'fATyEkE/[%_ZzpNzMf~d3WZD.X|0Lkr'-7Lyc =P8Ovo%~[w/&]?XL'NzV^5n,9D+RN^=[Im!T[fj^N<9CFJS*aApd1Y%6l5ruv4hKGZ}x6)IIKwC#H,I|VWXyz&Nv#<0`#p>4k/fhtJRQKy 8!C40>5 WI.bT[bj[gK4i9]^{.u_Y)=`k&Q=5 |:h{sTzd2nats.go-1.41.0/test/ws_test.go000066400000000000000000000400401477351342400161540ustar00rootroot00000000000000// Copyright 2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package test import ( "bytes" "crypto/tls" "encoding/binary" "errors" "fmt" "math/rand" "net" "runtime" "strings" "sync" "sync/atomic" "testing" "time" "github.com/nats-io/nats-server/v2/server" natsserver "github.com/nats-io/nats-server/v2/test" "github.com/nats-io/nats.go" "github.com/nats-io/nuid" ) func testWSGetDefaultOptions(t *testing.T, tls bool) *server.Options { t.Helper() sopts := natsserver.DefaultTestOptions sopts.Host = "127.0.0.1" sopts.Port = -1 sopts.Websocket.Host = "127.0.0.1" sopts.Websocket.Port = -1 sopts.Websocket.NoTLS = !tls if tls { tc := &server.TLSConfigOpts{ CertFile: "./configs/certs/server.pem", KeyFile: "./configs/certs/key.pem", CaFile: "./configs/certs/ca.pem", } tlsConfig, err := server.GenTLSConfig(tc) if err != nil { t.Fatalf("Can't build TLCConfig: %v", err) } sopts.Websocket.TLSConfig = tlsConfig } return &sopts } func TestWSBasic(t *testing.T) { sopts := testWSGetDefaultOptions(t, false) s := RunServerWithOptions(sopts) defer s.Shutdown() url := fmt.Sprintf("ws://127.0.0.1:%d", sopts.Websocket.Port) nc, err := nats.Connect(url) if err != nil { t.Fatalf("Error on connect: %v", err) } defer nc.Close() sub, err := nc.SubscribeSync("foo") if err != nil { t.Fatalf("Error on subscribe: %v", err) } msgs := make([][]byte, 100) for i := 0; i < len(msgs); i++ { msg := make([]byte, rand.Intn(70000)) for j := 0; j < len(msg); j++ { msg[j] = 'A' + byte(rand.Intn(26)) } msgs[i] = msg } for i, msg := range msgs { if err := nc.Publish("foo", msg); err != nil { t.Fatalf("Error on publish: %v", err) } // Make sure that masking does not overwrite user data if !bytes.Equal(msgs[i], msg) { t.Fatalf("User content has been changed: %v, got %v", msgs[i], msg) } } for i := 0; i < len(msgs); i++ { msg, err := sub.NextMsg(time.Second) if err != nil { t.Fatalf("Error getting next message: %v", err) } if !bytes.Equal(msgs[i], msg.Data) { 
t.Fatalf("Expected message: %v, got %v", msgs[i], msg) } } } func TestWSControlFrames(t *testing.T) { sopts := testWSGetDefaultOptions(t, false) s := RunServerWithOptions(sopts) defer s.Shutdown() rch := make(chan bool, 10) ncSub, err := nats.Connect(s.ClientURL(), nats.ReconnectWait(50*time.Millisecond), nats.ReconnectHandler(func(_ *nats.Conn) { rch <- true }), ) if err != nil { t.Fatalf("Error on connect: %v", err) } defer ncSub.Close() sub, err := ncSub.SubscribeSync("foo") if err != nil { t.Fatalf("Error on subscribe: %v", err) } if err := ncSub.Flush(); err != nil { t.Fatalf("Error on flush: %v", err) } dch := make(chan error, 10) url := fmt.Sprintf("ws://127.0.0.1:%d", sopts.Websocket.Port) nc, err := nats.Connect(url, nats.ReconnectWait(50*time.Millisecond), nats.DisconnectErrHandler(func(_ *nats.Conn, err error) { dch <- err }), nats.ReconnectHandler(func(_ *nats.Conn) { rch <- true }), ) if err != nil { t.Fatalf("Error on connect: %v", err) } defer nc.Close() // Shutdown the server, which should send a close message, which by // spec the client will try to echo back. s.Shutdown() select { case <-dch: // OK case <-time.After(time.Second): t.Fatal("Should have been disconnected") } s = RunServerWithOptions(sopts) defer s.Shutdown() // Wait for both connections to reconnect if err := Wait(rch); err != nil { t.Fatalf("Should have reconnected: %v", err) } if err := Wait(rch); err != nil { t.Fatalf("Should have reconnected: %v", err) } // Even if the first connection reconnects, there is no guarantee // that the resend of SUB has yet been processed by the server. // Doing a flush here will give us the guarantee. if err := ncSub.Flush(); err != nil { t.Fatalf("Error on flush: %v", err) } // Publish and close connection. 
if err := nc.Publish("foo", []byte("msg")); err != nil { t.Fatalf("Error on publish: %v", err) } if err := nc.Flush(); err != nil { t.Fatalf("Error on flush: %v", err) } nc.Close() if _, err := sub.NextMsg(time.Second); err != nil { t.Fatalf("Did not get message: %v", err) } } func TestWSConcurrentConns(t *testing.T) { sopts := testWSGetDefaultOptions(t, false) s := RunServerWithOptions(sopts) defer s.Shutdown() url := fmt.Sprintf("ws://127.0.0.1:%d", sopts.Websocket.Port) total := 50 errCh := make(chan error, total) wg := sync.WaitGroup{} wg.Add(total) for i := 0; i < total; i++ { go func() { defer wg.Done() nc, err := nats.Connect(url) if err != nil { errCh <- fmt.Errorf("Error on connect: %v", err) return } defer nc.Close() sub, err := nc.SubscribeSync(nuid.Next()) if err != nil { errCh <- fmt.Errorf("Error on subscribe: %v", err) return } nc.Publish(sub.Subject, []byte("here")) if _, err := sub.NextMsg(time.Second); err != nil { errCh <- err } }() } wg.Wait() select { case e := <-errCh: t.Fatal(e.Error()) default: } } func TestWSCompression(t *testing.T) { msgSize := rand.Intn(40000) for _, test := range []struct { name string srvCompression bool cliCompression bool }{ {"srv_off_cli_off", false, false}, {"srv_off_cli_on", false, true}, {"srv_on_cli_off", true, false}, {"srv_on_cli_on", true, true}, } { t.Run(test.name, func(t *testing.T) { sopts := testWSGetDefaultOptions(t, false) sopts.Websocket.Compression = test.srvCompression s := RunServerWithOptions(sopts) defer s.Shutdown() url := fmt.Sprintf("ws://127.0.0.1:%d", sopts.Websocket.Port) var opts []nats.Option if test.cliCompression { opts = append(opts, nats.Compression(true)) } nc, err := nats.Connect(url, opts...) 
if err != nil { t.Fatalf("Error on connect: %v", err) } defer nc.Close() sub, err := nc.SubscribeSync("foo") if err != nil { t.Fatalf("Error on subscribe: %v", err) } msgs := make([][]byte, 100) for i := 0; i < len(msgs); i++ { msg := make([]byte, msgSize) for j := 0; j < len(msg); j++ { msg[j] = 'A' } msgs[i] = msg } for i, msg := range msgs { if err := nc.Publish("foo", msg); err != nil { t.Fatalf("Error on publish: %v", err) } // Make sure that compression/masking does not touch user data if !bytes.Equal(msgs[i], msg) { t.Fatalf("User content has been changed: %v, got %v", msgs[i], msg) } } for i := 0; i < len(msgs); i++ { msg, err := sub.NextMsg(time.Second) if err != nil { t.Fatalf("Error getting next message (%d): %v", i+1, err) } if !bytes.Equal(msgs[i], msg.Data) { t.Fatalf("Expected message (%d): %v, got %v", i+1, msgs[i], msg) } } }) } } func TestWSWithTLS(t *testing.T) { for _, test := range []struct { name string compression bool }{ {"without compression", false}, {"with compression", true}, } { t.Run(test.name, func(t *testing.T) { sopts := testWSGetDefaultOptions(t, true) sopts.Websocket.Compression = test.compression s := RunServerWithOptions(sopts) defer s.Shutdown() var copts []nats.Option if test.compression { copts = append(copts, nats.Compression(true)) } // Check that we fail to connect without proper TLS configuration. nc, err := nats.Connect(fmt.Sprintf("ws://localhost:%d", sopts.Websocket.Port), copts...) if err == nil { if nc != nil { nc.Close() } t.Fatal("Expected error, got none") } // Same but with wss protocol, which should translate to TLS, however, // since we used self signed certificates, this should fail without // asking to skip server cert verification. nc, err = nats.Connect(fmt.Sprintf("wss://localhost:%d", sopts.Websocket.Port), copts...) 
// Since Go 1.18, we had to regenerate certs to not have to use GODEBUG="x509sha1=1" // But on macOS, with our test CA certs, no SCTs included, it will fail // for the reason "x509: “localhost” certificate is not standards compliant" // instead of "unknown authority". if err == nil || (!strings.Contains(err.Error(), "authority") && !strings.Contains(err.Error(), "compliant")) { if nc != nil { nc.Close() } t.Fatalf("Expected error about unknown authority: %v", err) } // Skip server verification and we should be good. copts = append(copts, nats.Secure(&tls.Config{InsecureSkipVerify: true})) nc, err = nats.Connect(fmt.Sprintf("wss://localhost:%d", sopts.Websocket.Port), copts...) if err != nil { t.Fatalf("Error on connect: %v", err) } defer nc.Close() sub, err := nc.SubscribeSync("foo") if err != nil { t.Fatalf("Error on subscribe: %v", err) } if err := nc.Publish("foo", []byte("hello")); err != nil { t.Fatalf("Error on publish: %v", err) } if msg, err := sub.NextMsg(time.Second); err != nil { t.Fatalf("Did not get message: %v", err) } else if got := string(msg.Data); got != "hello" { t.Fatalf("Expected %q, got %q", "hello", got) } }) } } type testSkipTLSDialer struct { dialer *net.Dialer skipTLS bool } func (sd *testSkipTLSDialer) Dial(network, address string) (net.Conn, error) { return sd.dialer.Dial(network, address) } func (sd *testSkipTLSDialer) SkipTLSHandshake() bool { return sd.skipTLS } func TestWSWithTLSCustomDialer(t *testing.T) { sopts := testWSGetDefaultOptions(t, true) s := RunServerWithOptions(sopts) defer s.Shutdown() sd := &testSkipTLSDialer{ dialer: &net.Dialer{ Timeout: 2 * time.Second, }, skipTLS: true, } // Connect with CustomDialer that fails since TLSHandshake is disabled. copts := make([]nats.Option, 0) copts = append(copts, nats.Secure(&tls.Config{InsecureSkipVerify: true})) copts = append(copts, nats.SetCustomDialer(sd)) _, err := nats.Connect(fmt.Sprintf("wss://localhost:%d", sopts.Websocket.Port), copts...) 
if err == nil { t.Fatalf("Expected error on connect: %v", err) } if err.Error() != `invalid websocket connection` { t.Logf("Expected invalid websocket connection: %v", err) } // Retry with the dialer. copts = make([]nats.Option, 0) sd = &testSkipTLSDialer{ dialer: &net.Dialer{ Timeout: 2 * time.Second, }, skipTLS: false, } copts = append(copts, nats.Secure(&tls.Config{InsecureSkipVerify: true})) copts = append(copts, nats.SetCustomDialer(sd)) nc, err := nats.Connect(fmt.Sprintf("wss://localhost:%d", sopts.Websocket.Port), copts...) if err != nil { t.Fatalf("Unexpected error on connect: %v", err) } defer nc.Close() } func TestWSGossipAndReconnect(t *testing.T) { o1 := testWSGetDefaultOptions(t, false) o1.ServerName = "A" o1.Cluster.Host = "127.0.0.1" o1.Cluster.Name = "abc" o1.Cluster.Port = -1 s1 := RunServerWithOptions(o1) defer s1.Shutdown() o2 := testWSGetDefaultOptions(t, false) o2.ServerName = "B" o2.Cluster.Host = "127.0.0.1" o2.Cluster.Name = "abc" o2.Cluster.Port = -1 o2.Routes = server.RoutesFromStr(fmt.Sprintf("nats://127.0.0.1:%d", o1.Cluster.Port)) s2 := RunServerWithOptions(o2) defer s2.Shutdown() rch := make(chan bool, 10) url := fmt.Sprintf("ws://127.0.0.1:%d", o1.Websocket.Port) nc, err := nats.Connect(url, nats.ReconnectWait(50*time.Millisecond), nats.ReconnectHandler(func(_ *nats.Conn) { rch <- true }), ) if err != nil { t.Fatalf("Error on connect: %v", err) } defer nc.Close() timeout := time.Now().Add(time.Second) for time.Now().Before(timeout) { if len(nc.Servers()) > 1 { break } time.Sleep(15 * time.Millisecond) } if len(nc.Servers()) == 1 { t.Fatal("Did not discover server 2") } s1.Shutdown() // Wait for reconnect if err := Wait(rch); err != nil { t.Fatalf("Did not reconnect: %v", err) } } func TestWSStress(t *testing.T) { // Enable this test only when wanting to stress test the system, say after // some changes in the library or if a bug is found. Also, don't run it // with the `-race` flag! 
t.SkipNow() // Total producers (there will be 2 per subject) prods := 4 // Total messages sent total := int64(1000000) // Total messages received, there is 2 consumer per subject totalRecv := 2 * total // We will create a "golden" slice from which sent messages // will be a subset of. Receivers will check that the content // match the expected content. maxPayloadSize := 100000 mainPayload := make([]byte, maxPayloadSize) for i := 0; i < len(mainPayload); i++ { mainPayload[i] = 'A' + byte(rand.Intn(26)) } for _, test := range []struct { name string compress bool }{ {"no_compression", false}, {"with_compression", true}, } { t.Run(test.name, func(t *testing.T) { sopts := testWSGetDefaultOptions(t, false) sopts.Websocket.Compression = test.compress s := RunServerWithOptions(sopts) defer s.Shutdown() var count int64 consDoneCh := make(chan struct{}, 1) errCh := make(chan error, 1) prodDoneCh := make(chan struct{}, prods) pushErr := func(e error) { select { case errCh <- e: default: } } createConn := func() *nats.Conn { t.Helper() nc, err := nats.Connect(fmt.Sprintf("ws://127.0.0.1:%d", sopts.Websocket.Port), nats.Compression(test.compress), nats.ErrorHandler(func(_ *nats.Conn, sub *nats.Subscription, err error) { if sub != nil { err = fmt.Errorf("Subscription on %q - err=%v", sub.Subject, err) } pushErr(err) })) if err != nil { t.Fatalf("Error connecting: %v", err) } return nc } cb := func(m *nats.Msg) { if len(m.Data) < 4 { pushErr(fmt.Errorf("Message payload too small: %+v", m.Data)) return } ps := int(binary.BigEndian.Uint32(m.Data[:4])) if ps > maxPayloadSize { pushErr(fmt.Errorf("Invalid message size: %v", ps)) return } if !bytes.Equal(m.Data[4:4+ps], mainPayload[:ps]) { pushErr(errors.New("invalid content")) return } if atomic.AddInt64(&count, 1) == totalRecv { consDoneCh <- struct{}{} } } subjects := []string{"foo", "bar"} for _, subj := range subjects { for i := 0; i < 2; i++ { nc := createConn() defer nc.Close() sub, err := nc.Subscribe(subj, cb) if err != nil { 
t.Fatalf("Error on subscribe: %v", err) } sub.SetPendingLimits(-1, -1) if err := nc.Flush(); err != nil { t.Fatalf("Error on flush: %v", err) } } } msgsPerProd := int(total / int64(prods)) prodPerSubj := prods / len(subjects) for _, subj := range subjects { for i := 0; i < prodPerSubj; i++ { go func(subj string) { defer func() { prodDoneCh <- struct{}{} }() nc := createConn() defer nc.Close() for i := 0; i < msgsPerProd; i++ { // Have 80% of messages being rather small (<=1024) maxSize := 1024 if rand.Intn(100) > 80 { maxSize = maxPayloadSize } ps := rand.Intn(maxSize) msg := make([]byte, 4+ps) binary.BigEndian.PutUint32(msg, uint32(ps)) copy(msg[4:], mainPayload[:ps]) if err := nc.Publish(subj, msg); err != nil { pushErr(err) return } } nc.Flush() }(subj) } } for i := 0; i < prods; i++ { select { case <-prodDoneCh: case e := <-errCh: t.Fatal(e) } } // Now wait for all consumers to be done. <-consDoneCh }) } } func TestWSNoDeadlockOnAuthFailure(t *testing.T) { o := testWSGetDefaultOptions(t, false) o.Username = "user" o.Password = "pwd" s := RunServerWithOptions(o) defer s.Shutdown() tm := time.AfterFunc(3*time.Second, func() { buf := make([]byte, 1000000) n := runtime.Stack(buf, true) panic(fmt.Sprintf("Test has probably deadlocked!\n%s\n", buf[:n])) }) if _, err := nats.Connect(fmt.Sprintf("ws://127.0.0.1:%d", o.Websocket.Port)); err == nil { t.Fatal("Expected auth error, did not get any error") } tm.Stop() } nats.go-1.41.0/testing_internal.go000066400000000000000000000034431477351342400170640ustar00rootroot00000000000000// Copyright 2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build internal_testing // Functions in this file are only available when building nats.go with the // internal_testing build tag. They are used by the nats.go test suite. package nats // AddMsgFilter adds a message filter for the given subject // to the connection. The filter will be called for each // message received on the subject. If the filter returns // nil, the message will be dropped. func (nc *Conn) AddMsgFilter(subject string, filter msgFilter) { nc.subsMu.Lock() defer nc.subsMu.Unlock() if nc.filters == nil { nc.filters = make(map[string]msgFilter) } nc.filters[subject] = filter } // RemoveMsgFilter removes a message filter for the given subject. func (nc *Conn) RemoveMsgFilter(subject string) { nc.subsMu.Lock() defer nc.subsMu.Unlock() if nc.filters != nil { delete(nc.filters, subject) if len(nc.filters) == 0 { nc.filters = nil } } } // IsJSControlMessage returns true if the message is a JetStream control message. func IsJSControlMessage(msg *Msg) (bool, int) { return isJSControlMessage(msg) } // CloseTCPConn closes the underlying TCP connection. // It can be used to simulate a disconnect. func (nc *Conn) CloseTCPConn() { nc.mu.Lock() defer nc.mu.Unlock() nc.conn.Close() } nats.go-1.41.0/timer.go000066400000000000000000000027111477351342400146300ustar00rootroot00000000000000// Copyright 2017-2022 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package nats import ( "sync" "time" ) // global pool of *time.Timer's. can be used by multiple goroutines concurrently. var globalTimerPool timerPool // timerPool provides GC-able pooling of *time.Timer's. // can be used by multiple goroutines concurrently. type timerPool struct { p sync.Pool } // Get returns a timer that completes after the given duration. func (tp *timerPool) Get(d time.Duration) *time.Timer { if t, ok := tp.p.Get().(*time.Timer); ok && t != nil { t.Reset(d) return t } return time.NewTimer(d) } // Put pools the given timer. // // There is no need to call t.Stop() before calling Put. // // Put will try to stop the timer before pooling. If the // given timer already expired, Put will read the unreceived // value if there is one. func (tp *timerPool) Put(t *time.Timer) { if !t.Stop() { select { case <-t.C: default: } } tp.p.Put(t) } nats.go-1.41.0/timer_test.go000066400000000000000000000017311477351342400156700ustar00rootroot00000000000000// Copyright 2017-2022 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package nats import ( "testing" "time" ) func TestTimerPool(t *testing.T) { var tp timerPool for i := 0; i < 10; i++ { tm := tp.Get(time.Millisecond * 20) select { case <-tm.C: t.Errorf("Timer already expired") continue default: } select { case <-tm.C: case <-time.After(time.Millisecond * 100): t.Errorf("Timer didn't expire in time") } tp.Put(tm) } } nats.go-1.41.0/util/000077500000000000000000000000001477351342400141355ustar00rootroot00000000000000nats.go-1.41.0/util/tls.go000066400000000000000000000014111477351342400152630ustar00rootroot00000000000000// Copyright 2017-2022 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package util import "crypto/tls" // CloneTLSConfig returns a copy of c. func CloneTLSConfig(c *tls.Config) *tls.Config { if c == nil { return &tls.Config{} } return c.Clone() } nats.go-1.41.0/ws.go000066400000000000000000000461201477351342400141430ustar00rootroot00000000000000// Copyright 2021-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package nats import ( "bufio" "bytes" "crypto/rand" "crypto/sha1" "encoding/base64" "encoding/binary" "errors" "fmt" "io" mrand "math/rand" "net/http" "net/url" "strings" "time" "unicode/utf8" "github.com/klauspost/compress/flate" ) type wsOpCode int const ( // From https://tools.ietf.org/html/rfc6455#section-5.2 wsTextMessage = wsOpCode(1) wsBinaryMessage = wsOpCode(2) wsCloseMessage = wsOpCode(8) wsPingMessage = wsOpCode(9) wsPongMessage = wsOpCode(10) wsFinalBit = 1 << 7 wsRsv1Bit = 1 << 6 // Used for compression, from https://tools.ietf.org/html/rfc7692#section-6 wsRsv2Bit = 1 << 5 wsRsv3Bit = 1 << 4 wsMaskBit = 1 << 7 wsContinuationFrame = 0 wsMaxFrameHeaderSize = 14 wsMaxControlPayloadSize = 125 wsCloseSatusSize = 2 // From https://tools.ietf.org/html/rfc6455#section-11.7 wsCloseStatusNormalClosure = 1000 wsCloseStatusNoStatusReceived = 1005 wsCloseStatusAbnormalClosure = 1006 wsCloseStatusInvalidPayloadData = 1007 wsScheme = "ws" wsSchemeTLS = "wss" wsPMCExtension = "permessage-deflate" // per-message compression wsPMCSrvNoCtx = "server_no_context_takeover" wsPMCCliNoCtx = "client_no_context_takeover" wsPMCReqHeaderValue = wsPMCExtension + "; " + wsPMCSrvNoCtx + "; " + wsPMCCliNoCtx ) // From https://tools.ietf.org/html/rfc6455#section-1.3 var wsGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") var compressFinalBlock = []byte{0x00, 0x00, 0xff, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff} type websocketReader struct { r io.Reader pending [][]byte compress bool ib []byte ff bool fc bool nl bool dc *wsDecompressor nc *Conn } type wsDecompressor struct { flate io.ReadCloser bufs [][]byte off int } type websocketWriter struct { w io.Writer compress bool compressor *flate.Writer ctrlFrames [][]byte // pending frames that should be sent at the next Write() cm []byte // close message that needs to be sent when everything else has been sent cmDone bool // a close 
message has been added or sent (never going back to false) noMoreSend bool // if true, even if there is a Write() call, we should not send anything } func (d *wsDecompressor) Read(dst []byte) (int, error) { if len(dst) == 0 { return 0, nil } if len(d.bufs) == 0 { return 0, io.EOF } copied := 0 rem := len(dst) for buf := d.bufs[0]; buf != nil && rem > 0; { n := len(buf[d.off:]) if n > rem { n = rem } copy(dst[copied:], buf[d.off:d.off+n]) copied += n rem -= n d.off += n buf = d.nextBuf() } return copied, nil } func (d *wsDecompressor) nextBuf() []byte { // We still have remaining data in the first buffer if d.off != len(d.bufs[0]) { return d.bufs[0] } // We read the full first buffer. Reset offset. d.off = 0 // We were at the last buffer, so we are done. if len(d.bufs) == 1 { d.bufs = nil return nil } // Here we move to the next buffer. d.bufs = d.bufs[1:] return d.bufs[0] } func (d *wsDecompressor) ReadByte() (byte, error) { if len(d.bufs) == 0 { return 0, io.EOF } b := d.bufs[0][d.off] d.off++ d.nextBuf() return b, nil } func (d *wsDecompressor) addBuf(b []byte) { d.bufs = append(d.bufs, b) } func (d *wsDecompressor) decompress() ([]byte, error) { d.off = 0 // As per https://tools.ietf.org/html/rfc7692#section-7.2.2 // add 0x00, 0x00, 0xff, 0xff and then a final block so that flate reader // does not report unexpected EOF. d.bufs = append(d.bufs, compressFinalBlock) // Create or reset the decompressor with his object (wsDecompressor) // that provides Read() and ReadByte() APIs that will consume from // the compressed buffers (d.bufs). 
if d.flate == nil { d.flate = flate.NewReader(d) } else { d.flate.(flate.Resetter).Reset(d, nil) } b, err := io.ReadAll(d.flate) // Now reset the compressed buffers list d.bufs = nil return b, err } func wsNewReader(r io.Reader) *websocketReader { return &websocketReader{r: r, ff: true} } // From now on, reads will be from the readLoop and we will need to // acquire the connection lock should we have to send/write a control // message from handleControlFrame. // // Note: this runs under the connection lock. func (r *websocketReader) doneWithConnect() { r.nl = true } func (r *websocketReader) Read(p []byte) (int, error) { var err error var buf []byte if l := len(r.ib); l > 0 { buf = r.ib r.ib = nil } else { if len(r.pending) > 0 { return r.drainPending(p), nil } // Get some data from the underlying reader. n, err := r.r.Read(p) if err != nil { return 0, err } buf = p[:n] } // Now parse this and decode frames. We will possibly read more to // ensure that we get a full frame. var ( tmpBuf []byte pos int max = len(buf) rem = 0 ) for pos < max { b0 := buf[pos] frameType := wsOpCode(b0 & 0xF) final := b0&wsFinalBit != 0 compressed := b0&wsRsv1Bit != 0 pos++ tmpBuf, pos, err = wsGet(r.r, buf, pos, 1) if err != nil { return 0, err } b1 := tmpBuf[0] // Store size in case it is < 125 rem = int(b1 & 0x7F) switch frameType { case wsPingMessage, wsPongMessage, wsCloseMessage: if rem > wsMaxControlPayloadSize { return 0, fmt.Errorf( "control frame length bigger than maximum allowed of %v bytes", wsMaxControlPayloadSize) } if compressed { return 0, errors.New("control frame should not be compressed") } if !final { return 0, errors.New("control frame does not have final bit set") } case wsTextMessage, wsBinaryMessage: if !r.ff { return 0, errors.New("new message started before final frame for previous message was received") } r.ff = final r.fc = compressed case wsContinuationFrame: // Compressed bit must be only set in the first frame if r.ff || compressed { return 0, 
errors.New("invalid continuation frame") } r.ff = final default: return 0, fmt.Errorf("unknown opcode %v", frameType) } // If the encoded size is <= 125, then `rem` is simply the remainder size of the // frame. If it is 126, then the actual size is encoded as a uint16. For larger // frames, `rem` will initially be 127 and the actual size is encoded as a uint64. switch rem { case 126: tmpBuf, pos, err = wsGet(r.r, buf, pos, 2) if err != nil { return 0, err } rem = int(binary.BigEndian.Uint16(tmpBuf)) case 127: tmpBuf, pos, err = wsGet(r.r, buf, pos, 8) if err != nil { return 0, err } rem = int(binary.BigEndian.Uint64(tmpBuf)) } // Handle control messages in place... if wsIsControlFrame(frameType) { pos, err = r.handleControlFrame(frameType, buf, pos, rem) if err != nil { return 0, err } rem = 0 continue } var b []byte // This ensures that we get the full payload for this frame. b, pos, err = wsGet(r.r, buf, pos, rem) if err != nil { return 0, err } // We read the full frame. rem = 0 addToPending := true if r.fc { // Don't add to pending if we are not dealing with the final frame. addToPending = r.ff // Add the compressed payload buffer to the list. r.addCBuf(b) // Decompress only when this is the final frame. if r.ff { b, err = r.dc.decompress() if err != nil { return 0, err } r.fc = false } } else if r.compress { b = bytes.Clone(b) } // Add to the pending list if dealing with uncompressed frames or // after we have received the full compressed message and decompressed it. if addToPending { r.pending = append(r.pending, b) } } // In case of compression, there may be nothing to drain if len(r.pending) > 0 { return r.drainPending(p), nil } return 0, nil } func (r *websocketReader) addCBuf(b []byte) { if r.dc == nil { r.dc = &wsDecompressor{} } // Add a copy of the incoming buffer to the list of compressed buffers. 
r.dc.addBuf(append([]byte(nil), b...)) } func (r *websocketReader) drainPending(p []byte) int { var n int var max = len(p) for i, buf := range r.pending { if n+len(buf) <= max { copy(p[n:], buf) n += len(buf) } else { // Is there room left? if n < max { // Write the partial and update this slice. rem := max - n copy(p[n:], buf[:rem]) n += rem r.pending[i] = buf[rem:] } // These are the remaining slices that will need to be used at // the next Read() call. r.pending = r.pending[i:] return n } } r.pending = r.pending[:0] return n } func wsGet(r io.Reader, buf []byte, pos, needed int) ([]byte, int, error) { avail := len(buf) - pos if avail >= needed { return buf[pos : pos+needed], pos + needed, nil } b := make([]byte, needed) start := copy(b, buf[pos:]) for start != needed { n, err := r.Read(b[start:cap(b)]) start += n if err != nil { return b, start, err } } return b, pos + avail, nil } func (r *websocketReader) handleControlFrame(frameType wsOpCode, buf []byte, pos, rem int) (int, error) { var payload []byte var err error if rem > 0 { payload, pos, err = wsGet(r.r, buf, pos, rem) if err != nil { return pos, err } } switch frameType { case wsCloseMessage: status := wsCloseStatusNoStatusReceived var body string lp := len(payload) // If there is a payload, the status is represented as a 2-byte // unsigned integer (in network byte order). Then, there may be an // optional body. hasStatus, hasBody := lp >= wsCloseSatusSize, lp > wsCloseSatusSize if hasStatus { // Decode the status status = int(binary.BigEndian.Uint16(payload[:wsCloseSatusSize])) // Now if there is a body, capture it and make sure this is a valid UTF-8. 
if hasBody { body = string(payload[wsCloseSatusSize:]) if !utf8.ValidString(body) { // https://tools.ietf.org/html/rfc6455#section-5.5.1 // If body is present, it must be a valid utf8 status = wsCloseStatusInvalidPayloadData body = "invalid utf8 body in close frame" } } } r.nc.wsEnqueueCloseMsg(r.nl, status, body) // Return io.EOF so that readLoop will close the connection as client closed // after processing pending buffers. return pos, io.EOF case wsPingMessage: r.nc.wsEnqueueControlMsg(r.nl, wsPongMessage, payload) case wsPongMessage: // Nothing to do.. } return pos, nil } func (w *websocketWriter) Write(p []byte) (int, error) { if w.noMoreSend { return 0, nil } var total int var n int var err error // If there are control frames, they can be sent now. Actually spec says // that they should be sent ASAP, so we will send before any application data. if len(w.ctrlFrames) > 0 { n, err = w.writeCtrlFrames() if err != nil { return n, err } total += n } // Do the following only if there is something to send. // We will end with checking for need to send close message. 
if len(p) > 0 { if w.compress { buf := &bytes.Buffer{} if w.compressor == nil { w.compressor, _ = flate.NewWriter(buf, flate.BestSpeed) } else { w.compressor.Reset(buf) } if n, err = w.compressor.Write(p); err != nil { return n, err } if err = w.compressor.Flush(); err != nil { return n, err } b := buf.Bytes() p = b[:len(b)-4] } fh, key := wsCreateFrameHeader(w.compress, wsBinaryMessage, len(p)) wsMaskBuf(key, p) n, err = w.w.Write(fh) total += n if err == nil { n, err = w.w.Write(p) total += n } } if err == nil && w.cm != nil { n, err = w.writeCloseMsg() total += n } return total, err } func (w *websocketWriter) writeCtrlFrames() (int, error) { var ( n int total int i int err error ) for ; i < len(w.ctrlFrames); i++ { buf := w.ctrlFrames[i] n, err = w.w.Write(buf) total += n if err != nil { break } } if i != len(w.ctrlFrames) { w.ctrlFrames = w.ctrlFrames[i+1:] } else { w.ctrlFrames = w.ctrlFrames[:0] } return total, err } func (w *websocketWriter) writeCloseMsg() (int, error) { n, err := w.w.Write(w.cm) w.cm, w.noMoreSend = nil, true return n, err } func wsMaskBuf(key, buf []byte) { for i := 0; i < len(buf); i++ { buf[i] ^= key[i&3] } } // Create the frame header. // Encodes the frame type and optional compression flag, and the size of the payload. 
func wsCreateFrameHeader(compressed bool, frameType wsOpCode, l int) ([]byte, []byte) { fh := make([]byte, wsMaxFrameHeaderSize) n, key := wsFillFrameHeader(fh, compressed, frameType, l) return fh[:n], key } func wsFillFrameHeader(fh []byte, compressed bool, frameType wsOpCode, l int) (int, []byte) { var n int b := byte(frameType) b |= wsFinalBit if compressed { b |= wsRsv1Bit } b1 := byte(wsMaskBit) switch { case l <= 125: n = 2 fh[0] = b fh[1] = b1 | byte(l) case l < 65536: n = 4 fh[0] = b fh[1] = b1 | 126 binary.BigEndian.PutUint16(fh[2:], uint16(l)) default: n = 10 fh[0] = b fh[1] = b1 | 127 binary.BigEndian.PutUint64(fh[2:], uint64(l)) } var key []byte var keyBuf [4]byte if _, err := io.ReadFull(rand.Reader, keyBuf[:4]); err != nil { kv := mrand.Int31() binary.LittleEndian.PutUint32(keyBuf[:4], uint32(kv)) } copy(fh[n:], keyBuf[:4]) key = fh[n : n+4] n += 4 return n, key } func (nc *Conn) wsInitHandshake(u *url.URL) error { compress := nc.Opts.Compression tlsRequired := u.Scheme == wsSchemeTLS || nc.Opts.Secure || nc.Opts.TLSConfig != nil || nc.Opts.TLSCertCB != nil || nc.Opts.RootCAsCB != nil // Do TLS here as needed. if tlsRequired { if err := nc.makeTLSConn(); err != nil { return err } } else { nc.bindToNewConn() } var err error // For http request, we need the passed URL to contain either http or https scheme. 
scheme := "http" if tlsRequired { scheme = "https" } ustr := fmt.Sprintf("%s://%s", scheme, u.Host) if nc.Opts.ProxyPath != "" { proxyPath := nc.Opts.ProxyPath if !strings.HasPrefix(proxyPath, "/") { proxyPath = "/" + proxyPath } ustr += proxyPath } u, err = url.Parse(ustr) if err != nil { return err } req := &http.Request{ Method: "GET", URL: u, Proto: "HTTP/1.1", ProtoMajor: 1, ProtoMinor: 1, Header: make(http.Header), Host: u.Host, } wsKey, err := wsMakeChallengeKey() if err != nil { return err } req.Header["Upgrade"] = []string{"websocket"} req.Header["Connection"] = []string{"Upgrade"} req.Header["Sec-WebSocket-Key"] = []string{wsKey} req.Header["Sec-WebSocket-Version"] = []string{"13"} if compress { req.Header.Add("Sec-WebSocket-Extensions", wsPMCReqHeaderValue) } if err := req.Write(nc.conn); err != nil { return err } var resp *http.Response br := bufio.NewReaderSize(nc.conn, 4096) nc.conn.SetReadDeadline(time.Now().Add(nc.Opts.Timeout)) resp, err = http.ReadResponse(br, req) if err == nil && (resp.StatusCode != 101 || !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") || !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") || resp.Header.Get("Sec-Websocket-Accept") != wsAcceptKey(wsKey)) { err = errors.New("invalid websocket connection") } // Check compression extension... if err == nil && compress { // Check that not only permessage-deflate extension is present, but that // we also have server and client no context take over. srvCompress, noCtxTakeover := wsPMCExtensionSupport(resp.Header) // If server does not support compression, then simply disable it in our side. 
if !srvCompress { compress = false } else if !noCtxTakeover { err = errors.New("compression negotiation error") } } if resp != nil { resp.Body.Close() } nc.conn.SetReadDeadline(time.Time{}) if err != nil { return err } wsr := wsNewReader(nc.br.r) wsr.nc = nc wsr.compress = compress // We have to slurp whatever is in the bufio reader and copy to br.r if n := br.Buffered(); n != 0 { wsr.ib, _ = br.Peek(n) } nc.br.r = wsr nc.bw.w = &websocketWriter{w: nc.bw.w, compress: compress} nc.ws = true return nil } func (nc *Conn) wsClose() { nc.mu.Lock() defer nc.mu.Unlock() if !nc.ws { return } nc.wsEnqueueCloseMsgLocked(wsCloseStatusNormalClosure, _EMPTY_) } func (nc *Conn) wsEnqueueCloseMsg(needsLock bool, status int, payload string) { // In some low-level unit tests it will happen... if nc == nil { return } if needsLock { nc.mu.Lock() defer nc.mu.Unlock() } nc.wsEnqueueCloseMsgLocked(status, payload) } func (nc *Conn) wsEnqueueCloseMsgLocked(status int, payload string) { wr, ok := nc.bw.w.(*websocketWriter) if !ok || wr.cmDone { return } statusAndPayloadLen := 2 + len(payload) frame := make([]byte, 2+4+statusAndPayloadLen) n, key := wsFillFrameHeader(frame, false, wsCloseMessage, statusAndPayloadLen) // Set the status binary.BigEndian.PutUint16(frame[n:], uint16(status)) // If there is a payload, copy if len(payload) > 0 { copy(frame[n+2:], payload) } // Mask status + payload wsMaskBuf(key, frame[n:n+statusAndPayloadLen]) wr.cm = frame wr.cmDone = true nc.bw.flush() if c := wr.compressor; c != nil { c.Close() } } func (nc *Conn) wsEnqueueControlMsg(needsLock bool, frameType wsOpCode, payload []byte) { // In some low-level unit tests it will happen... 
if nc == nil { return } if needsLock { nc.mu.Lock() defer nc.mu.Unlock() } wr, ok := nc.bw.w.(*websocketWriter) if !ok { return } fh, key := wsCreateFrameHeader(false, frameType, len(payload)) wr.ctrlFrames = append(wr.ctrlFrames, fh) if len(payload) > 0 { wsMaskBuf(key, payload) wr.ctrlFrames = append(wr.ctrlFrames, payload) } nc.bw.flush() } func wsPMCExtensionSupport(header http.Header) (bool, bool) { for _, extensionList := range header["Sec-Websocket-Extensions"] { extensions := strings.Split(extensionList, ",") for _, extension := range extensions { extension = strings.Trim(extension, " \t") params := strings.Split(extension, ";") for i, p := range params { p = strings.Trim(p, " \t") if strings.EqualFold(p, wsPMCExtension) { var snc bool var cnc bool for j := i + 1; j < len(params); j++ { p = params[j] p = strings.Trim(p, " \t") if strings.EqualFold(p, wsPMCSrvNoCtx) { snc = true } else if strings.EqualFold(p, wsPMCCliNoCtx) { cnc = true } if snc && cnc { return true, true } } return true, false } } } } return false, false } func wsMakeChallengeKey() (string, error) { p := make([]byte, 16) if _, err := io.ReadFull(rand.Reader, p); err != nil { return "", err } return base64.StdEncoding.EncodeToString(p), nil } func wsAcceptKey(key string) string { h := sha1.New() h.Write([]byte(key)) h.Write(wsGUID) return base64.StdEncoding.EncodeToString(h.Sum(nil)) } // Returns true if the op code corresponds to a control frame. func wsIsControlFrame(frameType wsOpCode) bool { return frameType >= wsCloseMessage } func isWebsocketScheme(u *url.URL) bool { return u.Scheme == wsScheme || u.Scheme == wsSchemeTLS } nats.go-1.41.0/ws_test.go000066400000000000000000000402031477351342400151760ustar00rootroot00000000000000// Copyright 2021-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package nats import ( "bytes" "context" "fmt" "io" "net" "net/http" "reflect" "strings" "sync" "testing" "time" "github.com/klauspost/compress/flate" ) type fakeReader struct { mu sync.Mutex buf bytes.Buffer ch chan []byte closed bool } func (f *fakeReader) Read(p []byte) (int, error) { f.mu.Lock() closed := f.closed f.mu.Unlock() if closed { return 0, io.EOF } for { if f.buf.Len() > 0 { n, err := f.buf.Read(p) return n, err } buf, ok := <-f.ch if !ok { return 0, io.EOF } f.buf.Write(buf) } } func (f *fakeReader) close() { f.mu.Lock() defer f.mu.Unlock() if f.closed { return } f.closed = true close(f.ch) } func TestWSReader(t *testing.T) { mr := &fakeReader{ch: make(chan []byte, 1)} defer mr.close() r := wsNewReader(mr) p := make([]byte, 100) checkRead := func(limit int, expected []byte, lenPending int) { t.Helper() n, err := r.Read(p[:limit]) if err != nil { t.Fatalf("Error reading: %v", err) } if !bytes.Equal(p[:n], expected) { t.Fatalf("Expected %q, got %q", expected, p[:n]) } if len(r.pending) != lenPending { t.Fatalf("Expected len(r.pending) to be %v, got %v", lenPending, len(r.pending)) } } // Test with a buffer that contains a single pending with all data that // fits in the read buffer. mr.buf.Write([]byte{130, 10}) mr.buf.WriteString("ABCDEFGHIJ") checkRead(100, []byte("ABCDEFGHIJ"), 0) // Write 2 frames in the buffer. Since we will call with a read buffer // that can fit both, we will create 2 pending and consume them at once. 
mr.buf.Write([]byte{130, 5}) mr.buf.WriteString("ABCDE") mr.buf.Write([]byte{130, 5}) mr.buf.WriteString("FGHIJ") checkRead(100, []byte("ABCDEFGHIJ"), 0) // We also write 2 frames, but this time we will call the first read // with a read buffer that can accommodate only the first frame. // So internally only a single frame is going to be read in pending. mr.buf.Write([]byte{130, 5}) mr.buf.WriteString("ABCDE") mr.buf.Write([]byte{130, 5}) mr.buf.WriteString("FGHIJ") checkRead(6, []byte("ABCDE"), 0) checkRead(100, []byte("FGHIJ"), 0) // To test partials, we need to directly set the pending buffers. r.pending = append(r.pending, []byte("ABCDE")) r.pending = append(r.pending, []byte("FGHIJ")) // Now check that the first read cannot get the full first pending // buffer and gets only a partial. checkRead(3, []byte("ABC"), 2) // Since the read buffer is big enough to get everything else, after // this call we should have no pending. checkRead(7, []byte("DEFGHIJ"), 0) // Similar to above but with both partials. r.pending = append(r.pending, []byte("ABCDE")) r.pending = append(r.pending, []byte("FGHIJ")) checkRead(3, []byte("ABC"), 2) // Exact amount of the partial of 1st pending checkRead(2, []byte("DE"), 1) checkRead(3, []byte("FGH"), 1) // More space in read buffer than last partial checkRead(10, []byte("IJ"), 0) // This test the fact that read will return only when a frame is complete. mr.buf.Write([]byte{130, 5}) mr.buf.WriteString("AB") wg := sync.WaitGroup{} wg.Add(1) go func() { time.Sleep(100 * time.Millisecond) mr.ch <- []byte{'C', 'D', 'E', 130, 2, 'F', 'G'} wg.Done() }() // Read() will get "load" only the first frame, so after this call there // should be no pending. checkRead(100, []byte("ABCDE"), 0) // This will load the second frame. checkRead(100, []byte("FG"), 0) wg.Wait() // Set the buffer that may be populated during the init handshake. // Make sure that we process that one first. 
r.ib = []byte{130, 4, 'A', 'B'} mr.buf.WriteString("CD") mr.buf.Write([]byte{130, 2}) mr.buf.WriteString("EF") // This will only read up to ABCD and have no pending after the call. checkRead(100, []byte("ABCD"), 0) // We need another Read() call to read/load the second frame. checkRead(100, []byte("EF"), 0) // Close the underlying reader while reading. mr.buf.Write([]byte{130, 4, 'A', 'B'}) wg.Add(1) go func() { time.Sleep(100 * time.Millisecond) mr.close() wg.Done() }() if _, err := r.Read(p); err != io.EOF { t.Fatalf("Expected EOF, got %v", err) } wg.Wait() } func TestWSParseControlFrames(t *testing.T) { mr := &fakeReader{ch: make(chan []byte, 1)} defer mr.close() r := wsNewReader(mr) p := make([]byte, 100) // Write a PING mr.buf.Write([]byte{137, 0}) n, err := r.Read(p) if err != nil || n != 0 { t.Fatalf("Error on read: n=%v err=%v", n, err) } // Write a PONG mr.buf.Write([]byte{138, 0}) n, err = r.Read(p) if err != nil || n != 0 { t.Fatalf("Error on read: n=%v err=%v", n, err) } // Write a CLOSE mr.buf.Write([]byte{136, 6, 3, 232, 't', 'e', 's', 't'}) n, err = r.Read(p) if err != io.EOF || n != 0 { t.Fatalf("Error on read: n=%v err=%v", n, err) } // Write a CLOSE without payload mr.buf.Write([]byte{136, 2, 3, 232}) n, err = r.Read(p) if err != io.EOF || n != 0 { t.Fatalf("Error on read: n=%v err=%v", n, err) } // Write a CLOSE with invalid status mr.buf.Write([]byte{136, 1, 100}) n, err = r.Read(p) if err != io.EOF || n != 0 { t.Fatalf("Error on read: n=%v err=%v", n, err) } // Write CLOSE with valid status and payload but call with a read buffer // that has capacity of 1. 
mr.buf.Write([]byte{136, 6, 3, 232, 't', 'e', 's', 't'}) pl := []byte{136} n, err = r.Read(pl[:]) if err != io.EOF || n != 0 { t.Fatalf("Error on read: n=%v err=%v", n, err) } } func TestWSParseInvalidFrames(t *testing.T) { newReader := func() (*fakeReader, *websocketReader) { mr := &fakeReader{} r := wsNewReader(mr) return mr, r } p := make([]byte, 100) // Invalid utf-8 of close message mr, r := newReader() mr.buf.Write([]byte{136, 6, 3, 232, 't', 'e', 0xF1, 't'}) n, err := r.Read(p) if err != io.EOF || n != 0 { t.Fatalf("Error on read: n=%v err=%v", n, err) } // control frame length too long mr, r = newReader() mr.buf.Write([]byte{137, 126, 0, wsMaxControlPayloadSize + 10}) for i := 0; i < wsMaxControlPayloadSize+10; i++ { mr.buf.WriteByte('a') } n, err = r.Read(p) if n != 0 || err == nil || !strings.Contains(err.Error(), "maximum") { t.Fatalf("Unexpected error: n=%v err=%v", n, err) } // Not a final frame mr, r = newReader() mr.buf.Write([]byte{byte(wsPingMessage), 0}) n, err = r.Read(p[:2]) if n != 0 || err == nil || !strings.Contains(err.Error(), "final") { t.Fatalf("Unexpected error: n=%v err=%v", n, err) } // Marked as compressed mr, r = newReader() mr.buf.Write([]byte{byte(wsPingMessage) | wsRsv1Bit, 0}) n, err = r.Read(p[:2]) if n != 0 || err == nil || !strings.Contains(err.Error(), "compressed") { t.Fatalf("Unexpected error: n=%v err=%v", n, err) } // Continuation frame marked as compressed mr, r = newReader() mr.buf.Write([]byte{2, 3}) mr.buf.WriteString("ABC") mr.buf.Write([]byte{0 | wsRsv1Bit, 3}) mr.buf.WriteString("DEF") n, err = r.Read(p) if n != 0 || err == nil || !strings.Contains(err.Error(), "invalid continuation frame") { t.Fatalf("Unexpected error: n=%v err=%v", n, err) } // Continuation frame after a final frame mr, r = newReader() mr.buf.Write([]byte{130, 3}) mr.buf.WriteString("ABC") mr.buf.Write([]byte{0, 3}) mr.buf.WriteString("DEF") n, err = r.Read(p) if n != 0 || err == nil || !strings.Contains(err.Error(), "invalid continuation frame") 
{ t.Fatalf("Unexpected error: n=%v err=%v", n, err) } // New message received before previous ended mr, r = newReader() mr.buf.Write([]byte{2, 3}) mr.buf.WriteString("ABC") mr.buf.Write([]byte{0, 3}) mr.buf.WriteString("DEF") mr.buf.Write([]byte{130, 3}) mr.buf.WriteString("GHI") n, err = r.Read(p) if n != 0 || err == nil || !strings.Contains(err.Error(), "started before final frame") { t.Fatalf("Unexpected error: n=%v err=%v", n, err) } // Unknown frame type mr, r = newReader() mr.buf.Write([]byte{99, 3}) mr.buf.WriteString("ABC") n, err = r.Read(p) if n != 0 || err == nil || !strings.Contains(err.Error(), "unknown opcode") { t.Fatalf("Unexpected error: n=%v err=%v", n, err) } } func TestWSControlFrameBetweenDataFrames(t *testing.T) { mr := &fakeReader{ch: make(chan []byte, 1)} defer mr.close() r := wsNewReader(mr) p := make([]byte, 100) // Write a frame that will continue after the PONG mr.buf.Write([]byte{2, 3}) mr.buf.WriteString("ABC") // Write a PONG mr.buf.Write([]byte{138, 0}) // Continuation of the frame mr.buf.Write([]byte{0, 3}) mr.buf.WriteString("DEF") // Another PONG mr.buf.Write([]byte{138, 0}) // End of frame mr.buf.Write([]byte{128, 3}) mr.buf.WriteString("GHI") n, err := r.Read(p) if err != nil { t.Fatalf("Error on read: %v", err) } if string(p[:n]) != "ABCDEFGHI" { t.Fatalf("Unexpected result: %q", p[:n]) } } func TestWSDecompressor(t *testing.T) { var br *wsDecompressor p := make([]byte, 100) checkRead := func(limit int, expected []byte) { t.Helper() n, err := br.Read(p[:limit]) if err != nil { t.Fatalf("Error on read: %v", err) } if got := p[:n]; !bytes.Equal(expected, got) { t.Fatalf("Expected %v, got %v", expected, got) } } checkEOF := func() { t.Helper() n, err := br.Read(p) if err != io.EOF || n > 0 { t.Fatalf("Unexpected result: n=%v err=%v", n, err) } } checkReadByte := func(expected byte) { t.Helper() b, err := br.ReadByte() if err != nil { t.Fatalf("Error on read: %v", err) } if b != expected { t.Fatalf("Expected %c, got %c", expected, 
b) } } checkEOFWithReadByte := func() { t.Helper() n, err := br.ReadByte() if err != io.EOF || n > 0 { t.Fatalf("Unexpected result: n=%v err=%v", n, err) } } newDecompressor := func(str string) *wsDecompressor { d := &wsDecompressor{} d.addBuf([]byte(str)) return d } // Read with enough room br = newDecompressor("ABCDE") checkRead(100, []byte("ABCDE")) checkEOF() checkEOFWithReadByte() // Read with a partial from our buffer br = newDecompressor("FGHIJ") checkRead(2, []byte("FG")) // Call with more than the end of our buffer. checkRead(10, []byte("HIJ")) checkEOF() checkEOFWithReadByte() // Read with a partial from our buffer br = newDecompressor("KLMNO") checkRead(2, []byte("KL")) // Call with exact number of bytes left for our buffer. checkRead(3, []byte("MNO")) checkEOF() checkEOFWithReadByte() // Finally, check ReadByte. br = newDecompressor("UVWXYZ") checkRead(4, []byte("UVWX")) checkReadByte('Y') checkReadByte('Z') checkEOFWithReadByte() checkEOF() br = newDecompressor("ABC") buf := make([]byte, 0) n, err := br.Read(buf) if n != 0 || err != nil { t.Fatalf("Unexpected n=%v err=%v", n, err) } } func TestWSNoMixingScheme(t *testing.T) { // Check opts.Connect() first for _, test := range []struct { url string servers []string }{ {"ws://127.0.0.1:1234", []string{"nats://127.0.0.1:1235"}}, {"ws://127.0.0.1:1234", []string{"ws://127.0.0.1:1235", "nats://127.0.0.1:1236"}}, {"ws://127.0.0.1:1234", []string{"wss://127.0.0.1:1235", "nats://127.0.0.1:1236"}}, {"wss://127.0.0.1:1234", []string{"nats://127.0.0.1:1235"}}, {"wss://127.0.0.1:1234", []string{"wss://127.0.0.1:1235", "nats://127.0.0.1:1236"}}, {"wss://127.0.0.1:1234", []string{"ws://127.0.0.1:1235", "nats://127.0.0.1:1236"}}, } { t.Run("Options", func(t *testing.T) { opts := GetDefaultOptions() opts.Url = test.url opts.Servers = test.servers nc, err := opts.Connect() if err == nil || !strings.Contains(err.Error(), "mixing") { if nc != nil { nc.Close() } t.Fatalf("Expected error about mixing, got %v", err) } }) } 
// Check Connect() now. for _, test := range []struct { urls string servers []string }{ {"ws://127.0.0.1:1234,nats://127.0.0.1:1235", nil}, {"ws://127.0.0.1:1234,tcp://127.0.0.1:1235", nil}, {"ws://127.0.0.1:1234,tls://127.0.0.1:1235", nil}, {"nats://127.0.0.1:1234,ws://127.0.0.1:1235", nil}, {"nats://127.0.0.1:1234,wss://127.0.0.1:1235", nil}, {"nats://127.0.0.1:1234,tls://127.0.0.1:1235,ws://127.0.0.1:1236", nil}, {"nats://127.0.0.1:1234,tls://127.0.0.1:1235,wss://127.0.0.1:1236", nil}, // In Connect(), the URL is ignored when Servers() is provided. {"", []string{"nats://127.0.0.1:1235", "ws://127.0.0.1:1236"}}, {"", []string{"nats://127.0.0.1:1235", "wss://127.0.0.1:1236"}}, {"", []string{"ws://127.0.0.1:1235", "nats://127.0.0.1:1236"}}, {"", []string{"wss://127.0.0.1:1235", "nats://127.0.0.1:1236"}}, } { t.Run("Connect", func(t *testing.T) { var opt Option if len(test.servers) > 0 { opt = func(o *Options) error { o.Servers = test.servers return nil } } nc, err := Connect(test.urls, opt) if err == nil || !strings.Contains(err.Error(), "mixing") { if nc != nil { nc.Close() } t.Fatalf("Expected error about mixing, got %v", err) } }) } } func TestWSCompressionWithContinuationFrames(t *testing.T) { uncompressed := []byte("this is an uncompressed message with AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA") buf := &bytes.Buffer{} compressor, _ := flate.NewWriter(buf, flate.BestSpeed) compressor.Write(uncompressed) compressor.Close() b := buf.Bytes() if len(b) < 30 { panic("revisit test so that compressed buffer is more than 30 bytes long") } srbuf := &bytes.Buffer{} // We are going to split this in several frames. 
fh := []byte{66, 10} srbuf.Write(fh) srbuf.Write(b[:10]) fh = []byte{0, 10} srbuf.Write(fh) srbuf.Write(b[10:20]) fh = []byte{wsFinalBit, 0} fh[1] = byte(len(b) - 20) srbuf.Write(fh) srbuf.Write(b[20:]) r := wsNewReader(srbuf) rbuf := make([]byte, 100) n, err := r.Read(rbuf[:15]) // Since we have a partial of compressed message, the library keeps track // of buffer, but it can't return anything at this point, so n==0 err==nil // is the expected result. if n != 0 || err != nil { t.Fatalf("Error reading: n=%v err=%v", n, err) } n, err = r.Read(rbuf) if n != len(uncompressed) || err != nil { t.Fatalf("Error reading: n=%v err=%v", n, err) } if !reflect.DeepEqual(uncompressed, rbuf[:n]) { t.Fatalf("Unexpected uncompressed data: %v", rbuf[:n]) } } func TestWSTlsNoConfig(t *testing.T) { opts := GetDefaultOptions() opts.Servers = []string{"wss://localhost:443"} nc := &Conn{Opts: opts} if err := nc.setupServerPool(); err != nil { t.Fatalf("Error setting up pool: %v", err) } // Verify that this has set Secure/TLSConfig nc.mu.Lock() ok := nc.Opts.Secure && nc.Opts.TLSConfig != nil nc.mu.Unlock() if !ok { t.Fatal("Secure and TLSConfig were not set") } // Now try to add a bare host:ip to the pool and verify // that the wss:// scheme is added. 
if err := nc.addURLToPool("1.2.3.4:443", true, false); err != nil { t.Fatalf("Error adding to pool: %v", err) } nc.mu.Lock() for _, srv := range nc.srvPool { if srv.url.Scheme != wsSchemeTLS { nc.mu.Unlock() t.Fatalf("Expected scheme to be %q, got url: %s", wsSchemeTLS, srv.url) } } nc.mu.Unlock() } func TestWSProxyPath(t *testing.T) { const proxyPath = "proxy1" // Listen to a random port l, err := net.Listen("tcp", ":0") if err != nil { t.Fatalf("Error in listen: %v", err) } defer l.Close() proxyPort := l.Addr().(*net.TCPAddr).Port ch := make(chan struct{}, 1) proxySrv := &http.Server{ Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/"+proxyPath { ch <- struct{}{} } }), } defer proxySrv.Shutdown(context.Background()) go proxySrv.Serve(l) for _, test := range []struct { name string path string }{ {"without slash", proxyPath}, {"with slash", "/" + proxyPath}, } { t.Run(test.name, func(t *testing.T) { url := fmt.Sprintf("ws://127.0.0.1:%d", proxyPort) nc, err := Connect(url, ProxyPath(test.path)) if err == nil { nc.Close() t.Fatal("Did not expect to connect") } select { case <-ch: // OK: case <-time.After(time.Second): t.Fatal("Proxy was not reached") } }) } }