golang-github-ibm-sarama-1.43.2/.github/ISSUE_TEMPLATE/bug_report.md
---
name: Bug report
about: Report an issue that you've found
title: ""
labels: ""
assignees: ""
---

##### Description

##### Versions

| Sarama | Kafka | Go |
| ------ | ----- | --- |
|        |       |     |

##### Configuration

```go

```

##### Logs
<details><summary>logs: CLICK ME</summary>

```
```

</details>

##### Additional Context golang-github-ibm-sarama-1.43.2/.github/ISSUE_TEMPLATE/feature_request.md000066400000000000000000000010061461256741300256150ustar00rootroot00000000000000--- name: Feature request about: Suggest an idea for this project title: "" labels: "" assignees: "" --- #### Description #### Additional context golang-github-ibm-sarama-1.43.2/.github/dependabot.yml000066400000000000000000000036011461256741300225400ustar00rootroot00000000000000--- version: 2 updates: - package-ecosystem: docker directory: / schedule: interval: daily labels: - "ignore-for-release" commit-message: prefix: chore(ci) - package-ecosystem: github-actions directory: / open-pull-requests-limit: 2 schedule: interval: weekly labels: - "ignore-for-release" commit-message: prefix: chore(ci) - package-ecosystem: gomod directory: / open-pull-requests-limit: 5 schedule: interval: "daily" time: "23:00" labels: - "dependencies" commit-message: prefix: chore include: "scope" groups: golang-org-x: patterns: - "golang.org/x/*" - package-ecosystem: gomod directory: /examples/consumergroup schedule: interval: daily time: "23:00" labels: - "dependencies" commit-message: prefix: chore include: "scope" groups: golang-org-x: patterns: - "golang.org/x/*" - package-ecosystem: gomod directory: /examples/exactly_once schedule: interval: daily time: "23:00" labels: - "dependencies" commit-message: prefix: chore include: "scope" groups: golang-org-x: patterns: - "golang.org/x/*" - package-ecosystem: gomod directory: /examples/interceptors schedule: interval: daily time: "23:00" labels: - "dependencies" commit-message: prefix: chore include: "scope" groups: golang-org-x: patterns: - "golang.org/x/*" - package-ecosystem: gomod directory: /examples/txn_producer schedule: interval: daily time: "23:00" labels: - "dependencies" commit-message: prefix: chore include: "scope" groups: golang-org-x: patterns: - "golang.org/x/*" golang-github-ibm-sarama-1.43.2/.github/release.yaml000066400000000000000000000013341461256741300222150ustar00rootroot00000000000000changelog: exclude: labels: - ignore-for-release - invalid - no-changelog - skip-changelog categories: - title: ":rotating_light: Breaking Changes" labels: - breaking - title: ":tada: New Features / Improvements" labels: - enhancement - feature - feat - title: ":bug: Fixes" labels: - bug - bugfix - fix - regression - title: ":package: Dependency updates" labels: - dependencies - deps - title: ":wrench: Maintenance" labels: - build - chore - ci - housekeeping - internal - title: ":memo: Documentation" labels: - documentation - docs - title: ":heavy_plus_sign: Other Changes" labels: - "*" golang-github-ibm-sarama-1.43.2/.github/workflows/000077500000000000000000000000001461256741300217455ustar00rootroot00000000000000golang-github-ibm-sarama-1.43.2/.github/workflows/apidiff.yml000066400000000000000000000024301461256741300240710ustar00rootroot00000000000000name: API Compatibility on: merge_group: push: branches: - main paths-ignore: - '**/*.md' pull_request: branches: - "**" paths-ignore: - '**/*.md' permissions: contents: read # for actions/checkout to fetch code jobs: apidiff: runs-on: ubuntu-latest if: github.base_ref steps: - name: Setup Go uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: go-version: 1.22.x - name: Add GOBIN to PATH run: echo "$(go env GOPATH)/bin" >>$GITHUB_PATH - name: Install apidiff cmd run: go install golang.org/x/exp/cmd/apidiff@v0.0.0-20231006140011-7918f672742d - name: Checkout base code uses: 
actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 with: ref: ${{ github.base_ref }} path: "base" persist-credentials: false - name: Capture apidiff baseline run: apidiff -m -w ../baseline.bin . working-directory: "base" - name: Checkout updated code uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 with: path: "updated" persist-credentials: false - name: Run apidiff check run: apidiff -m -incompatible ../baseline.bin . working-directory: "updated" golang-github-ibm-sarama-1.43.2/.github/workflows/ci.yml000066400000000000000000000031561461256741300230700ustar00rootroot00000000000000name: CI on: merge_group: push: branches: - main paths-ignore: - '**/*.md' pull_request: branches: - "**" paths-ignore: - '**/*.md' permissions: contents: read # for actions/checkout to fetch code jobs: lint: permissions: contents: read # for actions/checkout to fetch code pull-requests: read # for golangci/golangci-lint-action to fetch pull requests name: Linting with Go ${{ matrix.go-version }} runs-on: ubuntu-latest strategy: fail-fast: false matrix: go-version: [1.22.x] steps: - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 with: persist-credentials: false - name: Setup Go uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: go-version: ${{ matrix.go-version }} - name: golangci-lint env: GOFLAGS: -tags=functional uses: golangci/golangci-lint-action@82d40c283aeb1f2b6595839195e95c2d6a49081b # v5.0.0 with: version: v1.57.2 test: name: Unit Testing with Go ${{ matrix.go-version }} runs-on: ubuntu-latest strategy: fail-fast: false matrix: go-version: [1.19.x, 1.21.x, 1.22.x] env: DEBUG: true GOFLAGS: -trimpath steps: - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 with: persist-credentials: false - name: Setup Go uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: go-version: ${{ matrix.go-version }} - name: Test (Unit) run: make test golang-github-ibm-sarama-1.43.2/.github/workflows/codeql-analysis.yml000066400000000000000000000024241461256741300255620ustar00rootroot00000000000000name: "CodeQL" on: merge_group: push: branches: - main pull_request: branches: - "**" schedule: - cron: "39 12 * * 1" permissions: contents: read # for actions/checkout to fetch code jobs: analyze: name: Analyze runs-on: ubuntu-latest permissions: actions: read # for github/codeql-action to list actions contents: read # for actions/checkout to fetch code security-events: write # for github/codeql-action to report security issues strategy: fail-fast: false matrix: language: ["go"] steps: - name: Checkout repository uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 with: persist-credentials: false - name: Initialize CodeQL uses: github/codeql-action/init@8f596b4ae3cb3c588a5c46780b86dd53fef16c52 # v3.25.2 with: languages: ${{ matrix.language }} - name: Setup Go uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: go-version: 1.22.x - name: Autobuild uses: github/codeql-action/autobuild@8f596b4ae3cb3c588a5c46780b86dd53fef16c52 # v3.25.2 - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@8f596b4ae3cb3c588a5c46780b86dd53fef16c52 # v3.25.2 golang-github-ibm-sarama-1.43.2/.github/workflows/dependency-review.yml000066400000000000000000000016701461256741300261110ustar00rootroot00000000000000# Dependency Review Action # # This Action will scan dependency manifest files that change as part of a Pull Request, # surfacing known-vulnerable 
versions of the packages declared or updated in the PR. # Once installed, if the workflow run is marked as required, # PRs introducing known-vulnerable packages will be blocked from merging. # # Source repository: https://github.com/actions/dependency-review-action name: 'Dependency Review' on: pull_request: branches: - "**" paths-ignore: - '**/*.md' permissions: contents: read # for actions/checkout to fetch code jobs: dependency-review: runs-on: ubuntu-latest steps: - name: 'Checkout Repository' uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 with: persist-credentials: false - name: 'Dependency Review' uses: actions/dependency-review-action@5bbc3ba658137598168acb2ab73b21c432dd411b # v4.2.5 golang-github-ibm-sarama-1.43.2/.github/workflows/fuzz.yml000066400000000000000000000013511461256741300234660ustar00rootroot00000000000000name: Fuzzing on: merge_group: push: branches: - main paths-ignore: - '**/*.md' pull_request: branches: - "**" paths-ignore: - '**/*.md' permissions: contents: read # for actions/checkout to fetch code jobs: test: name: Fuzz runs-on: ubuntu-latest env: GOFLAGS: -trimpath steps: - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 with: persist-credentials: false - name: Setup Go uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: go-version: 1.22.x - name: Run any fuzzing tests run: go test -list . | grep '^Fuzz' | parallel 'go test -v -run=^{}$ -fuzz=^{}$ -fuzztime=5m' golang-github-ibm-sarama-1.43.2/.github/workflows/fvt-main.yml000066400000000000000000000021061461256741300242100ustar00rootroot00000000000000name: FVT (main) on: merge_group: push: branches: - main paths-ignore: - '**/*.md' permissions: contents: read # for actions/checkout to fetch code jobs: fvt: name: Test with Kafka ${{ matrix.kafka-version }} strategy: fail-fast: false matrix: go-version: [1.22.x] kafka-version: [1.0.2, 2.0.1, 2.2.2, 2.6.2, 2.8.2, 3.0.2, 3.3.2, 3.6.0] include: - kafka-version: 1.0.2 scala-version: 2.11 - kafka-version: 2.0.1 scala-version: 2.12 - kafka-version: 2.2.2 scala-version: 2.12 - kafka-version: 2.6.2 scala-version: 2.12 - kafka-version: 2.8.2 scala-version: 2.12 - kafka-version: 3.0.2 scala-version: 2.12 - kafka-version: 3.3.2 scala-version: 2.13 - kafka-version: 3.6.0 scala-version: 2.13 uses: ./.github/workflows/fvt.yml with: go-version: ${{ matrix.go-version }} kafka-version: ${{ matrix.kafka-version }} scala-version: ${{ matrix.scala-version }} golang-github-ibm-sarama-1.43.2/.github/workflows/fvt-pr.yml000066400000000000000000000013511461256741300237060ustar00rootroot00000000000000name: FVT (PR) on: pull_request: branches: - "**" paths-ignore: - '**/*.md' permissions: contents: read # for actions/checkout to fetch code jobs: fvt: name: Test with Kafka ${{ matrix.kafka-version }} strategy: fail-fast: false matrix: go-version: [1.22.x] kafka-version: [1.0.2, 2.6.2, 3.6.0] include: - kafka-version: 1.0.2 scala-version: 2.11 - kafka-version: 2.6.2 scala-version: 2.12 - kafka-version: 3.6.0 scala-version: 2.13 uses: ./.github/workflows/fvt.yml with: go-version: ${{ matrix.go-version }} kafka-version: ${{ matrix.kafka-version }} scala-version: ${{ matrix.scala-version }} golang-github-ibm-sarama-1.43.2/.github/workflows/fvt.yml000066400000000000000000000051171461256741300232730ustar00rootroot00000000000000name: FVT on: workflow_call: inputs: go-version: required: false type: string default: 1.22.x kafka-version: required: false type: string default: 3.6.0 scala-version: required: false type: string 
default: 2.13 permissions: contents: read # for actions/checkout to fetch code jobs: fvt: name: Test with Kafka ${{ inputs.kafka-version }} runs-on: ubuntu-latest env: DEBUG: true GOFLAGS: -trimpath KAFKA_VERSION: ${{ inputs.kafka-version }} SCALA_VERSION: ${{ inputs.scala-version }} steps: - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 with: persist-credentials: false - name: Setup Docker uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb # v3.3.0 id: buildx - name: Build FVT Docker Image uses: docker/bake-action@33a1cc61dab930ceab0181de8cdc34a8cc55e61d # master with: builder: ${{ steps.buildx.outputs.name }} files: docker-compose.yml load: true targets: kafka-1 set: | *.cache-from=type=gha,scope=${{ github.workflow }} *.cache-to=type=gha,scope=${{ github.workflow }},mode=max - name: Setup Go uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: go-version: ${{ inputs.go-version }} - name: Setup Docker Compose run: | curl -sSL "https://github.com/docker/compose/releases/download/v2.20.3/docker-compose-$(uname -s)-$(uname -m)" -o /tmp/docker-compose sudo install -m755 /tmp/docker-compose "$(dirname $(which docker-compose))" docker version --format 'Docker Engine version v{{.Server.Version}}' docker-compose version - name: Test (Functional) run: | nohup sudo tcpdump -i lo -w "fvt-kafka-${{ inputs.kafka-version }}.pcap" portrange 29091-29095 >/dev/null 2>&1 & echo $! >tcpdump.pid make test_functional - name: Stop tcpdump if: always() run: | if [ -f "tcpdump.pid" ]; then sudo kill "$(cat tcpdump.pid)" || true; fi if [ -f "fvt-kafka-${{ inputs.kafka-version }}.pcap" ]; then sudo chmod a+r "fvt-kafka-${{ inputs.kafka-version }}.pcap"; fi - name: Upload pcap file if: always() uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 with: name: fvt-kafka-${{ inputs.kafka-version }}.pcap path: fvt-kafka-${{ inputs.kafka-version }}.pcap retention-days: 5 if-no-files-found: ignore golang-github-ibm-sarama-1.43.2/.github/workflows/i386.yml000066400000000000000000000020161461256741300231600ustar00rootroot00000000000000name: i386 on: merge_group: push: branches: - main paths-ignore: - '**/*.md' pull_request: branches: - "**" paths-ignore: - '**/*.md' permissions: contents: read # for actions/checkout to fetch code jobs: atomicalign: permissions: contents: read # for actions/checkout to fetch code pull-requests: read # for golangci/golangci-lint-action to fetch pull requests runs-on: ubuntu-latest steps: - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 with: persist-credentials: false - name: Setup Go uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: go-version: 1.22.x - name: staticcheck env: GOARCH: 386 GOFLAGS: -tags=functional run: | git clone --depth=1 https://github.com/dominikh/go-tools /tmp/go-tools ( cd /tmp/go-tools/cmd/staticcheck && go build -o /tmp/staticcheck ) /tmp/staticcheck -checks SA1027 ./... golang-github-ibm-sarama-1.43.2/.github/workflows/scorecard.yml000066400000000000000000000056421461256741300244440ustar00rootroot00000000000000# This workflow uses actions that are not certified by GitHub. They are provided # by a third-party and are governed by separate terms of service, privacy # policy, and support documentation. name: Scorecard supply-chain security on: # For Branch-Protection check. Only the default branch is supported. 
See # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection branch_protection_rule: # To guarantee Maintained check is occasionally updated. See # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained schedule: - cron: '17 4 * * 5' push: branches: [ "main" ] # Declare default permissions as read only. permissions: read-all jobs: analysis: name: Scorecard analysis runs-on: ubuntu-latest permissions: # Needed to upload the results to code-scanning dashboard. security-events: write # Needed to publish results and get a badge (see publish_results below). id-token: write # Uncomment the permissions below if installing in a private repository. # contents: read # actions: read steps: - name: "Checkout code" uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 with: persist-credentials: false - name: "Run analysis" uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # v2.3.1 with: results_file: results.sarif results_format: sarif # (Optional) "write" PAT token. Uncomment the `repo_token` line below if: # - you want to enable the Branch-Protection check on a *public* repository, or # - you are installing Scorecard on a *private* repository # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action#authentication-with-pat. # repo_token: ${{ secrets.SCORECARD_TOKEN }} # Public repositories: # - Publish results to OpenSSF REST API for easy access by consumers # - Allows the repository to include the Scorecard badge. # - See https://github.com/ossf/scorecard-action#publishing-results. # For private repositories: # - `publish_results` will always be set to `false`, regardless # of the value entered here. publish_results: true # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF # format to the repository Actions tab. - name: "Upload artifact" uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 with: name: SARIF file path: results.sarif retention-days: 5 # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" uses: github/codeql-action/upload-sarif@8f596b4ae3cb3c588a5c46780b86dd53fef16c52 # v3.25.2 with: sarif_file: results.sarif golang-github-ibm-sarama-1.43.2/.github/workflows/stale.yml000066400000000000000000000033171461256741300236040ustar00rootroot00000000000000# configuration for https://github.com/actions/stale name: "Stale issues and PRs" on: schedule: - cron: "0 */2 * * *" workflow_dispatch: permissions: contents: read # for actions/checkout to fetch code jobs: stale: permissions: issues: write # for actions/stale to close stale issues pull-requests: write # for actions/stale to close stale PRs runs-on: ubuntu-latest steps: # pinned to main commit to make use of https://github.com/actions/stale/pull/1033 - uses: actions/stale@3f3b0175e8c66fb49b9a6d5a0cd1f8436d4c3ab6 # main with: ascending: true days-before-stale: 90 days-before-close: 30 stale-issue-message: >- Thank you for taking the time to raise this issue. However, it has not had any activity on it in the past 90 days and will be closed in 30 days if no updates occur. Please check if the main branch has already resolved the issue since it was raised. If you believe the issue is still valid and you would like input from the maintainers then please comment to ask for it to be reviewed. stale-pr-message: >- Thank you for your contribution! 
However, this pull request has not had any activity in the past 90 days and will be closed in 30 days if no updates occur. If you believe the changes are still valid then please verify your branch has no conflicts with main and rebase if needed. If you are awaiting a (re-)review then please let us know. stale-issue-label: "stale" exempt-issue-labels: "stale/exempt,pinned" stale-pr-label: "stale" exempt-pr-labels: "stale/exempt,pinned" golang-github-ibm-sarama-1.43.2/.gitignore000066400000000000000000000005011461256741300203340ustar00rootroot00000000000000# Compiled Object files, Static and Dynamic libs (Shared Objects) *.o *.a *.so *.test # Folders _obj _test .vagrant # Architecture specific extensions/prefixes *.[568vq] [568vq].out *.cgo1.go *.cgo2.c _cgo_defun.c _cgo_gotypes.go _cgo_export.* _testmain.go *.exe /bin /coverage.txt /profile.out /output.json .idea golang-github-ibm-sarama-1.43.2/.golangci.yml000066400000000000000000000031541461256741300207370ustar00rootroot00000000000000run: timeout: 5m deadline: 10m linters-settings: govet: check-shadowing: false golint: min-confidence: 0 gocyclo: min-complexity: 99 maligned: suggest-new: true dupl: threshold: 100 goconst: min-len: 2 min-occurrences: 3 misspell: locale: US goimports: local-prefixes: github.com/IBM/sarama gocritic: enabled-tags: - diagnostic - performance # - experimental # - opinionated # - style enabled-checks: - importShadow - nestingReduce - stringsCompare # - unnamedResult # - whyNoLint disabled-checks: - assignOp - appendAssign - commentedOutCode - hugeParam - ifElseChain - singleCaseSwitch - sloppyReassign funlen: lines: 300 statements: 300 depguard: rules: main: deny: - pkg: "io/ioutil" desc: Use the "io" and "os" packages instead. linters: disable-all: true enable: - bodyclose - depguard - exportloopref - dogsled - errcheck - errorlint - funlen - gochecknoinits - gocritic - gocyclo - gofmt - goimports - gosec - govet - misspell - nilerr - staticcheck - typecheck - unconvert - unused - whitespace issues: exclude: - "G404: Use of weak random number generator" exclude-rules: # exclude some linters from running on certains files. - path: functional.*_test\.go linters: - paralleltest # maximum count of issues with the same text. set to 0 for unlimited. default is 3. max-same-issues: 0 golang-github-ibm-sarama-1.43.2/.pre-commit-config.yaml000066400000000000000000000024271461256741300226360ustar00rootroot00000000000000fail_fast: false default_install_hook_types: [pre-commit, commit-msg] repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.4.0 hooks: - id: check-merge-conflict - id: check-yaml - id: end-of-file-fixer - id: fix-byte-order-marker - id: mixed-line-ending - id: trailing-whitespace - repo: local hooks: - id: conventional-commit-msg-validation name: commit message conventional validation language: pygrep entry: '^(?:fixup! )?(breaking|build|chore|ci|docs|feat|fix|perf|refactor|revert|style|test){1}(\([\w\-\.]+\))?(!)?: ([\w `])+([\s\S]*)' args: [--multiline, --negate] stages: [commit-msg] - id: commit-msg-needs-to-be-signed-off name: commit message needs to be signed off language: pygrep entry: "^Signed-off-by:" args: [--multiline, --negate] stages: [commit-msg] - id: gofmt name: gofmt description: Format files with gofmt. 
entry: gofmt -l language: golang files: \.go$ args: [] - repo: https://github.com/gitleaks/gitleaks rev: v8.16.3 hooks: - id: gitleaks - repo: https://github.com/golangci/golangci-lint rev: v1.52.2 hooks: - id: golangci-lint golang-github-ibm-sarama-1.43.2/CHANGELOG.md000066400000000000000000002651351461256741300201750ustar00rootroot00000000000000# Changelog ## Version 1.42.2 (2024-02-09) ## What's Changed ⚠️ The go.mod directive has been bumped to 1.18 as the minimum version of Go required for the module. This was necessary to continue to receive updates from some of the third party dependencies that Sarama makes use of for compression. ### :tada: New Features / Improvements * feat: update go directive to 1.18 by @dnwe in https://github.com/IBM/sarama/pull/2713 * feat: return KError instead of errors in AlterConfigs and DescribeConfig by @zhuliquan in https://github.com/IBM/sarama/pull/2472 ### :bug: Fixes * fix: don't waste time for backoff on member id required error by @lzakharov in https://github.com/IBM/sarama/pull/2759 * fix: prevent ConsumerGroup.Close infinitely locking by @maqdev in https://github.com/IBM/sarama/pull/2717 ### :package: Dependency updates * chore(deps): bump golang.org/x/net from 0.17.0 to 0.18.0 by @dependabot in https://github.com/IBM/sarama/pull/2716 * chore(deps): bump golang.org/x/sync to v0.5.0 by @dependabot in https://github.com/IBM/sarama/pull/2718 * chore(deps): bump github.com/pierrec/lz4/v4 from 4.1.18 to 4.1.19 by @dependabot in https://github.com/IBM/sarama/pull/2739 * chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 by @dependabot in https://github.com/IBM/sarama/pull/2748 * chore(deps): bump the golang-org-x group with 1 update by @dependabot in https://github.com/IBM/sarama/pull/2734 * chore(deps): bump the golang-org-x group with 2 updates by @dependabot in https://github.com/IBM/sarama/pull/2764 * chore(deps): bump github.com/pierrec/lz4/v4 from 4.1.19 to 4.1.21 by @dependabot in https://github.com/IBM/sarama/pull/2763 * chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 in /examples/exactly_once by @dependabot in https://github.com/IBM/sarama/pull/2749 * chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 in /examples/consumergroup by @dependabot in https://github.com/IBM/sarama/pull/2750 * chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 in /examples/sasl_scram_client by @dependabot in https://github.com/IBM/sarama/pull/2751 * chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 in /examples/interceptors by @dependabot in https://github.com/IBM/sarama/pull/2752 * chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 in /examples/http_server by @dependabot in https://github.com/IBM/sarama/pull/2753 * chore(deps): bump github.com/eapache/go-resiliency from 1.4.0 to 1.5.0 by @dependabot in https://github.com/IBM/sarama/pull/2745 * chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 in /examples/txn_producer by @dependabot in https://github.com/IBM/sarama/pull/2754 * chore(deps): bump go.opentelemetry.io/otel/sdk from 1.19.0 to 1.22.0 in /examples/interceptors by @dependabot in https://github.com/IBM/sarama/pull/2767 * chore(deps): bump the golang-org-x group with 1 update by @dependabot in https://github.com/IBM/sarama/pull/2793 * chore(deps): bump go.opentelemetry.io/otel/exporters/stdout/stdoutmetric from 0.42.0 to 1.23.1 in /examples/interceptors by @dependabot in https://github.com/IBM/sarama/pull/2792 ### :wrench: Maintenance * fix(examples): housekeeping of code and deps by @dnwe 
in https://github.com/IBM/sarama/pull/2720 ### :heavy_plus_sign: Other Changes * fix(test): retry MockBroker Listen for EADDRINUSE by @dnwe in https://github.com/IBM/sarama/pull/2721 ## New Contributors * @maqdev made their first contribution in https://github.com/IBM/sarama/pull/2717 * @zhuliquan made their first contribution in https://github.com/IBM/sarama/pull/2472 **Full Changelog**: https://github.com/IBM/sarama/compare/v1.42.1...v1.42.2 ## Version 1.42.1 (2023-11-07) ## What's Changed ### :bug: Fixes * fix: make fetchInitialOffset use correct protocol by @dnwe in https://github.com/IBM/sarama/pull/2705 * fix(config): relax ClientID validation after 1.0.0 by @dnwe in https://github.com/IBM/sarama/pull/2706 **Full Changelog**: https://github.com/IBM/sarama/compare/v1.42.0...v1.42.1 ## Version 1.42.0 (2023-11-02) ## What's Changed ### :bug: Fixes * Asynchronously close brokers during a RefreshBrokers by @bmassemin in https://github.com/IBM/sarama/pull/2693 * Fix data race on Broker.done channel by @prestona in https://github.com/IBM/sarama/pull/2698 * fix: data race in Broker.AsyncProduce by @lzakharov in https://github.com/IBM/sarama/pull/2678 * Fix default retention time value in offset commit by @prestona in https://github.com/IBM/sarama/pull/2700 * fix(txmgr): ErrOffsetsLoadInProgress is retriable by @dnwe in https://github.com/IBM/sarama/pull/2701 ### :wrench: Maintenance * chore(ci): improve ossf scorecard result by @dnwe in https://github.com/IBM/sarama/pull/2685 * chore(ci): add kafka 3.6.0 to FVT and versions by @dnwe in https://github.com/IBM/sarama/pull/2692 ### :heavy_plus_sign: Other Changes * chore(ci): ossf scorecard.yml by @dnwe in https://github.com/IBM/sarama/pull/2683 * fix(ci): always run CodeQL on every commit by @dnwe in https://github.com/IBM/sarama/pull/2689 * chore(doc): add OpenSSF Scorecard badge by @dnwe in https://github.com/IBM/sarama/pull/2691 ## New Contributors * @bmassemin made their first contribution in https://github.com/IBM/sarama/pull/2693 * @lzakharov made their first contribution in https://github.com/IBM/sarama/pull/2678 **Full Changelog**: https://github.com/IBM/sarama/compare/v1.41.3...v1.42.0 ## Version 1.41.3 (2023-10-17) ## What's Changed ### :bug: Fixes * fix: pre-compile regex for parsing kafka version by @qshuai in https://github.com/IBM/sarama/pull/2663 * fix(client): ignore empty Metadata responses when refreshing by @HaoSunUber in https://github.com/IBM/sarama/pull/2672 ### :package: Dependency updates * chore(deps): bump the golang-org-x group with 2 updates by @dependabot in https://github.com/IBM/sarama/pull/2661 * chore(deps): bump golang.org/x/net from 0.16.0 to 0.17.0 by @dependabot in https://github.com/IBM/sarama/pull/2671 ### :memo: Documentation * fix(docs): correct topic name in rebalancing strategy example by @maksadbek in https://github.com/IBM/sarama/pull/2657 ## New Contributors * @maksadbek made their first contribution in https://github.com/IBM/sarama/pull/2657 * @qshuai made their first contribution in https://github.com/IBM/sarama/pull/2663 **Full Changelog**: https://github.com/IBM/sarama/compare/v1.41.2...v1.41.3 ## Version 1.41.2 (2023-09-12) ## What's Changed ### :tada: New Features / Improvements * perf: Alloc records in batch by @ronanh in https://github.com/IBM/sarama/pull/2646 ### :bug: Fixes * fix(consumer): guard against nil client by @dnwe in https://github.com/IBM/sarama/pull/2636 * fix(consumer): don't retry session if ctx canceled by @dnwe in https://github.com/IBM/sarama/pull/2642 * fix: use least 
loaded broker to refresh metadata by @HaoSunUber in https://github.com/IBM/sarama/pull/2645 ### :package: Dependency updates * chore(deps): bump the golang-org-x group with 1 update by @dependabot in https://github.com/IBM/sarama/pull/2641 ## New Contributors * @HaoSunUber made their first contribution in https://github.com/IBM/sarama/pull/2645 **Full Changelog**: https://github.com/IBM/sarama/compare/v1.41.1...v1.41.2 ## Version 1.41.1 (2023-08-30) ## What's Changed ### :bug: Fixes * fix(proto): handle V3 member metadata and empty owned partitions by @dnwe in https://github.com/IBM/sarama/pull/2618 * fix: make clear that error is configuration issue not server error by @hindessm in https://github.com/IBM/sarama/pull/2628 * fix(client): force Event Hubs to use V1_0_0_0 by @dnwe in https://github.com/IBM/sarama/pull/2633 * fix: add retries to alter user scram creds by @hindessm in https://github.com/IBM/sarama/pull/2632 ### :wrench: Maintenance * chore(lint): bump golangci-lint and tweak config by @dnwe in https://github.com/IBM/sarama/pull/2620 ### :memo: Documentation * fix(doc): add missing doc for mock consumer by @hsweif in https://github.com/IBM/sarama/pull/2386 * chore(proto): doc CreateTopics/JoinGroup fields by @dnwe in https://github.com/IBM/sarama/pull/2627 ### :heavy_plus_sign: Other Changes * chore(gh): add new style issue templates by @dnwe in https://github.com/IBM/sarama/pull/2624 **Full Changelog**: https://github.com/IBM/sarama/compare/v1.41.0...v1.41.1 ## Version 1.41.0 (2023-08-21) ## What's Changed ### :rotating_light: Breaking Changes Note: this version of Sarama has had a big overhaul in its adherence to the use of the right Kafka protocol versions for the given Config Version. It has also bumped the default Version set in Config (where one is not supplied) to 2.1.0. This is in preparation for Kafka 4.0 dropping support for protocol versions older than 2.1. If you are using Sarama against Kafka clusters older than v2.1.0, or using it against Azure EventHubs then you will likely have to change your application code to pin to the appropriate Version. 
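For applications in that situation, the pinning described above amounts to setting `Config.Version` explicitly before constructing any clients. A minimal sketch, assuming a placeholder broker address and using `V1_0_0_0` purely as an illustrative choice for an older cluster:

```go
package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	// Pin the protocol version instead of relying on the new V2_1_0_0 default.
	// V1_0_0_0 is only an example; choose the version matching your cluster.
	cfg.Version = sarama.V1_0_0_0

	// "localhost:9092" is a placeholder broker address for this sketch.
	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatalf("failed to start producer: %v", err)
	}
	defer producer.Close()
}
```

Clients that do not set `Config.Version` will use the new `V2_1_0_0` default described above.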
* chore(config): make DefaultVersion V2_0_0_0 by @dnwe in https://github.com/IBM/sarama/pull/2572 * chore(config): make DefaultVersion V2_1_0_0 by @dnwe in https://github.com/IBM/sarama/pull/2574 ### :tada: New Features / Improvements * Implement resolve_canonical_bootstrap_servers_only by @gebn in https://github.com/IBM/sarama/pull/2156 * feat: sleep when throttled (KIP-219) by @hindessm in https://github.com/IBM/sarama/pull/2536 * feat: add isValidVersion to protocol types by @dnwe in https://github.com/IBM/sarama/pull/2538 * fix(consumer): use newer LeaveGroup as appropriate by @dnwe in https://github.com/IBM/sarama/pull/2544 * Add support for up to version 4 List Groups API by @prestona in https://github.com/IBM/sarama/pull/2541 * fix(producer): use newer ProduceReq as appropriate by @dnwe in https://github.com/IBM/sarama/pull/2546 * fix(proto): ensure req+resp requiredVersion match by @dnwe in https://github.com/IBM/sarama/pull/2548 * chore(proto): permit CreatePartitionsRequest V1 by @dnwe in https://github.com/IBM/sarama/pull/2549 * chore(proto): permit AlterConfigsRequest V1 by @dnwe in https://github.com/IBM/sarama/pull/2550 * chore(proto): permit DeleteGroupsRequest V1 by @dnwe in https://github.com/IBM/sarama/pull/2551 * fix(proto): correct JoinGroup usage for wider version range by @dnwe in https://github.com/IBM/sarama/pull/2553 * fix(consumer): use full range of FetchRequest vers by @dnwe in https://github.com/IBM/sarama/pull/2554 * fix(proto): use range of OffsetCommitRequest vers by @dnwe in https://github.com/IBM/sarama/pull/2555 * fix(proto): use full range of MetadataRequest by @dnwe in https://github.com/IBM/sarama/pull/2556 * fix(proto): use fuller ranges of supported proto by @dnwe in https://github.com/IBM/sarama/pull/2558 * fix(proto): use full range of SyncGroupRequest by @dnwe in https://github.com/IBM/sarama/pull/2565 * fix(proto): use full range of ListGroupsRequest by @dnwe in https://github.com/IBM/sarama/pull/2568 * feat(proto): support for Metadata V6-V10 by @dnwe in https://github.com/IBM/sarama/pull/2566 * fix(proto): use full ranges for remaining proto by @dnwe in https://github.com/IBM/sarama/pull/2570 * feat(proto): add remaining protocol for V2.1 by @dnwe in https://github.com/IBM/sarama/pull/2573 * feat: add new error for MockDeleteTopicsResponse by @javiercri in https://github.com/IBM/sarama/pull/2475 * feat(gzip): switch to klauspost/compress gzip by @dnwe in https://github.com/IBM/sarama/pull/2600 ### :bug: Fixes * fix: correct unsupported version check by @hindessm in https://github.com/IBM/sarama/pull/2528 * fix: avoiding burning cpu if all partitions are paused by @napallday in https://github.com/IBM/sarama/pull/2532 * extend throttling metric scope by @hindessm in https://github.com/IBM/sarama/pull/2533 * Fix printing of final metrics by @prestona in https://github.com/IBM/sarama/pull/2545 * fix(consumer): cannot automatically fetch newly-added partitions unless restart by @napallday in https://github.com/IBM/sarama/pull/2563 * bug: implement unsigned modulus for partitioning with crc32 hashing by @csm8118 in https://github.com/IBM/sarama/pull/2560 * fix: avoid logging value of proxy.Dialer by @prestona in https://github.com/IBM/sarama/pull/2569 * fix(test): add missing closes to admin client tests by @dnwe in https://github.com/IBM/sarama/pull/2594 * fix(test): ensure some more clients are closed by @dnwe in https://github.com/IBM/sarama/pull/2595 * fix(examples): sync exactly_once and consumergroup by @dnwe in 
https://github.com/IBM/sarama/pull/2614 * fix(fvt): fresh metrics registry for each test by @dnwe in https://github.com/IBM/sarama/pull/2616 * fix(test): flaky test TestFuncOffsetManager by @napallday in https://github.com/IBM/sarama/pull/2609 ### :package: Dependency updates * chore(deps): bump the golang-org-x group with 1 update by @dependabot in https://github.com/IBM/sarama/pull/2542 * chore(deps): bump the golang-org-x group with 1 update by @dependabot in https://github.com/IBM/sarama/pull/2561 * chore(deps): bump module github.com/pierrec/lz4/v4 to v4.1.18 by @dnwe in https://github.com/IBM/sarama/pull/2589 * chore(deps): bump module github.com/jcmturner/gokrb5/v8 to v8.4.4 by @dnwe in https://github.com/IBM/sarama/pull/2587 * chore(deps): bump github.com/eapache/go-xerial-snappy digest to c322873 by @dnwe in https://github.com/IBM/sarama/pull/2586 * chore(deps): bump module github.com/klauspost/compress to v1.16.7 by @dnwe in https://github.com/IBM/sarama/pull/2588 * chore(deps): bump github.com/eapache/go-resiliency from 1.3.0 to 1.4.0 by @dependabot in https://github.com/IBM/sarama/pull/2598 ### :wrench: Maintenance * fix(fvt): ensure fully-replicated at test start by @hindessm in https://github.com/IBM/sarama/pull/2531 * chore: rollup fvt kafka to latest three by @dnwe in https://github.com/IBM/sarama/pull/2537 * Merge the two CONTRIBUTING.md's by @prestona in https://github.com/IBM/sarama/pull/2543 * fix(test): test timing error by @hindessm in https://github.com/IBM/sarama/pull/2552 * chore(ci): tidyup and improve actions workflows by @dnwe in https://github.com/IBM/sarama/pull/2557 * fix(test): shutdown MockBroker by @dnwe in https://github.com/IBM/sarama/pull/2571 * chore(proto): match HeartbeatResponse version by @dnwe in https://github.com/IBM/sarama/pull/2576 * chore(test): ensure MockBroker closed within test by @dnwe in https://github.com/IBM/sarama/pull/2575 * chore(test): ensure all mockresponses use version by @dnwe in https://github.com/IBM/sarama/pull/2578 * chore(ci): use latest Go in actions by @dnwe in https://github.com/IBM/sarama/pull/2580 * chore(test): speedup some slow tests by @dnwe in https://github.com/IBM/sarama/pull/2579 * chore(test): use modern protocol versions in FVT by @dnwe in https://github.com/IBM/sarama/pull/2581 * chore(test): fix a couple of leaks by @dnwe in https://github.com/IBM/sarama/pull/2591 * feat(fvt): experiment with per-kafka-version image by @dnwe in https://github.com/IBM/sarama/pull/2592 * chore(ci): replace toxiproxy client dep by @dnwe in https://github.com/IBM/sarama/pull/2593 * feat(fvt): add healthcheck, depends_on and --wait by @dnwe in https://github.com/IBM/sarama/pull/2601 * fix(fvt): handle msgset vs batchset by @dnwe in https://github.com/IBM/sarama/pull/2603 * fix(fvt): Metadata version in ensureFullyReplicated by @dnwe in https://github.com/IBM/sarama/pull/2612 * fix(fvt): versioned cfg for invalid topic producer by @dnwe in https://github.com/IBM/sarama/pull/2613 * chore(fvt): tweak to work across more versions by @dnwe in https://github.com/IBM/sarama/pull/2615 * feat(fvt): test wider range of kafkas by @dnwe in https://github.com/IBM/sarama/pull/2605 ### :memo: Documentation * fix(example): check if msg channel is closed by @ioanzicu in https://github.com/IBM/sarama/pull/2479 * chore: use go install for installing sarama tools by @vigith in https://github.com/IBM/sarama/pull/2599 ## New Contributors * @gebn made their first contribution in https://github.com/IBM/sarama/pull/2156 * @prestona made their first 
contribution in https://github.com/IBM/sarama/pull/2543 * @ioanzicu made their first contribution in https://github.com/IBM/sarama/pull/2479 * @csm8118 made their first contribution in https://github.com/IBM/sarama/pull/2560 * @javiercri made their first contribution in https://github.com/IBM/sarama/pull/2475 * @vigith made their first contribution in https://github.com/IBM/sarama/pull/2599 **Full Changelog**: https://github.com/IBM/sarama/compare/v1.40.1...v1.41.0 ## Version 1.40.1 (2023-07-27) ## What's Changed ### :tada: New Features / Improvements * Use buffer pools for decompression by @ronanh in https://github.com/IBM/sarama/pull/2484 * feat: support for Kerberos authentication with a credentials cache. by @mrogaski in https://github.com/IBM/sarama/pull/2457 ### :bug: Fixes * Fix some retry issues by @hindessm in https://github.com/IBM/sarama/pull/2517 * fix: admin retry logic by @hindessm in https://github.com/IBM/sarama/pull/2519 * Add some retry logic to more admin client functions by @hindessm in https://github.com/IBM/sarama/pull/2520 * fix: concurrent issue on updateMetadataMs by @napallday in https://github.com/IBM/sarama/pull/2522 * fix(test): allow testing of skipped test without IsTransactional panic by @hindessm in https://github.com/IBM/sarama/pull/2525 ### :package: Dependency updates * chore(deps): bump the golang-org-x group with 2 updates by @dependabot in https://github.com/IBM/sarama/pull/2509 * chore(deps): bump github.com/klauspost/compress from 1.15.14 to 1.16.6 by @dependabot in https://github.com/IBM/sarama/pull/2513 * chore(deps): bump github.com/stretchr/testify from 1.8.1 to 1.8.3 by @dependabot in https://github.com/IBM/sarama/pull/2512 ### :wrench: Maintenance * chore(ci): migrate probot-stale to actions/stale by @dnwe in https://github.com/IBM/sarama/pull/2496 * chore(ci): bump golangci version, cleanup, depguard config by @EladLeev in https://github.com/IBM/sarama/pull/2504 * Clean up some typos and docs/help mistakes by @hindessm in https://github.com/IBM/sarama/pull/2514 ### :heavy_plus_sign: Other Changes * chore(ci): add simple apidiff workflow by @dnwe in https://github.com/IBM/sarama/pull/2497 * chore(ci): bump actions/setup-go from 3 to 4 by @dependabot in https://github.com/IBM/sarama/pull/2508 * fix(comments): PauseAll and ResumeAll by @napallday in https://github.com/IBM/sarama/pull/2523 ## New Contributors * @EladLeev made their first contribution in https://github.com/IBM/sarama/pull/2504 * @hindessm made their first contribution in https://github.com/IBM/sarama/pull/2514 * @ronanh made their first contribution in https://github.com/IBM/sarama/pull/2484 * @mrogaski made their first contribution in https://github.com/IBM/sarama/pull/2457 **Full Changelog**: https://github.com/IBM/sarama/compare/v1.40.0...v1.40.1 ## Version 1.40.0 (2023-07-17) ## What's Changed Note: this is the first release after the transition of Sarama ownership from Shopify to IBM in https://github.com/IBM/sarama/issues/2461 ### :rotating_light: Breaking Changes - chore: migrate module to github.com/IBM/sarama by @dnwe in https://github.com/IBM/sarama/pull/2492 - fix: restore (\*OffsetCommitRequest) AddBlock func by @dnwe in https://github.com/IBM/sarama/pull/2494 ### :bug: Fixes - fix(consumer): don't retry FindCoordinator forever by @dnwe in https://github.com/IBM/sarama/pull/2427 - fix(metrics): fix race condition when calling Broker.Open() twice by @vincentbernat in https://github.com/IBM/sarama/pull/2428 - fix: use version 4 of DescribeGroupsRequest only if kafka 
broker vers… …ion is >= 2.4 by @faillefer in https://github.com/IBM/sarama/pull/2451 - Fix HighWaterMarkOffset of mocks partition consumer by @gr8web in https://github.com/IBM/sarama/pull/2447 - fix: prevent data race in balance strategy by @napallday in https://github.com/IBM/sarama/pull/2453 ### :package: Dependency updates - chore(deps): bump golang.org/x/net from 0.5.0 to 0.7.0 by @dependabot in https://github.com/IBM/sarama/pull/2452 ### :wrench: Maintenance - chore: add kafka 3.3.2 by @dnwe in https://github.com/IBM/sarama/pull/2434 - chore(ci): remove Shopify/shopify-cla-action by @dnwe in https://github.com/IBM/sarama/pull/2489 - chore: bytes.Equal instead bytes.Compare by @testwill in https://github.com/IBM/sarama/pull/2485 ## New Contributors - @dependabot made their first contribution in https://github.com/IBM/sarama/pull/2452 - @gr8web made their first contribution in https://github.com/IBM/sarama/pull/2447 - @testwill made their first contribution in https://github.com/IBM/sarama/pull/2485 **Full Changelog**: https://github.com/IBM/sarama/compare/v1.38.1...v1.40.0 ## Version 1.38.1 (2023-01-22) ## What's Changed ### :bug: Fixes * fix(example): correct `records-number` param in txn producer readme by @diallo-han in https://github.com/IBM/sarama/pull/2420 * fix: use newConsumer method in newConsumerGroup method by @Lumotheninja in https://github.com/IBM/sarama/pull/2424 ### :package: Dependency updates * chore(deps): bump module github.com/klauspost/compress to v1.15.14 by @dnwe in https://github.com/IBM/sarama/pull/2410 * chore(deps): bump module golang.org/x/net to v0.5.0 by @dnwe in https://github.com/IBM/sarama/pull/2413 * chore(deps): bump module github.com/stretchr/testify to v1.8.1 by @dnwe in https://github.com/IBM/sarama/pull/2411 * chore(deps): bump module github.com/xdg-go/scram to v1.1.2 by @dnwe in https://github.com/IBM/sarama/pull/2412 * chore(deps): bump module golang.org/x/sync to v0.1.0 by @dnwe in https://github.com/IBM/sarama/pull/2414 * chore(deps): bump github.com/eapache/go-xerial-snappy digest to bf00bc1 by @dnwe in https://github.com/IBM/sarama/pull/2418 ## New Contributors * @diallo-han made their first contribution in https://github.com/IBM/sarama/pull/2420 * @Lumotheninja made their first contribution in https://github.com/IBM/sarama/pull/2424 **Full Changelog**: https://github.com/IBM/sarama/compare/v1.38.0...v1.38.1 ## Version 1.38.0 (2023-01-08) ## What's Changed ### :tada: New Features / Improvements * feat(producer): improve memory usage of zstd encoder by using our own pool management by @rtreffer in https://github.com/IBM/sarama/pull/2375 * feat(proto): implement and use MetadataRequest v7 by @dnwe in https://github.com/IBM/sarama/pull/2388 * feat(metrics): add protocol-requests-rate metric by @auntan in https://github.com/IBM/sarama/pull/2373 ### :bug: Fixes * fix(proto): track and supply leader epoch to FetchRequest by @dnwe in https://github.com/IBM/sarama/pull/2389 * fix(example): improve arg name used for tls skip verify by @michaeljmarshall in https://github.com/IBM/sarama/pull/2385 * fix(zstd): default back to GOMAXPROCS concurrency by @bgreenlee in https://github.com/IBM/sarama/pull/2404 * fix(producer): add nil check while producer is retrying by @hsweif in https://github.com/IBM/sarama/pull/2387 * fix(producer): return errors for every message in retryBatch to avoid producer hang forever by @cch123 in https://github.com/IBM/sarama/pull/2378 * fix(metrics): fix race when accessing metric registry by @vincentbernat in 
https://github.com/IBM/sarama/pull/2409 ### :package: Dependency updates * chore(deps): bump golang.org/x/net to v0.4.0 by @dnwe in https://github.com/IBM/sarama/pull/2403 ### :wrench: Maintenance * chore(ci): replace set-output command in GH Action by @dnwe in https://github.com/IBM/sarama/pull/2390 * chore(ci): include kafka 3.3.1 in testing matrix by @dnwe in https://github.com/IBM/sarama/pull/2406 ## New Contributors * @michaeljmarshall made their first contribution in https://github.com/IBM/sarama/pull/2385 * @bgreenlee made their first contribution in https://github.com/IBM/sarama/pull/2404 * @hsweif made their first contribution in https://github.com/IBM/sarama/pull/2387 * @cch123 made their first contribution in https://github.com/IBM/sarama/pull/2378 **Full Changelog**: https://github.com/IBM/sarama/compare/v1.37.2...v1.38.0 ## Version 1.37.2 (2022-10-04) ## What's Changed ### :bug: Fixes * fix: ensure updateMetaDataMs is 64-bit aligned by @dnwe in https://github.com/IBM/sarama/pull/2356 ### :heavy_plus_sign: Other Changes * fix: bump go.mod specification to go 1.17 by @dnwe in https://github.com/IBM/sarama/pull/2357 **Full Changelog**: https://github.com/IBM/sarama/compare/v1.37.1...v1.37.2 ## Version 1.37.1 (2022-10-04) ## What's Changed ### :bug: Fixes * fix: support existing deprecated Rebalance.Strategy field usage by @spongecaptain in https://github.com/IBM/sarama/pull/2352 * fix(test): consumer group rebalance strategy compatibility by @Jacob-bzx in https://github.com/IBM/sarama/pull/2353 * fix(producer): replace time.After with time.Timer to avoid high memory usage by @Jacob-bzx in https://github.com/IBM/sarama/pull/2355 ## New Contributors * @spongecaptain made their first contribution in https://github.com/IBM/sarama/pull/2352 **Full Changelog**: https://github.com/IBM/sarama/compare/v1.37.0...v1.37.1 ## Version 1.37.0 (2022-09-28) ## What's Changed ### :rotating_light: Breaking Changes * Due to a change in [github.com/klauspost/compress v1.15.10](https://github.com/klauspost/compress/releases/tag/v1.15.10), Sarama v1.37.0 requires Go 1.17 going forward, unfortunately due to an oversight this wasn't reflected in the go.mod declaration at time of release. ### :tada: New Features / Improvements * feat(consumer): support multiple balance strategies by @Jacob-bzx in https://github.com/IBM/sarama/pull/2339 * feat(producer): transactional API by @ryarnyah in https://github.com/IBM/sarama/pull/2295 * feat(mocks): support key in MockFetchResponse. 
by @Skandalik in https://github.com/IBM/sarama/pull/2328 ### :bug: Fixes * fix: avoid panic when Metadata.RefreshFrequency is 0 by @Jacob-bzx in https://github.com/IBM/sarama/pull/2329 * fix(consumer): avoid pushing unrelated responses to paused children by @pkoutsovasilis in https://github.com/IBM/sarama/pull/2317 * fix: prevent metrics leak with cleanup by @auntan in https://github.com/IBM/sarama/pull/2340 * fix: race condition(may panic) when closing consumer group by @Jacob-bzx in https://github.com/IBM/sarama/pull/2331 * fix(consumer): default ResetInvalidOffsets to true by @dnwe in https://github.com/IBM/sarama/pull/2345 * Validate the `Config` when creating a mock producer/consumer by @joewreschnig in https://github.com/IBM/sarama/pull/2327 ### :package: Dependency updates * chore(deps): bump module github.com/pierrec/lz4/v4 to v4.1.16 by @dnwe in https://github.com/IBM/sarama/pull/2335 * chore(deps): bump golang.org/x/net digest to bea034e by @dnwe in https://github.com/IBM/sarama/pull/2333 * chore(deps): bump golang.org/x/sync digest to 7f9b162 by @dnwe in https://github.com/IBM/sarama/pull/2334 * chore(deps): bump golang.org/x/net digest to f486391 by @dnwe in https://github.com/IBM/sarama/pull/2348 * chore(deps): bump module github.com/shopify/toxiproxy/v2 to v2.5.0 by @dnwe in https://github.com/IBM/sarama/pull/2336 * chore(deps): bump module github.com/klauspost/compress to v1.15.11 by @dnwe in https://github.com/IBM/sarama/pull/2349 * chore(deps): bump module github.com/pierrec/lz4/v4 to v4.1.17 by @dnwe in https://github.com/IBM/sarama/pull/2350 ### :wrench: Maintenance * chore(ci): bump kafka-versions to latest by @dnwe in https://github.com/IBM/sarama/pull/2346 * chore(ci): bump go-versions to N and N-1 by @dnwe in https://github.com/IBM/sarama/pull/2347 ## New Contributors * @Jacob-bzx made their first contribution in https://github.com/IBM/sarama/pull/2329 * @pkoutsovasilis made their first contribution in https://github.com/IBM/sarama/pull/2317 * @Skandalik made their first contribution in https://github.com/IBM/sarama/pull/2328 * @auntan made their first contribution in https://github.com/IBM/sarama/pull/2340 * @ryarnyah made their first contribution in https://github.com/IBM/sarama/pull/2295 **Full Changelog**: https://github.com/IBM/sarama/compare/v1.36.0...v1.37.0 ## Version 1.36.0 (2022-08-11) ## What's Changed ### :tada: New Features / Improvements * feat: add option to propagate OffsetOutOfRange error by @dkolistratova in https://github.com/IBM/sarama/pull/2252 * feat(producer): expose ProducerMessage.byteSize() function by @k8scat in https://github.com/IBM/sarama/pull/2315 * feat(metrics): track consumer fetch request rates by @dnwe in https://github.com/IBM/sarama/pull/2299 ### :bug: Fixes * fix(consumer): avoid submitting empty fetch requests when paused by @raulnegreiros in https://github.com/IBM/sarama/pull/2143 ### :package: Dependency updates * chore(deps): bump module github.com/klauspost/compress to v1.15.9 by @dnwe in https://github.com/IBM/sarama/pull/2304 * chore(deps): bump golang.org/x/net digest to c7608f3 by @dnwe in https://github.com/IBM/sarama/pull/2301 * chore(deps): bump golangci/golangci-lint-action action to v3 by @dnwe in https://github.com/IBM/sarama/pull/2311 * chore(deps): bump golang.org/x/net digest to 07c6da5 by @dnwe in https://github.com/IBM/sarama/pull/2307 * chore(deps): bump github actions versions (major) by @dnwe in https://github.com/IBM/sarama/pull/2313 * chore(deps): bump module github.com/jcmturner/gofork to v1.7.6 by 
@dnwe in https://github.com/IBM/sarama/pull/2305 * chore(deps): bump golang.org/x/sync digest to 886fb93 by @dnwe in https://github.com/IBM/sarama/pull/2302 * chore(deps): bump module github.com/jcmturner/gokrb5/v8 to v8.4.3 by @dnwe in https://github.com/IBM/sarama/pull/2303 ### :wrench: Maintenance * chore: add kafka 3.1.1 to the version matrix by @dnwe in https://github.com/IBM/sarama/pull/2300 ### :heavy_plus_sign: Other Changes * Migrate off probot-CLA to new GitHub Action by @cursedcoder in https://github.com/IBM/sarama/pull/2294 * Forgot to remove cla probot by @cursedcoder in https://github.com/IBM/sarama/pull/2297 * chore(lint): re-enable a small amount of go-critic by @dnwe in https://github.com/IBM/sarama/pull/2312 ## New Contributors * @cursedcoder made their first contribution in https://github.com/IBM/sarama/pull/2294 * @dkolistratova made their first contribution in https://github.com/IBM/sarama/pull/2252 * @k8scat made their first contribution in https://github.com/IBM/sarama/pull/2315 **Full Changelog**: https://github.com/IBM/sarama/compare/v1.35.0...v1.36.0 ## Version 1.35.0 (2022-07-22) ## What's Changed ### :bug: Fixes * fix: fix metadata retry backoff invalid when get metadata failed by @Stephan14 in https://github.com/IBM/sarama/pull/2256 * fix(balance): sort and de-deplicate memberIDs by @dnwe in https://github.com/IBM/sarama/pull/2285 * fix: prevent DescribeLogDirs hang in admin client by @zerowidth in https://github.com/IBM/sarama/pull/2269 * fix: include assignment-less members in SyncGroup by @dnwe in https://github.com/IBM/sarama/pull/2292 ### :package: Dependency updates * chore(deps): bump module github.com/stretchr/testify to v1.8.0 by @dnwe in https://github.com/IBM/sarama/pull/2284 * chore(deps): bump module github.com/eapache/go-resiliency to v1.3.0 by @dnwe in https://github.com/IBM/sarama/pull/2283 * chore(deps): bump golang.org/x/net digest to 1185a90 by @dnwe in https://github.com/IBM/sarama/pull/2279 * chore(deps): bump module github.com/pierrec/lz4/v4 to v4.1.15 by @dnwe in https://github.com/IBM/sarama/pull/2281 * chore(deps): bump module github.com/klauspost/compress to v1.15.8 by @dnwe in https://github.com/IBM/sarama/pull/2280 ### :wrench: Maintenance * chore: rename `any` func to avoid identifier by @dnwe in https://github.com/IBM/sarama/pull/2272 * chore: add and test against kafka 3.2.0 by @dnwe in https://github.com/IBM/sarama/pull/2288 * chore: document Fetch protocol fields by @dnwe in https://github.com/IBM/sarama/pull/2289 ### :heavy_plus_sign: Other Changes * chore(ci): fix redirect with GITHUB_STEP_SUMMARY by @dnwe in https://github.com/IBM/sarama/pull/2286 * fix(test): permit ECONNRESET in TestInitProducerID by @dnwe in https://github.com/IBM/sarama/pull/2287 * fix: ensure empty or devel version valid by @dnwe in https://github.com/IBM/sarama/pull/2291 ## New Contributors * @zerowidth made their first contribution in https://github.com/IBM/sarama/pull/2269 **Full Changelog**: https://github.com/IBM/sarama/compare/v1.34.1...v1.35.0 ## Version 1.34.1 (2022-06-07) ## What's Changed ### :bug: Fixes * fix(examples): check session.Context().Done() in examples/consumergroup by @zxc111 in https://github.com/IBM/sarama/pull/2240 * fix(protocol): move AuthorizedOperations into GroupDescription of DescribeGroupsResponse by @aiquestion in https://github.com/IBM/sarama/pull/2247 * fix(protocol): tidyup DescribeGroupsResponse by @dnwe in https://github.com/IBM/sarama/pull/2248 * fix(consumer): range balance strategy not like reference by 
@njhartwell in https://github.com/IBM/sarama/pull/2245 ### :wrench: Maintenance * chore(ci): experiment with using tparse by @dnwe in https://github.com/IBM/sarama/pull/2236 * chore(deps): bump thirdparty dependencies to latest releases by @dnwe in https://github.com/IBM/sarama/pull/2242 ## New Contributors * @zxc111 made their first contribution in https://github.com/IBM/sarama/pull/2240 * @njhartwell made their first contribution in https://github.com/IBM/sarama/pull/2245 **Full Changelog**: https://github.com/IBM/sarama/compare/v1.34.0...v1.34.1 ## Version 1.34.0 (2022-05-30) ## What's Changed ### :tada: New Features / Improvements * KIP-345: support static membership by @aiquestion in https://github.com/IBM/sarama/pull/2230 ### :bug: Fixes * fix: KIP-368 use receiver goroutine to process all sasl v1 responses by @k-wall in https://github.com/IBM/sarama/pull/2234 ### :wrench: Maintenance * chore(deps): bump module github.com/pierrec/lz4 to v4 by @dnwe in https://github.com/IBM/sarama/pull/2231 * chore(deps): bump golang.org/x/net digest to 2e3eb7b by @dnwe in https://github.com/IBM/sarama/pull/2232 ## New Contributors * @aiquestion made their first contribution in https://github.com/IBM/sarama/pull/2230 **Full Changelog**: https://github.com/IBM/sarama/compare/v1.33.0...v1.34.0 ## Version 1.33.0 (2022-05-11) ## What's Changed ### :rotating_light: Breaking Changes **Note: with this change, the user of Sarama is required to use Go 1.13's errors.Is etc (rather then ==) when forming conditionals returned by this library.** * feat: make `ErrOutOfBrokers` wrap the underlying error that prevented connections to the brokers by @k-wall in https://github.com/IBM/sarama/pull/2131 ### :tada: New Features / Improvements * feat(message): add UnmarshalText method to CompressionCodec by @vincentbernat in https://github.com/IBM/sarama/pull/2172 * KIP-368 : Allow SASL Connections to Periodically Re-Authenticate by @k-wall in https://github.com/IBM/sarama/pull/2197 * feat: add batched CreateACLs func to ClusterAdmin by @nkostoulas in https://github.com/IBM/sarama/pull/2191 ### :bug: Fixes * fix: TestRecordBatchDecoding failing sporadically by @k-wall in https://github.com/IBM/sarama/pull/2154 * feat(test): add an fvt for broker deadlock by @dnwe in https://github.com/IBM/sarama/pull/2144 * fix: avoid starvation in subscriptionManager by @dnwe in https://github.com/IBM/sarama/pull/2109 * fix: remove "Is your cluster reachable?" 
from msg by @dnwe in https://github.com/IBM/sarama/pull/2165 * fix: remove trailing fullstop from error strings by @dnwe in https://github.com/IBM/sarama/pull/2166 * fix: return underlying sasl error message by @dnwe in https://github.com/IBM/sarama/pull/2164 * fix: potential data race on a global variable by @pior in https://github.com/IBM/sarama/pull/2171 * fix: AdminClient | CreateACLs | check for error in response, return error if needed by @omris94 in https://github.com/IBM/sarama/pull/2185 * producer: ensure that the management message (fin) is never "leaked" by @niamster in https://github.com/IBM/sarama/pull/2182 * fix: prevent RefreshBrokers leaking old brokers by @k-wall in https://github.com/IBM/sarama/pull/2203 * fix: prevent RefreshController leaking controller by @k-wall in https://github.com/IBM/sarama/pull/2204 * fix: prevent AsyncProducer retryBatch from leaking by @k-wall in https://github.com/IBM/sarama/pull/2208 * fix: prevent metrics leak when authenticate fails by @Stephan14 in https://github.com/IBM/sarama/pull/2205 * fix: prevent deadlock between subscription manager and consumer goroutines by @niamster in https://github.com/IBM/sarama/pull/2194 * fix: prevent idempotent producer epoch exhaustion by @ladislavmacoun in https://github.com/IBM/sarama/pull/2178 * fix(test): mockbroker offsetResponse vers behavior by @dnwe in https://github.com/IBM/sarama/pull/2213 * fix: cope with OffsetsLoadInProgress on Join+Sync by @dnwe in https://github.com/IBM/sarama/pull/2214 * fix: make default MaxWaitTime 500ms by @dnwe in https://github.com/IBM/sarama/pull/2227 ### :package: Dependency updates * chore(deps): bump xdg-go/scram and klauspost/compress by @dnwe in https://github.com/IBM/sarama/pull/2170 ### :wrench: Maintenance * fix(test): skip TestReadOnlyAndAllCommittedMessages by @dnwe in https://github.com/IBM/sarama/pull/2161 * fix(test): remove t.Parallel() by @dnwe in https://github.com/IBM/sarama/pull/2162 * chore(ci): bump along to Go 1.17+1.18 and bump golangci-lint by @dnwe in https://github.com/IBM/sarama/pull/2183 * chore: switch to multi-arch compatible docker images by @dnwe in https://github.com/IBM/sarama/pull/2210 ### :heavy_plus_sign: Other Changes * Remediate a number go-routine leaks (mainly test issues) by @k-wall in https://github.com/IBM/sarama/pull/2198 * chore: retract v1.32.0 due to #2150 by @dnwe in https://github.com/IBM/sarama/pull/2199 * chore: bump functional test timeout to 12m by @dnwe in https://github.com/IBM/sarama/pull/2200 * fix(admin): make DeleteRecords err consistent by @dnwe in https://github.com/IBM/sarama/pull/2226 ## New Contributors * @k-wall made their first contribution in https://github.com/IBM/sarama/pull/2154 * @pior made their first contribution in https://github.com/IBM/sarama/pull/2171 * @omris94 made their first contribution in https://github.com/IBM/sarama/pull/2185 * @vincentbernat made their first contribution in https://github.com/IBM/sarama/pull/2172 * @niamster made their first contribution in https://github.com/IBM/sarama/pull/2182 * @ladislavmacoun made their first contribution in https://github.com/IBM/sarama/pull/2178 * @nkostoulas made their first contribution in https://github.com/IBM/sarama/pull/2191 **Full Changelog**: https://github.com/IBM/sarama/compare/v1.32.0...v1.33.0 ## Version 1.32.0 (2022-02-24) ### ⚠️ This release has been superseded by v1.33.0 and should _not_ be used. 
* chore: retract v1.32.0 due to #2150 by @dnwe in https://github.com/IBM/sarama/pull/2199 --- ## What's Changed ### :bug: Fixes * Fix deadlock when closing Broker in brokerProducer by @slaunay in https://github.com/IBM/sarama/pull/2133 ### :package: Dependency updates * chore: refresh dependencies to latest by @dnwe in https://github.com/IBM/sarama/pull/2159 ### :wrench: Maintenance * fix: rework RebalancingMultiplePartitions test by @dnwe in https://github.com/IBM/sarama/pull/2130 * fix(test): use Sarama transactional producer by @dnwe in https://github.com/IBM/sarama/pull/1939 * chore: enable t.Parallel() wherever possible by @dnwe in https://github.com/IBM/sarama/pull/2138 ### :heavy_plus_sign: Other Changes * chore: restrict to 1 testbinary at once by @dnwe in https://github.com/IBM/sarama/pull/2145 * chore: restrict to 1 parallel test at once by @dnwe in https://github.com/IBM/sarama/pull/2146 * Remove myself from codeowners by @bai in https://github.com/IBM/sarama/pull/2147 * chore: add retractions for known bad versions by @dnwe in https://github.com/IBM/sarama/pull/2160 **Full Changelog**: https://github.com/IBM/sarama/compare/v1.31.1...v1.32.0 ## Version 1.31.1 (2022-02-01) - #2126 - @bai - Populate missing kafka versions - #2124 - @bai - Add Kafka 3.1.0 to CI matrix, migrate to bitnami kafka image - #2123 - @bai - Update klauspost/compress to 0.14 - #2122 - @dnwe - fix(test): make it simpler to re-use toxiproxy - #2119 - @bai - Add Kafka 3.1.0 version number - #2005 - @raulnegreiros - feat: add methods to pause/resume consumer's consumption - #2051 - @seveas - Expose the TLS connection state of a broker connection - #2117 - @wuhuizuo - feat: add method MockApiVersionsResponse.SetApiKeys - #2110 - @dnwe - fix: ensure heartbeats only stop after cleanup - #2113 - @mosceo - Fix typo ## Version 1.31.0 (2022-01-18) ## What's Changed ### :tada: New Features / Improvements * feat: expose IncrementalAlterConfigs API in admin.go by @fengyinqiao in https://github.com/IBM/sarama/pull/2088 * feat: allow AsyncProducer to have MaxOpenRequests inflight produce requests per broker by @xujianhai666 in https://github.com/IBM/sarama/pull/1686 * Support request pipelining in AsyncProducer by @slaunay in https://github.com/IBM/sarama/pull/2094 ### :bug: Fixes * fix(test): add fluent interface for mocks where missing by @grongor in https://github.com/IBM/sarama/pull/2080 * fix(test): test for ConsumePartition with OffsetOldest by @grongor in https://github.com/IBM/sarama/pull/2081 * fix: set HWMO during creation of partitionConsumer (fix incorrect HWMO before first fetch) by @grongor in https://github.com/IBM/sarama/pull/2082 * fix: ignore non-nil but empty error strings in Describe/Alter client quotas responses by @agriffaut in https://github.com/IBM/sarama/pull/2096 * fix: skip over KIP-482 tagged fields by @dnwe in https://github.com/IBM/sarama/pull/2107 * fix: clear preferredReadReplica if broker shutdown by @dnwe in https://github.com/IBM/sarama/pull/2108 * fix(test): correct wrong offsets in mock Consumer by @grongor in https://github.com/IBM/sarama/pull/2078 * fix: correct bugs in DescribeGroupsResponse by @dnwe in https://github.com/IBM/sarama/pull/2111 ### :wrench: Maintenance * chore: bump runtime and test dependencies by @dnwe in https://github.com/IBM/sarama/pull/2100 ### :memo: Documentation * docs: refresh README.md for Kafka 3.0.0 by @dnwe in https://github.com/IBM/sarama/pull/2099 ### :heavy_plus_sign: Other Changes * Fix typo by @mosceo in https://github.com/IBM/sarama/pull/2084 ## New 
Contributors * @grongor made their first contribution in https://github.com/IBM/sarama/pull/2080 * @fengyinqiao made their first contribution in https://github.com/IBM/sarama/pull/2088 * @xujianhai666 made their first contribution in https://github.com/IBM/sarama/pull/1686 * @mosceo made their first contribution in https://github.com/IBM/sarama/pull/2084 **Full Changelog**: https://github.com/IBM/sarama/compare/v1.30.1...v1.31.0 ## Version 1.30.1 (2021-12-04) ## What's Changed ### :tada: New Features / Improvements * feat(zstd): pass level param through to compress/zstd encoder by @lizthegrey in https://github.com/IBM/sarama/pull/2045 ### :bug: Fixes * fix: set min-go-version to 1.16 by @troyanov in https://github.com/IBM/sarama/pull/2048 * logger: fix debug logs' formatting directives by @utrack in https://github.com/IBM/sarama/pull/2054 * fix: stuck on the batch with zero records length by @pachmu in https://github.com/IBM/sarama/pull/2057 * fix: only update preferredReadReplica if valid by @dnwe in https://github.com/IBM/sarama/pull/2076 ### :wrench: Maintenance * chore: add release notes configuration by @dnwe in https://github.com/IBM/sarama/pull/2046 * chore: confluent platform version bump by @lizthegrey in https://github.com/IBM/sarama/pull/2070 ## Notes * ℹ️ from Sarama 1.30.x onward the minimum version of Go toolchain required is 1.16.x ## New Contributors * @troyanov made their first contribution in https://github.com/IBM/sarama/pull/2048 * @lizthegrey made their first contribution in https://github.com/IBM/sarama/pull/2045 * @utrack made their first contribution in https://github.com/IBM/sarama/pull/2054 * @pachmu made their first contribution in https://github.com/IBM/sarama/pull/2057 **Full Changelog**: https://github.com/IBM/sarama/compare/v1.30.0...v1.30.1 ## Version 1.30.0 (2021-09-29) ⚠️ This release has been superseded by v1.30.1 and should _not_ be used. 
**regression**: enabling rackawareness causes severe throughput drops (#2071) — fixed in v1.30.1 via #2076 --- ℹ️ **Note: from Sarama 1.30.0 the minimum version of Go toolchain required is 1.16.x** --- # New Features / Improvements - #1983 - @zifengyu - allow configure AllowAutoTopicCreation argument in metadata refresh - #2000 - @matzew - Using xdg-go module for SCRAM - #2003 - @gdm85 - feat: add counter metrics for consumer group join/sync and their failures - #1992 - @zhaomoran - feat: support SaslHandshakeRequest v0 for SCRAM - #2006 - @faillefer - Add support for DeleteOffsets operation - #1909 - @agriffaut - KIP-546 Client quota APIs - #1633 - @aldelucca1 - feat: allow balance strategies to provide initial state - #1275 - @dnwe - log: add a DebugLogger that proxies to Logger - #2018 - @dnwe - feat: use DebugLogger reference for goldenpath log - #2019 - @dnwe - feat: add logging & a metric for producer throttle - #2023 - @dnwe - feat: add Controller() to ClusterAdmin interface - #2025 - @dnwe - feat: support ApiVersionsRequest V3 protocol - #2028 - @dnwe - feat: send ApiVersionsRequest on broker open - #2034 - @bai - Add support for kafka 3.0.0 # Fixes - #1990 - @doxsch - fix: correctly pass ValidateOnly through to CreatePartitionsRequest - #1988 - @LubergAlexander - fix: correct WithCustomFallbackPartitioner implementation - #2001 - @HurSungYun - docs: inform AsyncProducer Close pitfalls - #1973 - @qiangmzsx - fix: metrics still taking up too much memory when metrics.UseNilMetrics=true - #2007 - @bai - Add support for Go 1.17 - #2009 - @dnwe - fix: enable nilerr linter and fix iferr checks - #2010 - @dnwe - chore: enable exportloopref and misspell linters - #2013 - @faillefer - fix(test): disable encoded response/request check when map contains multiple elements - #2015 - @bai - Change default branch to main - #1718 - @crivera-fastly - fix: correct the error handling in client.InitProducerID() - #1984 - @null-sleep - fix(test): bump confluentPlatformVersion from 6.1.1 to 6.2.0 - #2016 - @dnwe - chore: replace deprecated Go calls - #2017 - @dnwe - chore: delete legacy vagrant script - #2020 - @dnwe - fix(test): remove testLogger from TrackLeader test - #2024 - @dnwe - chore: bump toxiproxy container to v2.1.5 - #2033 - @bai - Update dependencies - #2031 - @gdm85 - docs: do not mention buffered messages in sync producer Close method - #2035 - @dnwe - chore: populate the missing kafka versions - #2038 - @dnwe - feat: add a fuzzing workflow to github actions ## New Contributors * @zifengyu made their first contribution in https://github.com/IBM/sarama/pull/1983 * @doxsch made their first contribution in https://github.com/IBM/sarama/pull/1990 * @LubergAlexander made their first contribution in https://github.com/IBM/sarama/pull/1988 * @HurSungYun made their first contribution in https://github.com/IBM/sarama/pull/2001 * @gdm85 made their first contribution in https://github.com/IBM/sarama/pull/2003 * @qiangmzsx made their first contribution in https://github.com/IBM/sarama/pull/1973 * @zhaomoran made their first contribution in https://github.com/IBM/sarama/pull/1992 * @faillefer made their first contribution in https://github.com/IBM/sarama/pull/2006 * @crivera-fastly made their first contribution in https://github.com/IBM/sarama/pull/1718 * @null-sleep made their first contribution in https://github.com/IBM/sarama/pull/1984 **Full Changelog**: https://github.com/IBM/sarama/compare/v1.29.1...v1.30.0 ## Version 1.29.1 (2021-06-24) # New Features / Improvements - #1966 - @ajanikow - 
KIP-339: Add Incremental Config updates API - #1964 - @ajanikow - Add DelegationToken ResourceType # Fixes - #1962 - @hanxiaolin - fix(consumer): call interceptors when MaxProcessingTime expire - #1971 - @KerryJava - fix kafka-producer-performance throughput panic - #1968 - @dnwe - chore: bump golang.org/x versions - #1956 - @joewreschnig - Allow checking the entire `ProducerMessage` in the mock producers - #1963 - @dnwe - fix: ensure backoff timer is re-used - #1949 - @dnwe - fix: explicitly use uint64 for payload length ## Version 1.29.0 (2021-05-07) ### New Features / Improvements - #1917 - @arkady-emelyanov - KIP-554: Add Broker-side SCRAM Config API - #1869 - @wyndhblb - zstd: encode+decode performance improvements - #1541 - @izolight - add String, (Un)MarshalText for acl types. - #1921 - @bai - Add support for Kafka 2.8.0 ### Fixes - #1936 - @dnwe - fix(consumer): follow preferred broker - #1933 - @ozzieba - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication - #1929 - @celrenheit - Handle isolation level in Offset(Request|Response) and require stable offset in FetchOffset(Request|Response) - #1926 - @dnwe - fix: correct initial CodeQL findings - #1925 - @bai - Test out CodeQL - #1923 - @bestgopher - Remove redundant switch-case, fix doc typos - #1922 - @bai - Update go dependencies - #1898 - @mmaslankaprv - Parsing only known control batches value - #1887 - @withshubh - Fix: issues affecting code quality ## Version 1.28.0 (2021-02-15) **Note that with this release we change `RoundRobinBalancer` strategy to match Java client behavior. See #1788 for details.** - #1870 - @kvch - Update Kerberos library to latest major - #1876 - @bai - Update docs, reference pkg.go.dev - #1846 - @wclaeys - Do not ignore Consumer.Offsets.AutoCommit.Enable config on Close - #1747 - @XSAM - fix: mock sync producer does not handle the offset while sending messages - #1863 - @bai - Add support for Kafka 2.7.0 + update lz4 and klauspost/compress dependencies - #1788 - @kzinglzy - feat[balance_strategy]: announcing a new round robin balance strategy - #1862 - @bai - Fix CI setenv permissions issues - #1832 - @ilyakaznacheev - Update Godoc link to pkg.go.dev - #1822 - @danp - KIP-392: Allow consumers to fetch from closest replica ## Version 1.27.2 (2020-10-21) ### Improvements #1750 - @krantideep95 Adds missing mock responses for mocking consumer group ## Fixes #1817 - reverts #1785 - Add private method to Client interface to prevent implementation ## Version 1.27.1 (2020-10-07) ### Improvements #1775 - @d1egoaz - Adds a Producer Interceptor example #1781 - @justin-chen - Refresh brokers given list of seed brokers #1784 - @justin-chen - Add randomize seed broker method #1790 - @d1egoaz - remove example binary #1798 - @bai - Test against Go 1.15 #1785 - @justin-chen - Add private method to Client interface to prevent implementation #1802 - @uvw - Support Go 1.13 error unwrapping ## Fixes #1791 - @stanislavkozlovski - bump default version to 1.0.0 ## Version 1.27.0 (2020-08-11) ### Improvements #1466 - @rubenvp8510 - Expose kerberos fast negotiation configuration #1695 - @KJTsanaktsidis - Use docker-compose to run the functional tests #1699 - @wclaeys - Consumer group support for manually comitting offsets #1714 - @bai - Bump Go to version 1.14.3, golangci-lint to 1.27.0 #1726 - @d1egoaz - Include zstd on the functional tests #1730 - @d1egoaz - KIP-42 Add producer and consumer interceptors #1738 - @varun06 - fixed variable names that are named same as some std lib package names #1741 - 
@varun06 - updated zstd dependency to latest v1.10.10 #1743 - @varun06 - Fixed declaration dependencies and other lint issues in code base #1763 - @alrs - remove deprecated tls options from test #1769 - @bai - Add support for Kafka 2.6.0 ## Fixes #1697 - @kvch - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication #1744 - @alrs - Fix isBalanced Function Signature ## Version 1.26.4 (2020-05-19) ## Fixes - #1701 - @d1egoaz - Set server name only for the current broker - #1694 - @dnwe - testfix: set KAFKA_HEAP_OPTS for zk and kafka ## Version 1.26.3 (2020-05-07) ## Fixes - #1692 - @d1egoaz - Set tls ServerName to fix issue: either ServerName or InsecureSkipVerify must be specified in the tls.Config ## Version 1.26.2 (2020-05-06) ## ⚠️ Known Issues This release has been marked as not ready for production and may be unstable, please use v1.26.4. ### Improvements - #1560 - @iyacontrol - add sync pool for gzip 1-9 - #1605 - @dnwe - feat: protocol support for V11 fetch w/ rackID - #1617 - @sladkoff / @dwi-di / @random-dwi - Add support for alter/list partition reassignements APIs - #1632 - @bai - Add support for Go 1.14 - #1640 - @random-dwi - Feature/fix list partition reassignments - #1646 - @mimaison - Add DescribeLogDirs to admin client - #1667 - @bai - Add support for kafka 2.5.0 ## Fixes - #1594 - @sladkoff - Sets ConfigEntry.Default flag in addition to the ConfigEntry.Source for Kafka versions > V1_1_0_0 - #1601 - @alrs - fix: remove use of testing.T.FailNow() inside goroutine - #1602 - @d1egoaz - adds a note about consumer groups Consume method - #1607 - @darklore - Fix memory leak when Broker.Open and Broker.Close called repeatedly - #1613 - @wblakecaldwell - Updated "retrying" log message when BackoffFunc implemented - #1614 - @alrs - produce_response.go: Remove Unused Functions - #1619 - @alrs - tools/kafka-producer-performance: prune unused flag variables - #1639 - @agriffaut - Handle errors with no message but error code - #1643 - @kzinglzy - fix `config.net.keepalive` - #1644 - @KJTsanaktsidis - Fix brokers continually allocating new Session IDs - #1645 - @Stephan14 - Remove broker(s) which no longer exist in metadata - #1650 - @lavoiesl - Return the response error in heartbeatLoop - #1661 - @KJTsanaktsidis - Fix "broker received out of order sequence" when brokers die - #1666 - @KevinJCross - Bugfix: Allow TLS connections to work over socks proxy. ## Version 1.26.1 (2020-02-04) Improvements: - Add requests-in-flight metric ([1539](https://github.com/IBM/sarama/pull/1539)) - Fix misleading example for cluster admin ([1595](https://github.com/IBM/sarama/pull/1595)) - Replace Travis with GitHub Actions, linters housekeeping ([1573](https://github.com/IBM/sarama/pull/1573)) - Allow BalanceStrategy to provide custom assignment data ([1592](https://github.com/IBM/sarama/pull/1592)) Bug Fixes: - Adds back Consumer.Offsets.CommitInterval to fix API ([1590](https://github.com/IBM/sarama/pull/1590)) - Fix error message s/CommitInterval/AutoCommit.Interval ([1589](https://github.com/IBM/sarama/pull/1589)) ## Version 1.26.0 (2020-01-24) New Features: - Enable zstd compression ([1574](https://github.com/IBM/sarama/pull/1574), [1582](https://github.com/IBM/sarama/pull/1582)) - Support headers in tools kafka-console-producer ([1549](https://github.com/IBM/sarama/pull/1549)) Improvements: - Add SASL AuthIdentity to SASL frames (authzid) ([1585](https://github.com/IBM/sarama/pull/1585)). 
Bug Fixes: - Sending messages with ZStd compression enabled fails in multiple ways ([1252](https://github.com/IBM/sarama/issues/1252)). - Use the broker for any admin on BrokerConfig ([1571](https://github.com/IBM/sarama/pull/1571)). - Set DescribeConfigRequest Version field ([1576](https://github.com/IBM/sarama/pull/1576)). - ConsumerGroup flooding logs with client/metadata update req ([1578](https://github.com/IBM/sarama/pull/1578)). - MetadataRequest version in DescribeCluster ([1580](https://github.com/IBM/sarama/pull/1580)). - Fix deadlock in consumer group handleError ([1581](https://github.com/IBM/sarama/pull/1581)) - Fill in the Fetch{Request,Response} protocol ([1582](https://github.com/IBM/sarama/pull/1582)). - Retry topic request on ControllerNotAvailable ([1586](https://github.com/IBM/sarama/pull/1586)). ## Version 1.25.0 (2020-01-13) New Features: - Support TLS protocol in kafka-producer-performance ([1538](https://github.com/IBM/sarama/pull/1538)). - Add support for kafka 2.4.0 ([1552](https://github.com/IBM/sarama/pull/1552)). Improvements: - Allow the Consumer to disable auto-commit offsets ([1164](https://github.com/IBM/sarama/pull/1164)). - Produce records with consistent timestamps ([1455](https://github.com/IBM/sarama/pull/1455)). Bug Fixes: - Fix incorrect SetTopicMetadata name mentions ([1534](https://github.com/IBM/sarama/pull/1534)). - Fix client.tryRefreshMetadata Println ([1535](https://github.com/IBM/sarama/pull/1535)). - Fix panic on calling updateMetadata on closed client ([1531](https://github.com/IBM/sarama/pull/1531)). - Fix possible faulty metrics in TestFuncProducing ([1545](https://github.com/IBM/sarama/pull/1545)). ## Version 1.24.1 (2019-10-31) New Features: - Add DescribeLogDirs Request/Response pair ([1520](https://github.com/IBM/sarama/pull/1520)). Bug Fixes: - Fix ClusterAdmin returning invalid controller ID on DescribeCluster ([1518](https://github.com/IBM/sarama/pull/1518)). - Fix issue with consumergroup not rebalancing when new partition is added ([1525](https://github.com/IBM/sarama/pull/1525)). - Ensure consistent use of read/write deadlines ([1529](https://github.com/IBM/sarama/pull/1529)). ## Version 1.24.0 (2019-10-09) New Features: - Add sticky partition assignor ([1416](https://github.com/IBM/sarama/pull/1416)). - Switch from cgo zstd package to pure Go implementation ([1477](https://github.com/IBM/sarama/pull/1477)). Improvements: - Allow creating ClusterAdmin from client ([1415](https://github.com/IBM/sarama/pull/1415)). - Set KafkaVersion in ListAcls method ([1452](https://github.com/IBM/sarama/pull/1452)). - Set request version in CreateACL ClusterAdmin method ([1458](https://github.com/IBM/sarama/pull/1458)). - Set request version in DeleteACL ClusterAdmin method ([1461](https://github.com/IBM/sarama/pull/1461)). - Handle missed error codes on TopicMetaDataRequest and GroupCoordinatorRequest ([1464](https://github.com/IBM/sarama/pull/1464)). - Remove direct usage of gofork ([1465](https://github.com/IBM/sarama/pull/1465)). - Add support for Go 1.13 ([1478](https://github.com/IBM/sarama/pull/1478)). - Improve behavior of NewMockListAclsResponse ([1481](https://github.com/IBM/sarama/pull/1481)). Bug Fixes: - Fix race condition in consumergroup example ([1434](https://github.com/IBM/sarama/pull/1434)). - Fix brokerProducer goroutine leak ([1442](https://github.com/IBM/sarama/pull/1442)). - Use released version of lz4 library ([1469](https://github.com/IBM/sarama/pull/1469)). 
- Set correct version in MockDeleteTopicsResponse ([1484](https://github.com/IBM/sarama/pull/1484)). - Fix CLI help message typo ([1494](https://github.com/IBM/sarama/pull/1494)). Known Issues: - Please **don't** use Zstd, as it doesn't work right now. See https://github.com/IBM/sarama/issues/1252 ## Version 1.23.1 (2019-07-22) Bug Fixes: - Fix fetch delete bug record ([1425](https://github.com/IBM/sarama/pull/1425)). - Handle SASL/OAUTHBEARER token rejection ([1428](https://github.com/IBM/sarama/pull/1428)). ## Version 1.23.0 (2019-07-02) New Features: - Add support for Kafka 2.3.0 ([1418](https://github.com/IBM/sarama/pull/1418)). - Add support for ListConsumerGroupOffsets v2 ([1374](https://github.com/IBM/sarama/pull/1374)). - Add support for DeleteConsumerGroup ([1417](https://github.com/IBM/sarama/pull/1417)). - Add support for SASLVersion configuration ([1410](https://github.com/IBM/sarama/pull/1410)). - Add kerberos support ([1366](https://github.com/IBM/sarama/pull/1366)). Improvements: - Improve sasl_scram_client example ([1406](https://github.com/IBM/sarama/pull/1406)). - Fix shutdown and race-condition in consumer-group example ([1404](https://github.com/IBM/sarama/pull/1404)). - Add support for error codes 77—81 ([1397](https://github.com/IBM/sarama/pull/1397)). - Pool internal objects allocated per message ([1385](https://github.com/IBM/sarama/pull/1385)). - Reduce packet decoder allocations ([1373](https://github.com/IBM/sarama/pull/1373)). - Support timeout when fetching metadata ([1359](https://github.com/IBM/sarama/pull/1359)). Bug Fixes: - Fix fetch size integer overflow ([1376](https://github.com/IBM/sarama/pull/1376)). - Handle and log throttled FetchResponses ([1383](https://github.com/IBM/sarama/pull/1383)). - Refactor misspelled word Resouce to Resource ([1368](https://github.com/IBM/sarama/pull/1368)). ## Version 1.22.1 (2019-04-29) Improvements: - Use zstd 1.3.8 ([1350](https://github.com/IBM/sarama/pull/1350)). - Add support for SaslHandshakeRequest v1 ([1354](https://github.com/IBM/sarama/pull/1354)). Bug Fixes: - Fix V5 MetadataRequest nullable topics array ([1353](https://github.com/IBM/sarama/pull/1353)). - Use a different SCRAM client for each broker connection ([1349](https://github.com/IBM/sarama/pull/1349)). - Fix AllowAutoTopicCreation for MetadataRequest greater than v3 ([1344](https://github.com/IBM/sarama/pull/1344)). ## Version 1.22.0 (2019-04-09) New Features: - Add Offline Replicas Operation to Client ([1318](https://github.com/IBM/sarama/pull/1318)). - Allow using proxy when connecting to broker ([1326](https://github.com/IBM/sarama/pull/1326)). - Implement ReadCommitted ([1307](https://github.com/IBM/sarama/pull/1307)). - Add support for Kafka 2.2.0 ([1331](https://github.com/IBM/sarama/pull/1331)). - Add SASL SCRAM-SHA-512 and SCRAM-SHA-256 mechanisms ([1331](https://github.com/IBM/sarama/pull/1295)). Improvements: - Unregister all broker metrics on broker stop ([1232](https://github.com/IBM/sarama/pull/1232)). - Add SCRAM authentication example ([1303](https://github.com/IBM/sarama/pull/1303)). - Add consumergroup examples ([1304](https://github.com/IBM/sarama/pull/1304)). - Expose consumer batch size metric ([1296](https://github.com/IBM/sarama/pull/1296)). - Add TLS options to console producer and consumer ([1300](https://github.com/IBM/sarama/pull/1300)). - Reduce client close bookkeeping ([1297](https://github.com/IBM/sarama/pull/1297)). - Satisfy error interface in create responses ([1154](https://github.com/IBM/sarama/pull/1154)). 
- Please lint gods ([1346](https://github.com/IBM/sarama/pull/1346)). Bug Fixes: - Fix multi consumer group instance crash ([1338](https://github.com/IBM/sarama/pull/1338)). - Update lz4 to latest version ([1347](https://github.com/IBM/sarama/pull/1347)). - Retry ErrNotCoordinatorForConsumer in new consumergroup session ([1231](https://github.com/IBM/sarama/pull/1231)). - Fix cleanup error handler ([1332](https://github.com/IBM/sarama/pull/1332)). - Fix race condition in PartitionConsumer ([1156](https://github.com/IBM/sarama/pull/1156)). ## Version 1.21.0 (2019-02-24) New Features: - Add CreateAclRequest, DescribeAclRequest, DeleteAclRequest ([1236](https://github.com/IBM/sarama/pull/1236)). - Add DescribeTopic, DescribeConsumerGroup, ListConsumerGroups, ListConsumerGroupOffsets admin requests ([1178](https://github.com/IBM/sarama/pull/1178)). - Implement SASL/OAUTHBEARER ([1240](https://github.com/IBM/sarama/pull/1240)). Improvements: - Add Go mod support ([1282](https://github.com/IBM/sarama/pull/1282)). - Add error codes 73—76 ([1239](https://github.com/IBM/sarama/pull/1239)). - Add retry backoff function ([1160](https://github.com/IBM/sarama/pull/1160)). - Maintain metadata in the producer even when retries are disabled ([1189](https://github.com/IBM/sarama/pull/1189)). - Include ReplicaAssignment in ListTopics ([1274](https://github.com/IBM/sarama/pull/1274)). - Add producer performance tool ([1222](https://github.com/IBM/sarama/pull/1222)). - Add support for LogAppend timestamps ([1258](https://github.com/IBM/sarama/pull/1258)). Bug Fixes: - Fix potential deadlock when a heartbeat request fails ([1286](https://github.com/IBM/sarama/pull/1286)). - Fix consuming compacted topic ([1227](https://github.com/IBM/sarama/pull/1227)). - Set correct Kafka version for DescribeConfigsRequest v1 ([1277](https://github.com/IBM/sarama/pull/1277)). - Update kafka test version ([1273](https://github.com/IBM/sarama/pull/1273)). ## Version 1.20.1 (2019-01-10) New Features: - Add optional replica id in offset request ([1100](https://github.com/IBM/sarama/pull/1100)). Improvements: - Implement DescribeConfigs Request + Response v1 & v2 ([1230](https://github.com/IBM/sarama/pull/1230)). - Reuse compression objects ([1185](https://github.com/IBM/sarama/pull/1185)). - Switch from png to svg for GoDoc link in README ([1243](https://github.com/IBM/sarama/pull/1243)). - Fix typo in deprecation notice for FetchResponseBlock.Records ([1242](https://github.com/IBM/sarama/pull/1242)). - Fix typos in consumer metadata response file ([1244](https://github.com/IBM/sarama/pull/1244)). Bug Fixes: - Revert to individual msg retries for non-idempotent ([1203](https://github.com/IBM/sarama/pull/1203)). - Respect MaxMessageBytes limit for uncompressed messages ([1141](https://github.com/IBM/sarama/pull/1141)). ## Version 1.20.0 (2018-12-10) New Features: - Add support for zstd compression ([#1170](https://github.com/IBM/sarama/pull/1170)). - Add support for Idempotent Producer ([#1152](https://github.com/IBM/sarama/pull/1152)). - Add support for Kafka 2.1.0 ([#1229](https://github.com/IBM/sarama/pull/1229)). - Add support for OffsetCommit request/response pairs versions v1 to v5 ([#1201](https://github.com/IBM/sarama/pull/1201)). - Add support for OffsetFetch request/response pair up to version v5 ([#1198](https://github.com/IBM/sarama/pull/1198)). Improvements: - Export broker's Rack setting ([#1173](https://github.com/IBM/sarama/pull/1173)). 
- Always use latest patch version of Go on CI ([#1202](https://github.com/IBM/sarama/pull/1202)). - Add error codes 61 to 72 ([#1195](https://github.com/IBM/sarama/pull/1195)). Bug Fixes: - Fix build without cgo ([#1182](https://github.com/IBM/sarama/pull/1182)). - Fix go vet suggestion in consumer group file ([#1209](https://github.com/IBM/sarama/pull/1209)). - Fix typos in code and comments ([#1228](https://github.com/IBM/sarama/pull/1228)). ## Version 1.19.0 (2018-09-27) New Features: - Implement a higher-level consumer group ([#1099](https://github.com/IBM/sarama/pull/1099)). Improvements: - Add support for Go 1.11 ([#1176](https://github.com/IBM/sarama/pull/1176)). Bug Fixes: - Fix encoding of `MetadataResponse` with version 2 and higher ([#1174](https://github.com/IBM/sarama/pull/1174)). - Fix race condition in mock async producer ([#1174](https://github.com/IBM/sarama/pull/1174)). ## Version 1.18.0 (2018-09-07) New Features: - Make `Partitioner.RequiresConsistency` vary per-message ([#1112](https://github.com/IBM/sarama/pull/1112)). - Add customizable partitioner ([#1118](https://github.com/IBM/sarama/pull/1118)). - Add `ClusterAdmin` support for `CreateTopic`, `DeleteTopic`, `CreatePartitions`, `DeleteRecords`, `DescribeConfig`, `AlterConfig`, `CreateACL`, `ListAcls`, `DeleteACL` ([#1055](https://github.com/IBM/sarama/pull/1055)). Improvements: - Add support for Kafka 2.0.0 ([#1149](https://github.com/IBM/sarama/pull/1149)). - Allow setting `LocalAddr` when dialing an address to support multi-homed hosts ([#1123](https://github.com/IBM/sarama/pull/1123)). - Simpler offset management ([#1127](https://github.com/IBM/sarama/pull/1127)). Bug Fixes: - Fix mutation of `ProducerMessage.MetaData` when producing to Kafka ([#1110](https://github.com/IBM/sarama/pull/1110)). - Fix consumer block when response did not contain all the expected topic/partition blocks ([#1086](https://github.com/IBM/sarama/pull/1086)). - Fix consumer block when response contains only control messages ([#1115](https://github.com/IBM/sarama/pull/1115)). - Add timeout config for ClusterAdmin requests ([#1142](https://github.com/IBM/sarama/pull/1142)). - Add version check when producing message with headers ([#1117](https://github.com/IBM/sarama/pull/1117)). - Fix `MetadataRequest` for empty list of topics ([#1132](https://github.com/IBM/sarama/pull/1132)). - Fix producer topic metadata on-demand fetch when topic error happens in metadata response ([#1125](https://github.com/IBM/sarama/pull/1125)). ## Version 1.17.0 (2018-05-30) New Features: - Add support for gzip compression levels ([#1044](https://github.com/IBM/sarama/pull/1044)). - Add support for Metadata request/response pairs versions v1 to v5 ([#1047](https://github.com/IBM/sarama/pull/1047), [#1069](https://github.com/IBM/sarama/pull/1069)). - Add versioning to JoinGroup request/response pairs ([#1098](https://github.com/IBM/sarama/pull/1098)) - Add support for CreatePartitions, DeleteGroups, DeleteRecords request/response pairs ([#1065](https://github.com/IBM/sarama/pull/1065), [#1096](https://github.com/IBM/sarama/pull/1096), [#1027](https://github.com/IBM/sarama/pull/1027)). - Add `Controller()` method to Client interface ([#1063](https://github.com/IBM/sarama/pull/1063)). Improvements: - ConsumerMetadataReq/Resp has been migrated to FindCoordinatorReq/Resp ([#1010](https://github.com/IBM/sarama/pull/1010)). - Expose missing protocol parts: `msgSet` and `recordBatch` ([#1049](https://github.com/IBM/sarama/pull/1049)). 
- Add support for v1 DeleteTopics Request ([#1052](https://github.com/IBM/sarama/pull/1052)). - Add support for Go 1.10 ([#1064](https://github.com/IBM/sarama/pull/1064)). - Claim support for Kafka 1.1.0 ([#1073](https://github.com/IBM/sarama/pull/1073)). Bug Fixes: - Fix FindCoordinatorResponse.encode to allow nil Coordinator ([#1050](https://github.com/IBM/sarama/pull/1050), [#1051](https://github.com/IBM/sarama/pull/1051)). - Clear all metadata when we have the latest topic info ([#1033](https://github.com/IBM/sarama/pull/1033)). - Make `PartitionConsumer.Close` idempotent ([#1092](https://github.com/IBM/sarama/pull/1092)). ## Version 1.16.0 (2018-02-12) New Features: - Add support for the Create/Delete Topics request/response pairs ([#1007](https://github.com/IBM/sarama/pull/1007), [#1008](https://github.com/IBM/sarama/pull/1008)). - Add support for the Describe/Create/Delete ACL request/response pairs ([#1009](https://github.com/IBM/sarama/pull/1009)). - Add support for the five transaction-related request/response pairs ([#1016](https://github.com/IBM/sarama/pull/1016)). Improvements: - Permit setting version on mock producer responses ([#999](https://github.com/IBM/sarama/pull/999)). - Add `NewMockBrokerListener` helper for testing TLS connections ([#1019](https://github.com/IBM/sarama/pull/1019)). - Changed the default value for `Consumer.Fetch.Default` from 32KiB to 1MiB which results in much higher throughput in most cases ([#1024](https://github.com/IBM/sarama/pull/1024)). - Reuse the `time.Ticker` across fetch requests in the PartitionConsumer to reduce CPU and memory usage when processing many partitions ([#1028](https://github.com/IBM/sarama/pull/1028)). - Assign relative offsets to messages in the producer to save the brokers a recompression pass ([#1002](https://github.com/IBM/sarama/pull/1002), [#1015](https://github.com/IBM/sarama/pull/1015)). Bug Fixes: - Fix producing uncompressed batches with the new protocol format ([#1032](https://github.com/IBM/sarama/issues/1032)). - Fix consuming compacted topics with the new protocol format ([#1005](https://github.com/IBM/sarama/issues/1005)). - Fix consuming topics with a mix of protocol formats ([#1021](https://github.com/IBM/sarama/issues/1021)). - Fix consuming when the broker includes multiple batches in a single response ([#1022](https://github.com/IBM/sarama/issues/1022)). - Fix detection of `PartialTrailingMessage` when the partial message was truncated before the magic value indicating its version ([#1030](https://github.com/IBM/sarama/pull/1030)). - Fix expectation-checking in the mock of `SyncProducer.SendMessages` ([#1035](https://github.com/IBM/sarama/pull/1035)). ## Version 1.15.0 (2017-12-08) New Features: - Claim official support for Kafka 1.0, though it did already work ([#984](https://github.com/IBM/sarama/pull/984)). - Helper methods for Kafka version numbers to/from strings ([#989](https://github.com/IBM/sarama/pull/989)). - Implement CreatePartitions request/response ([#985](https://github.com/IBM/sarama/pull/985)). Improvements: - Add error codes 45-60 ([#986](https://github.com/IBM/sarama/issues/986)). Bug Fixes: - Fix slow consuming for certain Kafka 0.11/1.0 configurations ([#982](https://github.com/IBM/sarama/pull/982)). - Correctly determine when a FetchResponse contains the new message format ([#990](https://github.com/IBM/sarama/pull/990)). - Fix producing with multiple headers ([#996](https://github.com/IBM/sarama/pull/996)). 
- Fix handling of truncated record batches ([#998](https://github.com/IBM/sarama/pull/998)). - Fix leaking metrics when closing brokers ([#991](https://github.com/IBM/sarama/pull/991)). ## Version 1.14.0 (2017-11-13) New Features: - Add support for the new Kafka 0.11 record-batch format, including the wire protocol and the necessary behavioural changes in the producer and consumer. Transactions and idempotency are not yet supported, but producing and consuming should work with all the existing bells and whistles (batching, compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta of Arista Networks for this work. Part of ([#901](https://github.com/IBM/sarama/issues/901)). Bug Fixes: - Fix encoding of ProduceResponse versions in test ([#970](https://github.com/IBM/sarama/pull/970)). - Return partial replicas list when we have it ([#975](https://github.com/IBM/sarama/pull/975)). ## Version 1.13.0 (2017-10-04) New Features: - Support for FetchRequest version 3 ([#905](https://github.com/IBM/sarama/pull/905)). - Permit setting version on mock FetchResponses ([#939](https://github.com/IBM/sarama/pull/939)). - Add a configuration option to support storing only minimal metadata for extremely large clusters ([#937](https://github.com/IBM/sarama/pull/937)). - Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets ([#932](https://github.com/IBM/sarama/pull/932)). Improvements: - Provide the block-level timestamp when consuming compressed messages ([#885](https://github.com/IBM/sarama/issues/885)). - `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned by the broker, which can be meaningful ([#930](https://github.com/IBM/sarama/pull/930)). - Use a `Ticker` to reduce consumer timer overhead at the cost of higher variance in the actual timeout ([#933](https://github.com/IBM/sarama/pull/933)). Bug Fixes: - Gracefully handle messages with negative timestamps ([#907](https://github.com/IBM/sarama/pull/907)). - Raise a proper error when encountering an unknown message version ([#940](https://github.com/IBM/sarama/pull/940)). ## Version 1.12.0 (2017-05-08) New Features: - Added support for the `ApiVersions` request and response pair, and Kafka version 0.10.2 ([#867](https://github.com/IBM/sarama/pull/867)). Note that you still need to specify the Kafka version in the Sarama configuration for the time being. - Added a `Brokers` method to the Client which returns the complete set of active brokers ([#813](https://github.com/IBM/sarama/pull/813)). - Added an `InSyncReplicas` method to the Client which returns the set of all in-sync broker IDs for the given partition, now that the Kafka versions for which this was misleading are no longer in our supported set ([#872](https://github.com/IBM/sarama/pull/872)). - Added a `NewCustomHashPartitioner` method which allows constructing a hash partitioner with a custom hash method in case the default (FNV-1a) is not suitable ([#837](https://github.com/IBM/sarama/pull/837), [#841](https://github.com/IBM/sarama/pull/841)). Improvements: - Recognize more Kafka error codes ([#859](https://github.com/IBM/sarama/pull/859)). Bug Fixes: - Fix an issue where decoding a malformed FetchRequest would not return the correct error ([#818](https://github.com/IBM/sarama/pull/818)). - Respect ordering of group protocols in JoinGroupRequests. 
This fix is transparent if you're using the `AddGroupProtocol` or `AddGroupProtocolMetadata` helpers; otherwise you will need to switch from the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols` ([#812](https://github.com/IBM/sarama/issues/812)). - Fix an alignment-related issue with atomics on 32-bit architectures ([#859](https://github.com/IBM/sarama/pull/859)). ## Version 1.11.0 (2016-12-20) _Important:_ As of Sarama 1.11 it is necessary to set the config value of `Producer.Return.Successes` to true in order to use the SyncProducer. Previous versions would silently override this value when instantiating a SyncProducer which led to unexpected values and data races. New Features: - Metrics! Thanks to Sébastien Launay for all his work on this feature ([#701](https://github.com/IBM/sarama/pull/701), [#746](https://github.com/IBM/sarama/pull/746), [#766](https://github.com/IBM/sarama/pull/766)). - Add support for LZ4 compression ([#786](https://github.com/IBM/sarama/pull/786)). - Add support for ListOffsetRequest v1 and Kafka 0.10.1 ([#775](https://github.com/IBM/sarama/pull/775)). - Added a `HighWaterMarks` method to the Consumer which aggregates the `HighWaterMarkOffset` values of its child topic/partitions ([#769](https://github.com/IBM/sarama/pull/769)). Bug Fixes: - Fixed producing when using timestamps, compression and Kafka 0.10 ([#759](https://github.com/IBM/sarama/pull/759)). - Added missing decoder methods to DescribeGroups response ([#756](https://github.com/IBM/sarama/pull/756)). - Fix producer shutdown when `Return.Errors` is disabled ([#787](https://github.com/IBM/sarama/pull/787)). - Don't mutate configuration in SyncProducer ([#790](https://github.com/IBM/sarama/pull/790)). - Fix crash on SASL initialization failure ([#795](https://github.com/IBM/sarama/pull/795)). ## Version 1.10.1 (2016-08-30) Bug Fixes: - Fix the documentation for `HashPartitioner` which was incorrect ([#717](https://github.com/IBM/sarama/pull/717)). - Permit client creation even when it is limited by ACLs ([#722](https://github.com/IBM/sarama/pull/722)). - Several fixes to the consumer timer optimization code, regressions introduced in v1.10.0. Go's timers are finicky ([#730](https://github.com/IBM/sarama/pull/730), [#733](https://github.com/IBM/sarama/pull/733), [#734](https://github.com/IBM/sarama/pull/734)). - Handle consuming compressed relative offsets with Kafka 0.10 ([#735](https://github.com/IBM/sarama/pull/735)). ## Version 1.10.0 (2016-08-02) _Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of Kafka you are running against (via the `config.Version` value) in order to use features that may not be compatible with old Kafka versions. If you don't specify this value it will default to 0.8.2 (the minimum supported), and trying to use more recent features (like the offset manager) will fail with an error. _Also:_ The offset-manager's behaviour has been changed to match the upstream java consumer (see [#705](https://github.com/IBM/sarama/pull/705) and [#713](https://github.com/IBM/sarama/pull/713)). If you use the offset-manager, please ensure that you are committing one *greater* than the last consumed message offset or else you may end up consuming duplicate messages. New Features: - Support for Kafka 0.10 ([#672](https://github.com/IBM/sarama/pull/672), [#678](https://github.com/IBM/sarama/pull/678), [#681](https://github.com/IBM/sarama/pull/681), and others). 
- Support for configuring the target Kafka version ([#676](https://github.com/IBM/sarama/pull/676)). - Batch producing support in the SyncProducer ([#677](https://github.com/IBM/sarama/pull/677)). - Extend producer mock to allow setting expectations on message contents ([#667](https://github.com/IBM/sarama/pull/667)). Improvements: - Support `nil` compressed messages for deleting in compacted topics ([#634](https://github.com/IBM/sarama/pull/634)). - Pre-allocate decoding errors, greatly reducing heap usage and GC time against misbehaving brokers ([#690](https://github.com/IBM/sarama/pull/690)). - Re-use consumer expiry timers, removing one allocation per consumed message ([#707](https://github.com/IBM/sarama/pull/707)). Bug Fixes: - Actually default the client ID to "sarama" like we say we do ([#664](https://github.com/IBM/sarama/pull/664)). - Fix a rare issue where `Client.Leader` could return the wrong error ([#685](https://github.com/IBM/sarama/pull/685)). - Fix a possible tight loop in the consumer ([#693](https://github.com/IBM/sarama/pull/693)). - Match upstream's offset-tracking behaviour ([#705](https://github.com/IBM/sarama/pull/705)). - Report UnknownTopicOrPartition errors from the offset manager ([#706](https://github.com/IBM/sarama/pull/706)). - Fix possible negative partition value from the HashPartitioner ([#709](https://github.com/IBM/sarama/pull/709)). ## Version 1.9.0 (2016-05-16) New Features: - Add support for custom offset manager retention durations ([#602](https://github.com/IBM/sarama/pull/602)). - Publish low-level mocks to enable testing of third-party producer/consumer implementations ([#570](https://github.com/IBM/sarama/pull/570)). - Declare support for Golang 1.6 ([#611](https://github.com/IBM/sarama/pull/611)). - Support for SASL plain-text auth ([#648](https://github.com/IBM/sarama/pull/648)). Improvements: - Simplified broker locking scheme slightly ([#604](https://github.com/IBM/sarama/pull/604)). - Documentation cleanup ([#605](https://github.com/IBM/sarama/pull/605), [#621](https://github.com/IBM/sarama/pull/621), [#654](https://github.com/IBM/sarama/pull/654)). Bug Fixes: - Fix race condition shutting down the OffsetManager ([#658](https://github.com/IBM/sarama/pull/658)). ## Version 1.8.0 (2016-02-01) New Features: - Full support for Kafka 0.9: - All protocol messages and fields ([#586](https://github.com/IBM/sarama/pull/586), [#588](https://github.com/IBM/sarama/pull/588), [#590](https://github.com/IBM/sarama/pull/590)). - Verified that TLS support works ([#581](https://github.com/IBM/sarama/pull/581)). - Fixed the OffsetManager compatibility ([#585](https://github.com/IBM/sarama/pull/585)). Improvements: - Optimize for fewer system calls when reading from the network ([#584](https://github.com/IBM/sarama/pull/584)). - Automatically retry `InvalidMessage` errors to match upstream behaviour ([#589](https://github.com/IBM/sarama/pull/589)). ## Version 1.7.0 (2015-12-11) New Features: - Preliminary support for Kafka 0.9 ([#572](https://github.com/IBM/sarama/pull/572)). This comes with several caveats: - Protocol-layer support is mostly in place ([#577](https://github.com/IBM/sarama/pull/577)), however Kafka 0.9 renamed some messages and fields, which we did not in order to preserve API compatibility. - The producer and consumer work against 0.9, but the offset manager does not ([#573](https://github.com/IBM/sarama/pull/573)). - TLS support may or may not work ([#581](https://github.com/IBM/sarama/pull/581)). 
Improvements: - Don't wait for request timeouts on dead brokers, greatly speeding recovery when the TCP connection is left hanging ([#548](https://github.com/IBM/sarama/pull/548)). - Refactored part of the producer. The new version provides a much more elegant solution to [#449](https://github.com/IBM/sarama/pull/449). It is also slightly more efficient, and much more precise in calculating batch sizes when compression is used ([#549](https://github.com/IBM/sarama/pull/549), [#550](https://github.com/IBM/sarama/pull/550), [#551](https://github.com/IBM/sarama/pull/551)). Bug Fixes: - Fix race condition in consumer test mock ([#553](https://github.com/IBM/sarama/pull/553)). ## Version 1.6.1 (2015-09-25) Bug Fixes: - Fix panic that could occur if a user-supplied message value failed to encode ([#449](https://github.com/IBM/sarama/pull/449)). ## Version 1.6.0 (2015-09-04) New Features: - Implementation of a consumer offset manager using the APIs introduced in Kafka 0.8.2. The API is designed mainly for integration into a future high-level consumer, not for direct use, although it is *possible* to use it directly. ([#461](https://github.com/IBM/sarama/pull/461)). Improvements: - CRC32 calculation is much faster on machines with SSE4.2 instructions, removing a major hotspot from most profiles ([#255](https://github.com/IBM/sarama/pull/255)). Bug Fixes: - Make protocol decoding more robust against some malformed packets generated by go-fuzz ([#523](https://github.com/IBM/sarama/pull/523), [#525](https://github.com/IBM/sarama/pull/525)) or found in other ways ([#528](https://github.com/IBM/sarama/pull/528)). - Fix a potential race condition panic in the consumer on shutdown ([#529](https://github.com/IBM/sarama/pull/529)). ## Version 1.5.0 (2015-08-17) New Features: - TLS-encrypted network connections are now supported. This feature is subject to change when Kafka releases built-in TLS support, but for now this is enough to work with TLS-terminating proxies ([#154](https://github.com/IBM/sarama/pull/154)). Improvements: - The consumer will not block if a single partition is not drained by the user; all other partitions will continue to consume normally ([#485](https://github.com/IBM/sarama/pull/485)). - Formatting of error strings has been much improved ([#495](https://github.com/IBM/sarama/pull/495)). - Internal refactoring of the producer for code cleanliness and to enable future work ([#300](https://github.com/IBM/sarama/pull/300)). Bug Fixes: - Fix a potential deadlock in the consumer on shutdown ([#475](https://github.com/IBM/sarama/pull/475)). ## Version 1.4.3 (2015-07-21) Bug Fixes: - Don't include the partitioner in the producer's "fetch partitions" circuit-breaker ([#466](https://github.com/IBM/sarama/pull/466)). - Don't retry messages until the broker is closed when abandoning a broker in the producer ([#468](https://github.com/IBM/sarama/pull/468)). - Update the import path for snappy-go, it has moved again and the API has changed slightly ([#486](https://github.com/IBM/sarama/pull/486)). ## Version 1.4.2 (2015-05-27) Bug Fixes: - Update the import path for snappy-go, it has moved from google code to github ([#456](https://github.com/IBM/sarama/pull/456)). ## Version 1.4.1 (2015-05-25) Improvements: - Optimizations when decoding snappy messages, thanks to John Potocny ([#446](https://github.com/IBM/sarama/pull/446)). Bug Fixes: - Fix hypothetical race conditions on producer shutdown ([#450](https://github.com/IBM/sarama/pull/450), [#451](https://github.com/IBM/sarama/pull/451)). 
## Version 1.4.0 (2015-05-01) New Features: - The consumer now implements `Topics()` and `Partitions()` methods to enable users to dynamically choose what topics/partitions to consume without instantiating a full client ([#431](https://github.com/IBM/sarama/pull/431)). - The partition-consumer now exposes the high water mark offset value returned by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/IBM/sarama/pull/339)). - Added a `kafka-console-consumer` tool capable of handling multiple partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer` ([#439](https://github.com/IBM/sarama/pull/439), [#442](https://github.com/IBM/sarama/pull/442)). Improvements: - The producer's logging during retry scenarios is more consistent, more useful, and slightly less verbose ([#429](https://github.com/IBM/sarama/pull/429)). - The client now shuffles its initial list of seed brokers in order to prevent thundering herd on the first broker in the list ([#441](https://github.com/IBM/sarama/pull/441)). Bug Fixes: - The producer now correctly manages its state if retries occur when it is shutting down, fixing several instances of confusing behaviour and at least one potential deadlock ([#419](https://github.com/IBM/sarama/pull/419)). - The consumer now handles messages for different partitions asynchronously, making it much more resilient to specific user code ordering ([#325](https://github.com/IBM/sarama/pull/325)). ## Version 1.3.0 (2015-04-16) New Features: - The client now tracks consumer group coordinators using ConsumerMetadataRequests similar to how it tracks partition leadership using regular MetadataRequests ([#411](https://github.com/IBM/sarama/pull/411)). This adds two methods to the client API: - `Coordinator(consumerGroup string) (*Broker, error)` - `RefreshCoordinator(consumerGroup string) error` Improvements: - ConsumerMetadataResponses now automatically create a Broker object out of the ID/address/port combination for the Coordinator; accessing the fields individually has been deprecated ([#413](https://github.com/IBM/sarama/pull/413)). - Much improved handling of `OffsetOutOfRange` errors in the consumer. Consumers will fail to start if the provided offset is out of range ([#418](https://github.com/IBM/sarama/pull/418)) and they will automatically shut down if the offset falls out of range ([#424](https://github.com/IBM/sarama/pull/424)). - Small performance improvement in encoding and decoding protocol messages ([#427](https://github.com/IBM/sarama/pull/427)). Bug Fixes: - Fix a rare race condition in the client's background metadata refresher if it happens to be activated while the client is being closed ([#422](https://github.com/IBM/sarama/pull/422)). ## Version 1.2.0 (2015-04-07) Improvements: - The producer's behaviour when `Flush.Frequency` is set is now more intuitive ([#389](https://github.com/IBM/sarama/pull/389)). - The producer is now somewhat more memory-efficient during and after retrying messages due to an improved queue implementation ([#396](https://github.com/IBM/sarama/pull/396)). - The consumer produces much more useful logging output when leadership changes ([#385](https://github.com/IBM/sarama/pull/385)). - The client's `GetOffset` method will now automatically refresh metadata and retry once in the event of stale information or similar ([#394](https://github.com/IBM/sarama/pull/394)). - Broker connections now have support for using TCP keepalives ([#407](https://github.com/IBM/sarama/issues/407)). 
Bug Fixes: - The OffsetCommitRequest message now correctly implements all three possible API versions ([#390](https://github.com/IBM/sarama/pull/390), [#400](https://github.com/IBM/sarama/pull/400)). ## Version 1.1.0 (2015-03-20) Improvements: - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly broken topics don't choke throughput ([#373](https://github.com/IBM/sarama/pull/373)). Bug Fixes: - Fix the producer's internal reference counting in certain unusual scenarios ([#367](https://github.com/IBM/sarama/pull/367)). - Fix the consumer's internal reference counting in certain unusual scenarios ([#369](https://github.com/IBM/sarama/pull/369)). - Fix a condition where the producer's internal control messages could have gotten stuck ([#368](https://github.com/IBM/sarama/pull/368)). - Fix an issue where invalid partition lists would be cached when asking for metadata for a non-existent topic ([#372](https://github.com/IBM/sarama/pull/372)). ## Version 1.0.0 (2015-03-17) Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are: - The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking. - The consumer has been rewritten to only open one connection per broker instead of one connection per partition. - The main types of Sarama are now interfaces to make dependency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/IBM/sarama/mocks` package. - For most use cases, it is no longer necessary to open a `Client`; this will be done for you. - All the configuration values have been unified in the `Config` struct. - Much improved test suite. golang-github-ibm-sarama-1.43.2/CODE_OF_CONDUCT.md000066400000000000000000000121521461256741300211500ustar00rootroot00000000000000# Contributor Covenant Code of Conduct ## Our Pledge We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. 
## Our Standards Examples of behavior that contributes to a positive environment for our community include: * Demonstrating empathy and kindness toward other people * Being respectful of differing opinions, viewpoints, and experiences * Giving and gracefully accepting constructive feedback * Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience * Focusing on what is best not just for us as individuals, but for the overall community Examples of unacceptable behavior include: * The use of sexualized language or imagery, and sexual attention or advances of any kind * Trolling, insulting or derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or email address, without their explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting ## Enforcement Responsibilities Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. ## Scope This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at dominic.evans@uk.ibm.com. All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the reporter of any incident. ## Enforcement Guidelines Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: ### 1. Correction **Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. **Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. ### 2. Warning **Community Impact**: A violation through a single incident or series of actions. **Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. ### 3. Temporary Ban **Community Impact**: A serious violation of community standards, including sustained inappropriate behavior. **Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. 
No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. ### 4. Permanent Ban **Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. **Consequence**: A permanent ban from any sort of public interaction within the community. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0, available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity). [homepage]: https://www.contributor-covenant.org For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq. Translations are available at https://www.contributor-covenant.org/translations. golang-github-ibm-sarama-1.43.2/CONTRIBUTING.md000066400000000000000000000105641461256741300206070ustar00rootroot00000000000000# Contributing [fork]: https://github.com/IBM/sarama/fork [pr]: https://github.com/IBM/sarama/compare [released]: https://help.github.com/articles/github-terms-of-service/#6-contributions-under-repository-license Hi there! We are thrilled that you would like to contribute to Sarama. Contributions are always welcome, both reporting issues and submitting pull requests! ## Reporting issues Please make sure to include any potentially useful information in the issue, so we can pinpoint the issue faster without going back and forth. - What SHA of Sarama are you running? If this is not the latest SHA on the main branch, please try if the problem persists with the latest version. - You can set `sarama.Logger` to a [log.Logger](http://golang.org/pkg/log/#Logger) instance to capture debug output. Please include it in your issue description. - Also look at the logs of the Kafka broker you are connected to. If you see anything out of the ordinary, please include it. Also, please include the following information about your environment, so we can help you faster: - What version of Kafka are you using? - What version of Go are you using? - What are the values of your Producer/Consumer/Client configuration? ## Contributing a change Contributions to this project are [released][released] to the public under the project's [opensource license](LICENSE.md). By contributing to this project you agree to the [Developer Certificate of Origin](https://developercertificate.org/) (DCO). The DCO was created by the Linux Kernel community and is a simple statement that you, as a contributor, wrote or otherwise have the legal right to contribute those changes. Contributors must _sign-off_ that they adhere to these requirements by adding a `Signed-off-by` line to all commit messages with an email address that matches the commit author: ``` feat: this is my commit message Signed-off-by: Random J Developer ``` Git even has a `-s` command line option to append this automatically to your commit message: ``` $ git commit -s -m 'This is my commit message' ``` Because this library is in production use by many people and applications, we code review all additions. To make the review process go as smooth as possible, please consider the following. 
- If you plan to work on something major, please open an issue to discuss the design first. - Don't break backwards compatibility. If you really have to, open an issue to discuss this first. - Make sure to use the `go fmt` command to format your code according to the standards. Even better, set up your editor to do this for you when saving. - Run [go vet](https://golang.org/cmd/vet/) to detect any suspicious constructs in your code that could be bugs. - Explicitly handle all error return values. If you really want to ignore an error value, you can assign it to `_`. You can use [errcheck](https://github.com/kisielk/errcheck) to verify whether you have handled all errors. - You may also want to run [golint](https://github.com/golang/lint) as well to detect style problems. - Add tests that cover the changes you made. Make sure to run `go test` with the `-race` argument to test for race conditions. - Make sure your code is supported by all the Go versions we support. You can rely on GitHub Actions for testing older Go versions. ## Submitting a pull request 0. [Fork][fork] and clone the repository 1. Create a new branch: `git checkout -b my-branch-name` 2. Make your change, push to your fork and [submit a pull request][pr] 3. Wait for your pull request to be reviewed and merged. Here are a few things you can do that will increase the likelihood of your pull request being accepted: - Keep your change as focused as possible. If there are multiple changes you would like to make that are not dependent upon each other, consider submitting them as separate pull requests. - Write a [good commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html). ## Further Reading - [Developer Certificate of Origin versus Contributor License Agreements](https://julien.ponge.org/blog/developer-certificate-of-origin-versus-contributor-license-agreements/) - [The most powerful contributor agreement](https://lwn.net/Articles/592503/) - [How to Contribute to Open Source](https://opensource.guide/how-to-contribute/) - [Using Pull Requests](https://help.github.com/articles/about-pull-requests/) - [GitHub Help](https://help.github.com) golang-github-ibm-sarama-1.43.2/Dockerfile.kafka000066400000000000000000000035211461256741300214170ustar00rootroot00000000000000FROM registry.access.redhat.com/ubi8/ubi-minimal:8.9@sha256:f30dbf77b075215f6c827c269c073b5e0973e5cea8dacdf7ecb6a19c868f37f2 USER root RUN microdnf update -y \ && microdnf install -y curl gzip java-11-openjdk-headless tar tzdata-java \ && microdnf reinstall -y tzdata \ && microdnf clean all ENV JAVA_HOME=/usr/lib/jvm/jre-11 # https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html # Ensure Java doesn't cache any dns results RUN cd /etc/java/java-11-openjdk/*/conf/security \ && sed -e '/networkaddress.cache.ttl/d' -e '/networkaddress.cache.negative.ttl/d' -i java.security \ && echo 'networkaddress.cache.ttl=0' >> java.security \ && echo 'networkaddress.cache.negative.ttl=0' >> java.security ARG SCALA_VERSION="2.13" ARG KAFKA_VERSION="3.6.0" # https://github.com/apache/kafka/blob/9989b68d0d38c8f1357f78bf9d53a58c1476188d/tests/docker/Dockerfile#L46-L72 ARG KAFKA_MIRROR="https://s3-us-west-2.amazonaws.com/kafka-packages" SHELL ["/bin/bash", "-o", "pipefail", "-c"] RUN mkdir -p "/opt/kafka-${KAFKA_VERSION}" \ && chmod a+rw "/opt/kafka-${KAFKA_VERSION}" \ && curl -s "$KAFKA_MIRROR/kafka_${SCALA_VERSION}-${KAFKA_VERSION}.tgz" | tar xz --strip-components=1 -C "/opt/kafka-${KAFKA_VERSION}" # older kafka versions depend upon 
jaxb-api being bundled with the JDK, but it # was removed from Java 11 so work around that by including it in the kafka # libs dir regardless WORKDIR /tmp RUN curl -sLO "https://repo1.maven.org/maven2/javax/xml/bind/jaxb-api/2.3.0/jaxb-api-2.3.0.jar" \ && for DIR in /opt/kafka-*; do cp -v jaxb-api-2.3.0.jar $DIR/libs/ ; done \ && rm -f jaxb-api-2.3.0.jar WORKDIR /opt/kafka-${KAFKA_VERSION} ENV JAVA_MAJOR_VERSION=11 RUN sed -e "s/JAVA_MAJOR_VERSION=.*/JAVA_MAJOR_VERSION=${JAVA_MAJOR_VERSION}/" -i"" ./bin/kafka-run-class.sh COPY entrypoint.sh / USER 65534:65534 ENTRYPOINT ["/entrypoint.sh"] golang-github-ibm-sarama-1.43.2/LICENSE.md000066400000000000000000000021161461256741300177540ustar00rootroot00000000000000# MIT License Copyright (c) 2013 Shopify Copyright (c) 2023 IBM Corporation Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. golang-github-ibm-sarama-1.43.2/Makefile000066400000000000000000000024341461256741300200130ustar00rootroot00000000000000default: fmt get update test lint GO := go GOBIN := $(shell pwd)/bin GOBUILD := CGO_ENABLED=0 $(GO) build $(BUILD_FLAG) GOTEST := $(GO) test -v -race -coverprofile=profile.out -covermode=atomic FILES := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -not -name '*_test.go') TESTS := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -name '*_test.go') $(GOBIN)/tparse: GOBIN=$(GOBIN) go install github.com/mfridman/tparse@v0.11.1 get: $(GO) get ./... $(GO) mod verify $(GO) mod tidy update: $(GO) get -u -v ./... $(GO) mod verify $(GO) mod tidy fmt: gofmt -s -l -w $(FILES) $(TESTS) lint: GOFLAGS="-tags=functional" golangci-lint run test: $(GOBIN)/tparse $(GOTEST) -timeout 2m -json ./... \ | tee output.json | $(GOBIN)/tparse -follow -all [ -z "$${GITHUB_STEP_SUMMARY}" ] \ || NO_COLOR=1 $(GOBIN)/tparse -format markdown -file output.json -all >"$${GITHUB_STEP_SUMMARY:-/dev/null}" .PHONY: test_functional test_functional: $(GOBIN)/tparse $(GOTEST) -timeout 15m -tags=functional -json ./... 
\ | tee output.json | $(GOBIN)/tparse -follow -all [ -z "$${GITHUB_STEP_SUMMARY:-}" ] \ || NO_COLOR=1 $(GOBIN)/tparse -format markdown -file output.json -all >"$${GITHUB_STEP_SUMMARY:-/dev/null}" golang-github-ibm-sarama-1.43.2/README.md000066400000000000000000000040751461256741300176350ustar00rootroot00000000000000# sarama [![Go Reference](https://pkg.go.dev/badge/github.com/IBM/sarama.svg)](https://pkg.go.dev/github.com/IBM/sarama) [![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/IBM/sarama/badge?style=flat)](https://securityscorecards.dev/viewer/?uri=github.com/IBM/sarama) [![OpenSSF Best Practices](https://www.bestpractices.dev/projects/7996/badge)](https://www.bestpractices.dev/projects/7996) Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/). ## Getting started - API documentation and examples are available via [pkg.go.dev](https://pkg.go.dev/github.com/IBM/sarama). - Mocks for testing are available in the [mocks](./mocks) subpackage. - The [examples](./examples) directory contains more elaborate example applications. - The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation. You might also want to look at the [Frequently Asked Questions](https://github.com/IBM/sarama/wiki/Frequently-Asked-Questions). ## Compatibility and API stability Sarama provides a "2 releases + 2 months" compatibility guarantee: we support the two latest stable releases of Kafka and Go, and we provide a two month grace period for older releases. However, older releases of Kafka are still likely to work. Sarama follows semantic versioning and provides API stability via the standard Go [module version numbering](https://go.dev/doc/modules/version-numbers) scheme. A changelog is available [here](CHANGELOG.md). ## Contributing - Get started by checking our [contribution guidelines](https://github.com/IBM/sarama/blob/main/CONTRIBUTING.md). - Read the [Sarama wiki](https://github.com/IBM/sarama/wiki) for more technical and design details. - The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) contains a wealth of useful information. - For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers. - If you have any questions, just ask! golang-github-ibm-sarama-1.43.2/SECURITY.md000066400000000000000000000013601461256741300201410ustar00rootroot00000000000000# Security ## Reporting Security Issues **Please do not report security vulnerabilities through public GitHub issues.** The easiest way to report a security issue is privately through GitHub [here](https://github.com/IBM/sarama/security/advisories/new). See [Privately reporting a security vulnerability](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing/privately-reporting-a-security-vulnerability) for full instructions. Alternatively, you can report them via e-mail or anonymous form to the IBM Product Security Incident Response Team (PSIRT) following the guidelines under the [IBM Security Vulnerability Management](https://www.ibm.com/support/pages/ibm-security-vulnerability-management) pages. 
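The README above points readers at pkg.go.dev and the examples directory for full usage. As a quick, non-authoritative orientation, the following minimal sketch produces a single message with a `SyncProducer`; the broker address `localhost:9092` and the topic name are illustrative assumptions, not values taken from this repository.

```go
package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	// SyncProducer requires Return.Successes so SendMessage can report the partition and offset.
	config := sarama.NewConfig()
	config.Producer.Return.Successes = true

	// Assumed broker address, for illustration only.
	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatalln("failed to start SyncProducer:", err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "example-topic", // assumed topic name
		Value: sarama.StringEncoder("hello from sarama"),
	})
	if err != nil {
		log.Fatalln("failed to send message:", err)
	}
	log.Printf("message stored in partition %d at offset %d", partition, offset)
}
```

For unit tests, the `mocks` subpackage mentioned in the README provides in-memory stand-ins for `SyncProducer`, `AsyncProducer`, and `Consumer`, so code like this can be exercised without a live broker.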
golang-github-ibm-sarama-1.43.2/Vagrantfile000066400000000000000000000005421461256741300205360ustar00rootroot00000000000000# We have 5 * 192MB ZK processes and 5 * 320MB Kafka processes => 2560MB MEMORY = 3072 Vagrant.configure("2") do |config| config.vm.box = "ubuntu/bionic64" config.vm.provision :shell, path: "vagrant/provision.sh" config.vm.network "private_network", ip: "192.168.100.67" config.vm.provider "virtualbox" do |v| v.memory = MEMORY end end golang-github-ibm-sarama-1.43.2/acl_bindings.go000066400000000000000000000052641461256741300213220ustar00rootroot00000000000000package sarama // Resource holds information about acl resource type type Resource struct { ResourceType AclResourceType ResourceName string ResourcePatternType AclResourcePatternType } func (r *Resource) encode(pe packetEncoder, version int16) error { pe.putInt8(int8(r.ResourceType)) if err := pe.putString(r.ResourceName); err != nil { return err } if version == 1 { if r.ResourcePatternType == AclPatternUnknown { Logger.Print("Cannot encode an unknown resource pattern type, using Literal instead") r.ResourcePatternType = AclPatternLiteral } pe.putInt8(int8(r.ResourcePatternType)) } return nil } func (r *Resource) decode(pd packetDecoder, version int16) (err error) { resourceType, err := pd.getInt8() if err != nil { return err } r.ResourceType = AclResourceType(resourceType) if r.ResourceName, err = pd.getString(); err != nil { return err } if version == 1 { pattern, err := pd.getInt8() if err != nil { return err } r.ResourcePatternType = AclResourcePatternType(pattern) } return nil } // Acl holds information about acl type type Acl struct { Principal string Host string Operation AclOperation PermissionType AclPermissionType } func (a *Acl) encode(pe packetEncoder) error { if err := pe.putString(a.Principal); err != nil { return err } if err := pe.putString(a.Host); err != nil { return err } pe.putInt8(int8(a.Operation)) pe.putInt8(int8(a.PermissionType)) return nil } func (a *Acl) decode(pd packetDecoder, version int16) (err error) { if a.Principal, err = pd.getString(); err != nil { return err } if a.Host, err = pd.getString(); err != nil { return err } operation, err := pd.getInt8() if err != nil { return err } a.Operation = AclOperation(operation) permissionType, err := pd.getInt8() if err != nil { return err } a.PermissionType = AclPermissionType(permissionType) return nil } // ResourceAcls is an acl resource type type ResourceAcls struct { Resource Acls []*Acl } func (r *ResourceAcls) encode(pe packetEncoder, version int16) error { if err := r.Resource.encode(pe, version); err != nil { return err } if err := pe.putArrayLength(len(r.Acls)); err != nil { return err } for _, acl := range r.Acls { if err := acl.encode(pe); err != nil { return err } } return nil } func (r *ResourceAcls) decode(pd packetDecoder, version int16) error { if err := r.Resource.decode(pd, version); err != nil { return err } n, err := pd.getArrayLength() if err != nil { return err } r.Acls = make([]*Acl, n) for i := 0; i < n; i++ { r.Acls[i] = new(Acl) if err := r.Acls[i].decode(pd, version); err != nil { return err } } return nil } golang-github-ibm-sarama-1.43.2/acl_create_request.go000066400000000000000000000033701461256741300225340ustar00rootroot00000000000000package sarama // CreateAclsRequest is an acl creation request type CreateAclsRequest struct { Version int16 AclCreations []*AclCreation } func (c *CreateAclsRequest) encode(pe packetEncoder) error { if err := pe.putArrayLength(len(c.AclCreations)); err != nil { return err } for 
_, aclCreation := range c.AclCreations { if err := aclCreation.encode(pe, c.Version); err != nil { return err } } return nil } func (c *CreateAclsRequest) decode(pd packetDecoder, version int16) (err error) { c.Version = version n, err := pd.getArrayLength() if err != nil { return err } c.AclCreations = make([]*AclCreation, n) for i := 0; i < n; i++ { c.AclCreations[i] = new(AclCreation) if err := c.AclCreations[i].decode(pd, version); err != nil { return err } } return nil } func (c *CreateAclsRequest) key() int16 { return 30 } func (c *CreateAclsRequest) version() int16 { return c.Version } func (c *CreateAclsRequest) headerVersion() int16 { return 1 } func (c *CreateAclsRequest) isValidVersion() bool { return c.Version >= 0 && c.Version <= 1 } func (c *CreateAclsRequest) requiredVersion() KafkaVersion { switch c.Version { case 1: return V2_0_0_0 default: return V0_11_0_0 } } // AclCreation is a wrapper around Resource and Acl type type AclCreation struct { Resource Acl } func (a *AclCreation) encode(pe packetEncoder, version int16) error { if err := a.Resource.encode(pe, version); err != nil { return err } if err := a.Acl.encode(pe); err != nil { return err } return nil } func (a *AclCreation) decode(pd packetDecoder, version int16) (err error) { if err := a.Resource.decode(pd, version); err != nil { return err } if err := a.Acl.decode(pd, version); err != nil { return err } return nil } golang-github-ibm-sarama-1.43.2/acl_create_request_test.go000066400000000000000000000027221461256741300235730ustar00rootroot00000000000000package sarama import "testing" var ( aclCreateRequest = []byte{ 0, 0, 0, 1, 3, // resource type = group 0, 5, 'g', 'r', 'o', 'u', 'p', 0, 9, 'p', 'r', 'i', 'n', 'c', 'i', 'p', 'a', 'l', 0, 4, 'h', 'o', 's', 't', 2, // all 2, // deny } aclCreateRequestv1 = []byte{ 0, 0, 0, 1, 3, // resource type = group 0, 5, 'g', 'r', 'o', 'u', 'p', 3, // resource pattern type = literal 0, 9, 'p', 'r', 'i', 'n', 'c', 'i', 'p', 'a', 'l', 0, 4, 'h', 'o', 's', 't', 2, // all 2, // deny } ) func TestCreateAclsRequestv0(t *testing.T) { req := &CreateAclsRequest{ Version: 0, AclCreations: []*AclCreation{ { Resource: Resource{ ResourceType: AclResourceGroup, ResourceName: "group", }, Acl: Acl{ Principal: "principal", Host: "host", Operation: AclOperationAll, PermissionType: AclPermissionDeny, }, }, }, } testRequest(t, "create request", req, aclCreateRequest) } func TestCreateAclsRequestv1(t *testing.T) { req := &CreateAclsRequest{ Version: 1, AclCreations: []*AclCreation{ { Resource: Resource{ ResourceType: AclResourceGroup, ResourceName: "group", ResourcePatternType: AclPatternLiteral, }, Acl: Acl{ Principal: "principal", Host: "host", Operation: AclOperationAll, PermissionType: AclPermissionDeny, }, }, }, } testRequest(t, "create request v1", req, aclCreateRequestv1) } golang-github-ibm-sarama-1.43.2/acl_create_response.go000066400000000000000000000042341461256741300227020ustar00rootroot00000000000000package sarama import "time" // CreateAclsResponse is a an acl response creation type type CreateAclsResponse struct { Version int16 ThrottleTime time.Duration AclCreationResponses []*AclCreationResponse } func (c *CreateAclsResponse) encode(pe packetEncoder) error { pe.putInt32(int32(c.ThrottleTime / time.Millisecond)) if err := pe.putArrayLength(len(c.AclCreationResponses)); err != nil { return err } for _, aclCreationResponse := range c.AclCreationResponses { if err := aclCreationResponse.encode(pe); err != nil { return err } } return nil } func (c *CreateAclsResponse) decode(pd 
packetDecoder, version int16) (err error) { throttleTime, err := pd.getInt32() if err != nil { return err } c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond n, err := pd.getArrayLength() if err != nil { return err } c.AclCreationResponses = make([]*AclCreationResponse, n) for i := 0; i < n; i++ { c.AclCreationResponses[i] = new(AclCreationResponse) if err := c.AclCreationResponses[i].decode(pd, version); err != nil { return err } } return nil } func (c *CreateAclsResponse) key() int16 { return 30 } func (c *CreateAclsResponse) version() int16 { return c.Version } func (c *CreateAclsResponse) headerVersion() int16 { return 0 } func (c *CreateAclsResponse) isValidVersion() bool { return c.Version >= 0 && c.Version <= 1 } func (c *CreateAclsResponse) requiredVersion() KafkaVersion { switch c.Version { case 1: return V2_0_0_0 default: return V0_11_0_0 } } func (r *CreateAclsResponse) throttleTime() time.Duration { return r.ThrottleTime } // AclCreationResponse is an acl creation response type type AclCreationResponse struct { Err KError ErrMsg *string } func (a *AclCreationResponse) encode(pe packetEncoder) error { pe.putInt16(int16(a.Err)) if err := pe.putNullableString(a.ErrMsg); err != nil { return err } return nil } func (a *AclCreationResponse) decode(pd packetDecoder, version int16) (err error) { kerr, err := pd.getInt16() if err != nil { return err } a.Err = KError(kerr) if a.ErrMsg, err = pd.getNullableString(); err != nil { return err } return nil } golang-github-ibm-sarama-1.43.2/acl_create_response_test.go000066400000000000000000000013711461256741300237400ustar00rootroot00000000000000package sarama import ( "testing" "time" ) var ( createResponseWithError = []byte{ 0, 0, 0, 100, 0, 0, 0, 1, 0, 42, 0, 5, 'e', 'r', 'r', 'o', 'r', } createResponseArray = []byte{ 0, 0, 0, 100, 0, 0, 0, 2, 0, 42, 0, 5, 'e', 'r', 'r', 'o', 'r', 0, 0, 255, 255, } ) func TestCreateAclsResponse(t *testing.T) { errmsg := "error" resp := &CreateAclsResponse{ ThrottleTime: 100 * time.Millisecond, AclCreationResponses: []*AclCreationResponse{{ Err: ErrInvalidRequest, ErrMsg: &errmsg, }}, } testResponse(t, "response with error", resp, createResponseWithError) resp.AclCreationResponses = append(resp.AclCreationResponses, new(AclCreationResponse)) testResponse(t, "response array", resp, createResponseArray) } golang-github-ibm-sarama-1.43.2/acl_delete_request.go000066400000000000000000000023371461256741300225350ustar00rootroot00000000000000package sarama // DeleteAclsRequest is a delete acl request type DeleteAclsRequest struct { Version int Filters []*AclFilter } func (d *DeleteAclsRequest) encode(pe packetEncoder) error { if err := pe.putArrayLength(len(d.Filters)); err != nil { return err } for _, filter := range d.Filters { filter.Version = d.Version if err := filter.encode(pe); err != nil { return err } } return nil } func (d *DeleteAclsRequest) decode(pd packetDecoder, version int16) (err error) { d.Version = int(version) n, err := pd.getArrayLength() if err != nil { return err } d.Filters = make([]*AclFilter, n) for i := 0; i < n; i++ { d.Filters[i] = new(AclFilter) d.Filters[i].Version = int(version) if err := d.Filters[i].decode(pd, version); err != nil { return err } } return nil } func (d *DeleteAclsRequest) key() int16 { return 31 } func (d *DeleteAclsRequest) version() int16 { return int16(d.Version) } func (d *DeleteAclsRequest) headerVersion() int16 { return 1 } func (d *DeleteAclsRequest) isValidVersion() bool { return d.Version >= 0 && d.Version <= 1 } func (d *DeleteAclsRequest) 
requiredVersion() KafkaVersion { switch d.Version { case 1: return V2_0_0_0 default: return V0_11_0_0 } } golang-github-ibm-sarama-1.43.2/acl_delete_request_test.go000066400000000000000000000046001461256741300235670ustar00rootroot00000000000000package sarama import "testing" var ( aclDeleteRequestNullsv1 = []byte{ 0, 0, 0, 1, 1, 255, 255, 1, // Any 255, 255, 255, 255, 11, 3, } aclDeleteRequestv1 = []byte{ 0, 0, 0, 1, 1, // any 0, 6, 'f', 'i', 'l', 't', 'e', 'r', 1, // Any Filter 0, 9, 'p', 'r', 'i', 'n', 'c', 'i', 'p', 'a', 'l', 0, 4, 'h', 'o', 's', 't', 4, // write 3, // allow } aclDeleteRequestNulls = []byte{ 0, 0, 0, 1, 1, 255, 255, 255, 255, 255, 255, 11, 3, } aclDeleteRequest = []byte{ 0, 0, 0, 1, 1, // any 0, 6, 'f', 'i', 'l', 't', 'e', 'r', 0, 9, 'p', 'r', 'i', 'n', 'c', 'i', 'p', 'a', 'l', 0, 4, 'h', 'o', 's', 't', 4, // write 3, // allow } aclDeleteRequestArray = []byte{ 0, 0, 0, 2, 1, 0, 6, 'f', 'i', 'l', 't', 'e', 'r', 0, 9, 'p', 'r', 'i', 'n', 'c', 'i', 'p', 'a', 'l', 0, 4, 'h', 'o', 's', 't', 4, // write 3, // allow 2, 0, 5, 't', 'o', 'p', 'i', 'c', 255, 255, 255, 255, 6, 2, } ) func TestDeleteAclsRequest(t *testing.T) { req := &DeleteAclsRequest{ Filters: []*AclFilter{{ ResourceType: AclResourceAny, Operation: AclOperationAlterConfigs, PermissionType: AclPermissionAllow, }}, } testRequest(t, "delete request nulls", req, aclDeleteRequestNulls) req.Filters[0].ResourceName = nullString("filter") req.Filters[0].Principal = nullString("principal") req.Filters[0].Host = nullString("host") req.Filters[0].Operation = AclOperationWrite testRequest(t, "delete request", req, aclDeleteRequest) req.Filters = append(req.Filters, &AclFilter{ ResourceType: AclResourceTopic, ResourceName: nullString("topic"), Operation: AclOperationDelete, PermissionType: AclPermissionDeny, }) testRequest(t, "delete request array", req, aclDeleteRequestArray) } func TestDeleteAclsRequestV1(t *testing.T) { req := &DeleteAclsRequest{ Version: 1, Filters: []*AclFilter{{ ResourceType: AclResourceAny, Operation: AclOperationAlterConfigs, PermissionType: AclPermissionAllow, ResourcePatternTypeFilter: AclPatternAny, }}, } testRequest(t, "delete request nulls", req, aclDeleteRequestNullsv1) req.Filters[0].ResourceName = nullString("filter") req.Filters[0].Principal = nullString("principal") req.Filters[0].Host = nullString("host") req.Filters[0].Operation = AclOperationWrite testRequest(t, "delete request", req, aclDeleteRequestv1) } golang-github-ibm-sarama-1.43.2/acl_delete_response.go000066400000000000000000000065731461256741300227110ustar00rootroot00000000000000package sarama import "time" // DeleteAclsResponse is a delete acl response type DeleteAclsResponse struct { Version int16 ThrottleTime time.Duration FilterResponses []*FilterResponse } func (d *DeleteAclsResponse) encode(pe packetEncoder) error { pe.putInt32(int32(d.ThrottleTime / time.Millisecond)) if err := pe.putArrayLength(len(d.FilterResponses)); err != nil { return err } for _, filterResponse := range d.FilterResponses { if err := filterResponse.encode(pe, d.Version); err != nil { return err } } return nil } func (d *DeleteAclsResponse) decode(pd packetDecoder, version int16) (err error) { throttleTime, err := pd.getInt32() if err != nil { return err } d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond n, err := pd.getArrayLength() if err != nil { return err } d.FilterResponses = make([]*FilterResponse, n) for i := 0; i < n; i++ { d.FilterResponses[i] = new(FilterResponse) if err := d.FilterResponses[i].decode(pd, version); err != nil { 
return err } } return nil } func (d *DeleteAclsResponse) key() int16 { return 31 } func (d *DeleteAclsResponse) version() int16 { return d.Version } func (d *DeleteAclsResponse) headerVersion() int16 { return 0 } func (d *DeleteAclsResponse) isValidVersion() bool { return d.Version >= 0 && d.Version <= 1 } func (d *DeleteAclsResponse) requiredVersion() KafkaVersion { switch d.Version { case 1: return V2_0_0_0 default: return V0_11_0_0 } } func (r *DeleteAclsResponse) throttleTime() time.Duration { return r.ThrottleTime } // FilterResponse is a filter response type type FilterResponse struct { Err KError ErrMsg *string MatchingAcls []*MatchingAcl } func (f *FilterResponse) encode(pe packetEncoder, version int16) error { pe.putInt16(int16(f.Err)) if err := pe.putNullableString(f.ErrMsg); err != nil { return err } if err := pe.putArrayLength(len(f.MatchingAcls)); err != nil { return err } for _, matchingAcl := range f.MatchingAcls { if err := matchingAcl.encode(pe, version); err != nil { return err } } return nil } func (f *FilterResponse) decode(pd packetDecoder, version int16) (err error) { kerr, err := pd.getInt16() if err != nil { return err } f.Err = KError(kerr) if f.ErrMsg, err = pd.getNullableString(); err != nil { return err } n, err := pd.getArrayLength() if err != nil { return err } f.MatchingAcls = make([]*MatchingAcl, n) for i := 0; i < n; i++ { f.MatchingAcls[i] = new(MatchingAcl) if err := f.MatchingAcls[i].decode(pd, version); err != nil { return err } } return nil } // MatchingAcl is a matching acl type type MatchingAcl struct { Err KError ErrMsg *string Resource Acl } func (m *MatchingAcl) encode(pe packetEncoder, version int16) error { pe.putInt16(int16(m.Err)) if err := pe.putNullableString(m.ErrMsg); err != nil { return err } if err := m.Resource.encode(pe, version); err != nil { return err } if err := m.Acl.encode(pe); err != nil { return err } return nil } func (m *MatchingAcl) decode(pd packetDecoder, version int16) (err error) { kerr, err := pd.getInt16() if err != nil { return err } m.Err = KError(kerr) if m.ErrMsg, err = pd.getNullableString(); err != nil { return err } if err := m.Resource.decode(pd, version); err != nil { return err } if err := m.Acl.decode(pd, version); err != nil { return err } return nil } golang-github-ibm-sarama-1.43.2/acl_delete_response_test.go000066400000000000000000000015051461256741300237360ustar00rootroot00000000000000package sarama import ( "testing" "time" ) var deleteAclsResponse = []byte{ 0, 0, 0, 100, 0, 0, 0, 1, 0, 0, // no error 255, 255, // no error message 0, 0, 0, 1, // 1 matching acl 0, 0, // no error 255, 255, // no error message 2, // resource type 0, 5, 't', 'o', 'p', 'i', 'c', 0, 9, 'p', 'r', 'i', 'n', 'c', 'i', 'p', 'a', 'l', 0, 4, 'h', 'o', 's', 't', 4, 3, } func TestDeleteAclsResponse(t *testing.T) { resp := &DeleteAclsResponse{ ThrottleTime: 100 * time.Millisecond, FilterResponses: []*FilterResponse{{ MatchingAcls: []*MatchingAcl{{ Resource: Resource{ResourceType: AclResourceTopic, ResourceName: "topic"}, Acl: Acl{Principal: "principal", Host: "host", Operation: AclOperationWrite, PermissionType: AclPermissionAllow}, }}, }}, } testResponse(t, "", resp, deleteAclsResponse) } golang-github-ibm-sarama-1.43.2/acl_describe_request.go000066400000000000000000000015751461256741300230560ustar00rootroot00000000000000package sarama // DescribeAclsRequest is a describe acl request type type DescribeAclsRequest struct { Version int AclFilter } func (d *DescribeAclsRequest) encode(pe packetEncoder) error { d.AclFilter.Version = 
d.Version return d.AclFilter.encode(pe) } func (d *DescribeAclsRequest) decode(pd packetDecoder, version int16) (err error) { d.Version = int(version) d.AclFilter.Version = int(version) return d.AclFilter.decode(pd, version) } func (d *DescribeAclsRequest) key() int16 { return 29 } func (d *DescribeAclsRequest) version() int16 { return int16(d.Version) } func (d *DescribeAclsRequest) headerVersion() int16 { return 1 } func (d *DescribeAclsRequest) isValidVersion() bool { return d.Version >= 0 && d.Version <= 1 } func (d *DescribeAclsRequest) requiredVersion() KafkaVersion { switch d.Version { case 1: return V2_0_0_0 default: return V0_11_0_0 } } golang-github-ibm-sarama-1.43.2/acl_describe_request_test.go000066400000000000000000000027251461256741300241130ustar00rootroot00000000000000package sarama import ( "testing" ) var ( aclDescribeRequest = []byte{ 2, // resource type 0, 5, 't', 'o', 'p', 'i', 'c', 0, 9, 'p', 'r', 'i', 'n', 'c', 'i', 'p', 'a', 'l', 0, 4, 'h', 'o', 's', 't', 5, // acl operation 3, // acl permission type } aclDescribeRequestV1 = []byte{ 2, // resource type 0, 5, 't', 'o', 'p', 'i', 'c', 1, // any Type 0, 9, 'p', 'r', 'i', 'n', 'c', 'i', 'p', 'a', 'l', 0, 4, 'h', 'o', 's', 't', 5, // acl operation 3, // acl permission type } ) func TestAclDescribeRequestV0(t *testing.T) { resourcename := "topic" principal := "principal" host := "host" req := &DescribeAclsRequest{ AclFilter: AclFilter{ ResourceType: AclResourceTopic, ResourceName: &resourcename, Principal: &principal, Host: &host, Operation: AclOperationCreate, PermissionType: AclPermissionAllow, }, } testRequest(t, "", req, aclDescribeRequest) } func TestAclDescribeRequestV1(t *testing.T) { resourcename := "topic" principal := "principal" host := "host" req := &DescribeAclsRequest{ Version: 1, AclFilter: AclFilter{ ResourceType: AclResourceTopic, ResourceName: &resourcename, ResourcePatternTypeFilter: AclPatternAny, Principal: &principal, Host: &host, Operation: AclOperationCreate, PermissionType: AclPermissionAllow, }, } testRequest(t, "", req, aclDescribeRequestV1) } golang-github-ibm-sarama-1.43.2/acl_describe_response.go000066400000000000000000000035621461256741300232220ustar00rootroot00000000000000package sarama import "time" // DescribeAclsResponse is a describe acl response type type DescribeAclsResponse struct { Version int16 ThrottleTime time.Duration Err KError ErrMsg *string ResourceAcls []*ResourceAcls } func (d *DescribeAclsResponse) encode(pe packetEncoder) error { pe.putInt32(int32(d.ThrottleTime / time.Millisecond)) pe.putInt16(int16(d.Err)) if err := pe.putNullableString(d.ErrMsg); err != nil { return err } if err := pe.putArrayLength(len(d.ResourceAcls)); err != nil { return err } for _, resourceAcl := range d.ResourceAcls { if err := resourceAcl.encode(pe, d.Version); err != nil { return err } } return nil } func (d *DescribeAclsResponse) decode(pd packetDecoder, version int16) (err error) { throttleTime, err := pd.getInt32() if err != nil { return err } d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond kerr, err := pd.getInt16() if err != nil { return err } d.Err = KError(kerr) errmsg, err := pd.getString() if err != nil { return err } if errmsg != "" { d.ErrMsg = &errmsg } n, err := pd.getArrayLength() if err != nil { return err } d.ResourceAcls = make([]*ResourceAcls, n) for i := 0; i < n; i++ { d.ResourceAcls[i] = new(ResourceAcls) if err := d.ResourceAcls[i].decode(pd, version); err != nil { return err } } return nil } func (d *DescribeAclsResponse) key() int16 { return 29 } func (d 
*DescribeAclsResponse) version() int16 { return d.Version } func (d *DescribeAclsResponse) headerVersion() int16 { return 0 } func (d *DescribeAclsResponse) isValidVersion() bool { return d.Version >= 0 && d.Version <= 1 } func (d *DescribeAclsResponse) requiredVersion() KafkaVersion { switch d.Version { case 1: return V2_0_0_0 default: return V0_11_0_0 } } func (r *DescribeAclsResponse) throttleTime() time.Duration { return r.ThrottleTime } golang-github-ibm-sarama-1.43.2/acl_describe_response_test.go000066400000000000000000000016511461256741300242560ustar00rootroot00000000000000package sarama import ( "testing" "time" ) var aclDescribeResponseError = []byte{ 0, 0, 0, 100, 0, 8, // error 0, 5, 'e', 'r', 'r', 'o', 'r', 0, 0, 0, 1, // 1 resource 2, // cluster type 0, 5, 't', 'o', 'p', 'i', 'c', 0, 0, 0, 1, // 1 acl 0, 9, 'p', 'r', 'i', 'n', 'c', 'i', 'p', 'a', 'l', 0, 4, 'h', 'o', 's', 't', 4, // write 3, // allow } func TestAclDescribeResponse(t *testing.T) { errmsg := "error" resp := &DescribeAclsResponse{ ThrottleTime: 100 * time.Millisecond, Err: ErrBrokerNotAvailable, ErrMsg: &errmsg, ResourceAcls: []*ResourceAcls{{ Resource: Resource{ ResourceName: "topic", ResourceType: AclResourceTopic, }, Acls: []*Acl{ { Principal: "principal", Host: "host", Operation: AclOperationWrite, PermissionType: AclPermissionAllow, }, }, }}, } testResponse(t, "describe", resp, aclDescribeResponseError) } golang-github-ibm-sarama-1.43.2/acl_filter.go000066400000000000000000000031551461256741300210070ustar00rootroot00000000000000package sarama type AclFilter struct { Version int ResourceType AclResourceType ResourceName *string ResourcePatternTypeFilter AclResourcePatternType Principal *string Host *string Operation AclOperation PermissionType AclPermissionType } func (a *AclFilter) encode(pe packetEncoder) error { pe.putInt8(int8(a.ResourceType)) if err := pe.putNullableString(a.ResourceName); err != nil { return err } if a.Version == 1 { pe.putInt8(int8(a.ResourcePatternTypeFilter)) } if err := pe.putNullableString(a.Principal); err != nil { return err } if err := pe.putNullableString(a.Host); err != nil { return err } pe.putInt8(int8(a.Operation)) pe.putInt8(int8(a.PermissionType)) return nil } func (a *AclFilter) decode(pd packetDecoder, version int16) (err error) { resourceType, err := pd.getInt8() if err != nil { return err } a.ResourceType = AclResourceType(resourceType) if a.ResourceName, err = pd.getNullableString(); err != nil { return err } if a.Version == 1 { pattern, err := pd.getInt8() if err != nil { return err } a.ResourcePatternTypeFilter = AclResourcePatternType(pattern) } if a.Principal, err = pd.getNullableString(); err != nil { return err } if a.Host, err = pd.getNullableString(); err != nil { return err } operation, err := pd.getInt8() if err != nil { return err } a.Operation = AclOperation(operation) permissionType, err := pd.getInt8() if err != nil { return err } a.PermissionType = AclPermissionType(permissionType) return nil } golang-github-ibm-sarama-1.43.2/acl_types.go000066400000000000000000000151511461256741300206650ustar00rootroot00000000000000package sarama import ( "fmt" "strings" ) type ( AclOperation int AclPermissionType int AclResourceType int AclResourcePatternType int ) // ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclOperation.java const ( AclOperationUnknown AclOperation = iota AclOperationAny AclOperationAll AclOperationRead AclOperationWrite AclOperationCreate AclOperationDelete AclOperationAlter 
AclOperationDescribe AclOperationClusterAction AclOperationDescribeConfigs AclOperationAlterConfigs AclOperationIdempotentWrite ) func (a *AclOperation) String() string { mapping := map[AclOperation]string{ AclOperationUnknown: "Unknown", AclOperationAny: "Any", AclOperationAll: "All", AclOperationRead: "Read", AclOperationWrite: "Write", AclOperationCreate: "Create", AclOperationDelete: "Delete", AclOperationAlter: "Alter", AclOperationDescribe: "Describe", AclOperationClusterAction: "ClusterAction", AclOperationDescribeConfigs: "DescribeConfigs", AclOperationAlterConfigs: "AlterConfigs", AclOperationIdempotentWrite: "IdempotentWrite", } s, ok := mapping[*a] if !ok { s = mapping[AclOperationUnknown] } return s } // MarshalText returns the text form of the AclOperation (name without prefix) func (a *AclOperation) MarshalText() ([]byte, error) { return []byte(a.String()), nil } // UnmarshalText takes a text representation of the operation and converts it to an AclOperation func (a *AclOperation) UnmarshalText(text []byte) error { normalized := strings.ToLower(string(text)) mapping := map[string]AclOperation{ "unknown": AclOperationUnknown, "any": AclOperationAny, "all": AclOperationAll, "read": AclOperationRead, "write": AclOperationWrite, "create": AclOperationCreate, "delete": AclOperationDelete, "alter": AclOperationAlter, "describe": AclOperationDescribe, "clusteraction": AclOperationClusterAction, "describeconfigs": AclOperationDescribeConfigs, "alterconfigs": AclOperationAlterConfigs, "idempotentwrite": AclOperationIdempotentWrite, } ao, ok := mapping[normalized] if !ok { *a = AclOperationUnknown return fmt.Errorf("no acl operation with name %s", normalized) } *a = ao return nil } // ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclPermissionType.java const ( AclPermissionUnknown AclPermissionType = iota AclPermissionAny AclPermissionDeny AclPermissionAllow ) func (a *AclPermissionType) String() string { mapping := map[AclPermissionType]string{ AclPermissionUnknown: "Unknown", AclPermissionAny: "Any", AclPermissionDeny: "Deny", AclPermissionAllow: "Allow", } s, ok := mapping[*a] if !ok { s = mapping[AclPermissionUnknown] } return s } // MarshalText returns the text form of the AclPermissionType (name without prefix) func (a *AclPermissionType) MarshalText() ([]byte, error) { return []byte(a.String()), nil } // UnmarshalText takes a text representation of the permission type and converts it to an AclPermissionType func (a *AclPermissionType) UnmarshalText(text []byte) error { normalized := strings.ToLower(string(text)) mapping := map[string]AclPermissionType{ "unknown": AclPermissionUnknown, "any": AclPermissionAny, "deny": AclPermissionDeny, "allow": AclPermissionAllow, } apt, ok := mapping[normalized] if !ok { *a = AclPermissionUnknown return fmt.Errorf("no acl permission with name %s", normalized) } *a = apt return nil } // ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java const ( AclResourceUnknown AclResourceType = iota AclResourceAny AclResourceTopic AclResourceGroup AclResourceCluster AclResourceTransactionalID AclResourceDelegationToken ) func (a *AclResourceType) String() string { mapping := map[AclResourceType]string{ AclResourceUnknown: "Unknown", AclResourceAny: "Any", AclResourceTopic: "Topic", AclResourceGroup: "Group", AclResourceCluster: "Cluster", AclResourceTransactionalID: "TransactionalID", AclResourceDelegationToken: "DelegationToken", } s, ok 
:= mapping[*a] if !ok { s = mapping[AclResourceUnknown] } return s } // MarshalText returns the text form of the AclResourceType (name without prefix) func (a *AclResourceType) MarshalText() ([]byte, error) { return []byte(a.String()), nil } // UnmarshalText takes a text representation of the resource type and converts it to an AclResourceType func (a *AclResourceType) UnmarshalText(text []byte) error { normalized := strings.ToLower(string(text)) mapping := map[string]AclResourceType{ "unknown": AclResourceUnknown, "any": AclResourceAny, "topic": AclResourceTopic, "group": AclResourceGroup, "cluster": AclResourceCluster, "transactionalid": AclResourceTransactionalID, "delegationtoken": AclResourceDelegationToken, } art, ok := mapping[normalized] if !ok { *a = AclResourceUnknown return fmt.Errorf("no acl resource with name %s", normalized) } *a = art return nil } // ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/PatternType.java const ( AclPatternUnknown AclResourcePatternType = iota AclPatternAny AclPatternMatch AclPatternLiteral AclPatternPrefixed ) func (a *AclResourcePatternType) String() string { mapping := map[AclResourcePatternType]string{ AclPatternUnknown: "Unknown", AclPatternAny: "Any", AclPatternMatch: "Match", AclPatternLiteral: "Literal", AclPatternPrefixed: "Prefixed", } s, ok := mapping[*a] if !ok { s = mapping[AclPatternUnknown] } return s } // MarshalText returns the text form of the AclResourcePatternType (name without prefix) func (a *AclResourcePatternType) MarshalText() ([]byte, error) { return []byte(a.String()), nil } // UnmarshalText takes a text representation of the resource pattern type and converts it to an AclResourcePatternType func (a *AclResourcePatternType) UnmarshalText(text []byte) error { normalized := strings.ToLower(string(text)) mapping := map[string]AclResourcePatternType{ "unknown": AclPatternUnknown, "any": AclPatternAny, "match": AclPatternMatch, "literal": AclPatternLiteral, "prefixed": AclPatternPrefixed, } arpt, ok := mapping[normalized] if !ok { *a = AclPatternUnknown return fmt.Errorf("no acl resource pattern with name %s", normalized) } *a = arpt return nil } golang-github-ibm-sarama-1.43.2/acl_types_test.go000066400000000000000000000034301461256741300217210ustar00rootroot00000000000000package sarama import ( "testing" ) func TestAclOperationTextMarshal(t *testing.T) { for i := AclOperationUnknown; i <= AclOperationIdempotentWrite; i++ { text, err := i.MarshalText() if err != nil { t.Errorf("couldn't marshal %d to text: %s", i, err) } var got AclOperation err = got.UnmarshalText(text) if err != nil { t.Errorf("couldn't unmarshal %s to acl operation: %s", text, err) } if got != i { t.Errorf("got %d, want %d", got, i) } } } func TestAclPermissionTypeTextMarshal(t *testing.T) { for i := AclPermissionUnknown; i <= AclPermissionAllow; i++ { text, err := i.MarshalText() if err != nil { t.Errorf("couldn't marshal %d to text: %s", i, err) } var got AclPermissionType err = got.UnmarshalText(text) if err != nil { t.Errorf("couldn't unmarshal %s to acl permission: %s", text, err) } if got != i { t.Errorf("got %d, want %d", got, i) } } } func TestAclResourceTypeTextMarshal(t *testing.T) { for i := AclResourceUnknown; i <= AclResourceTransactionalID; i++ { text, err := i.MarshalText() if err != nil { t.Errorf("couldn't marshal %d to text: %s", i, err) } var got AclResourceType err = got.UnmarshalText(text) if err != nil { t.Errorf("couldn't unmarshal %s to acl resource: %s", text, err) } if got != i 
{ t.Errorf("got %d, want %d", got, i) } } } func TestAclResourcePatternTypeTextMarshal(t *testing.T) { for i := AclPatternUnknown; i <= AclPatternPrefixed; i++ { text, err := i.MarshalText() if err != nil { t.Errorf("couldn't marshal %d to text: %s", i, err) } var got AclResourcePatternType err = got.UnmarshalText(text) if err != nil { t.Errorf("couldn't unmarshal %s to acl resource pattern: %s", text, err) } if got != i { t.Errorf("got %d, want %d", got, i) } } } golang-github-ibm-sarama-1.43.2/add_offsets_to_txn_request.go000066400000000000000000000025731461256741300243320ustar00rootroot00000000000000package sarama // AddOffsetsToTxnRequest adds offsets to a transaction request type AddOffsetsToTxnRequest struct { Version int16 TransactionalID string ProducerID int64 ProducerEpoch int16 GroupID string } func (a *AddOffsetsToTxnRequest) encode(pe packetEncoder) error { if err := pe.putString(a.TransactionalID); err != nil { return err } pe.putInt64(a.ProducerID) pe.putInt16(a.ProducerEpoch) if err := pe.putString(a.GroupID); err != nil { return err } return nil } func (a *AddOffsetsToTxnRequest) decode(pd packetDecoder, version int16) (err error) { if a.TransactionalID, err = pd.getString(); err != nil { return err } if a.ProducerID, err = pd.getInt64(); err != nil { return err } if a.ProducerEpoch, err = pd.getInt16(); err != nil { return err } if a.GroupID, err = pd.getString(); err != nil { return err } return nil } func (a *AddOffsetsToTxnRequest) key() int16 { return 25 } func (a *AddOffsetsToTxnRequest) version() int16 { return a.Version } func (a *AddOffsetsToTxnRequest) headerVersion() int16 { return 1 } func (a *AddOffsetsToTxnRequest) isValidVersion() bool { return a.Version >= 0 && a.Version <= 2 } func (a *AddOffsetsToTxnRequest) requiredVersion() KafkaVersion { switch a.Version { case 2: return V2_7_0_0 case 1: return V2_0_0_0 case 0: return V0_11_0_0 default: return V2_7_0_0 } } golang-github-ibm-sarama-1.43.2/add_offsets_to_txn_request_test.go000066400000000000000000000006331461256741300253640ustar00rootroot00000000000000package sarama import "testing" var addOffsetsToTxnRequest = []byte{ 0, 3, 't', 'x', 'n', 0, 0, 0, 0, 0, 0, 31, 64, 0, 0, 0, 7, 'g', 'r', 'o', 'u', 'p', 'i', 'd', } func TestAddOffsetsToTxnRequest(t *testing.T) { req := &AddOffsetsToTxnRequest{ TransactionalID: "txn", ProducerID: 8000, ProducerEpoch: 0, GroupID: "groupid", } testRequest(t, "", req, addOffsetsToTxnRequest) } golang-github-ibm-sarama-1.43.2/add_offsets_to_txn_response.go000066400000000000000000000024321461256741300244720ustar00rootroot00000000000000package sarama import ( "time" ) // AddOffsetsToTxnResponse is a response type for adding offsets to txns type AddOffsetsToTxnResponse struct { Version int16 ThrottleTime time.Duration Err KError } func (a *AddOffsetsToTxnResponse) encode(pe packetEncoder) error { pe.putInt32(int32(a.ThrottleTime / time.Millisecond)) pe.putInt16(int16(a.Err)) return nil } func (a *AddOffsetsToTxnResponse) decode(pd packetDecoder, version int16) (err error) { throttleTime, err := pd.getInt32() if err != nil { return err } a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond kerr, err := pd.getInt16() if err != nil { return err } a.Err = KError(kerr) return nil } func (a *AddOffsetsToTxnResponse) key() int16 { return 25 } func (a *AddOffsetsToTxnResponse) version() int16 { return a.Version } func (a *AddOffsetsToTxnResponse) headerVersion() int16 { return 0 } func (a *AddOffsetsToTxnResponse) isValidVersion() bool { return a.Version >= 0 && a.Version <= 2 
} func (a *AddOffsetsToTxnResponse) requiredVersion() KafkaVersion { switch a.Version { case 2: return V2_7_0_0 case 1: return V2_0_0_0 case 0: return V0_11_0_0 default: return V2_7_0_0 } } func (r *AddOffsetsToTxnResponse) throttleTime() time.Duration { return r.ThrottleTime } golang-github-ibm-sarama-1.43.2/add_offsets_to_txn_response_test.go000066400000000000000000000005161461256741300255320ustar00rootroot00000000000000package sarama import ( "testing" "time" ) var addOffsetsToTxnResponse = []byte{ 0, 0, 0, 100, 0, 47, } func TestAddOffsetsToTxnResponse(t *testing.T) { resp := &AddOffsetsToTxnResponse{ ThrottleTime: 100 * time.Millisecond, Err: ErrInvalidProducerEpoch, } testResponse(t, "", resp, addOffsetsToTxnResponse) } golang-github-ibm-sarama-1.43.2/add_partitions_to_txn_request.go000066400000000000000000000035211461256741300250470ustar00rootroot00000000000000package sarama // AddPartitionsToTxnRequest is a add partition request type AddPartitionsToTxnRequest struct { Version int16 TransactionalID string ProducerID int64 ProducerEpoch int16 TopicPartitions map[string][]int32 } func (a *AddPartitionsToTxnRequest) encode(pe packetEncoder) error { if err := pe.putString(a.TransactionalID); err != nil { return err } pe.putInt64(a.ProducerID) pe.putInt16(a.ProducerEpoch) if err := pe.putArrayLength(len(a.TopicPartitions)); err != nil { return err } for topic, partitions := range a.TopicPartitions { if err := pe.putString(topic); err != nil { return err } if err := pe.putInt32Array(partitions); err != nil { return err } } return nil } func (a *AddPartitionsToTxnRequest) decode(pd packetDecoder, version int16) (err error) { if a.TransactionalID, err = pd.getString(); err != nil { return err } if a.ProducerID, err = pd.getInt64(); err != nil { return err } if a.ProducerEpoch, err = pd.getInt16(); err != nil { return err } n, err := pd.getArrayLength() if err != nil { return err } a.TopicPartitions = make(map[string][]int32) for i := 0; i < n; i++ { topic, err := pd.getString() if err != nil { return err } partitions, err := pd.getInt32Array() if err != nil { return err } a.TopicPartitions[topic] = partitions } return nil } func (a *AddPartitionsToTxnRequest) key() int16 { return 24 } func (a *AddPartitionsToTxnRequest) version() int16 { return a.Version } func (a *AddPartitionsToTxnRequest) headerVersion() int16 { return 1 } func (a *AddPartitionsToTxnRequest) isValidVersion() bool { return a.Version >= 0 && a.Version <= 2 } func (a *AddPartitionsToTxnRequest) requiredVersion() KafkaVersion { switch a.Version { case 2: return V2_7_0_0 case 1: return V2_0_0_0 default: return V0_11_0_0 } } golang-github-ibm-sarama-1.43.2/add_partitions_to_txn_request_test.go000066400000000000000000000010141461256741300261010ustar00rootroot00000000000000package sarama import "testing" var addPartitionsToTxnRequest = []byte{ 0, 3, 't', 'x', 'n', 0, 0, 0, 0, 0, 0, 31, 64, // ProducerID 0, 0, 0, 0, // ProducerEpoch 0, 1, // 1 topic 0, 5, 't', 'o', 'p', 'i', 'c', 0, 0, 0, 1, 0, 0, 0, 1, } func TestAddPartitionsToTxnRequest(t *testing.T) { req := &AddPartitionsToTxnRequest{ TransactionalID: "txn", ProducerID: 8000, ProducerEpoch: 0, TopicPartitions: map[string][]int32{ "topic": {1}, }, } testRequest(t, "", req, addPartitionsToTxnRequest) } golang-github-ibm-sarama-1.43.2/add_partitions_to_txn_response.go000066400000000000000000000050121461256741300252120ustar00rootroot00000000000000package sarama import ( "time" ) // AddPartitionsToTxnResponse is a partition errors to transaction type type 
AddPartitionsToTxnResponse struct { Version int16 ThrottleTime time.Duration Errors map[string][]*PartitionError } func (a *AddPartitionsToTxnResponse) encode(pe packetEncoder) error { pe.putInt32(int32(a.ThrottleTime / time.Millisecond)) if err := pe.putArrayLength(len(a.Errors)); err != nil { return err } for topic, e := range a.Errors { if err := pe.putString(topic); err != nil { return err } if err := pe.putArrayLength(len(e)); err != nil { return err } for _, partitionError := range e { if err := partitionError.encode(pe); err != nil { return err } } } return nil } func (a *AddPartitionsToTxnResponse) decode(pd packetDecoder, version int16) (err error) { a.Version = version throttleTime, err := pd.getInt32() if err != nil { return err } a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond n, err := pd.getArrayLength() if err != nil { return err } a.Errors = make(map[string][]*PartitionError) for i := 0; i < n; i++ { topic, err := pd.getString() if err != nil { return err } m, err := pd.getArrayLength() if err != nil { return err } a.Errors[topic] = make([]*PartitionError, m) for j := 0; j < m; j++ { a.Errors[topic][j] = new(PartitionError) if err := a.Errors[topic][j].decode(pd, version); err != nil { return err } } } return nil } func (a *AddPartitionsToTxnResponse) key() int16 { return 24 } func (a *AddPartitionsToTxnResponse) version() int16 { return a.Version } func (a *AddPartitionsToTxnResponse) headerVersion() int16 { return 0 } func (a *AddPartitionsToTxnResponse) isValidVersion() bool { return a.Version >= 0 && a.Version <= 2 } func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion { switch a.Version { case 2: return V2_7_0_0 case 1: return V2_0_0_0 default: return V0_11_0_0 } } func (r *AddPartitionsToTxnResponse) throttleTime() time.Duration { return r.ThrottleTime } // PartitionError is a partition error type type PartitionError struct { Partition int32 Err KError } func (p *PartitionError) encode(pe packetEncoder) error { pe.putInt32(p.Partition) pe.putInt16(int16(p.Err)) return nil } func (p *PartitionError) decode(pd packetDecoder, version int16) (err error) { if p.Partition, err = pd.getInt32(); err != nil { return err } kerr, err := pd.getInt16() if err != nil { return err } p.Err = KError(kerr) return nil } golang-github-ibm-sarama-1.43.2/add_partitions_to_txn_response_test.go000066400000000000000000000010351461256741300262520ustar00rootroot00000000000000package sarama import ( "testing" "time" ) var addPartitionsToTxnResponse = []byte{ 0, 0, 0, 100, 0, 0, 0, 1, 0, 5, 't', 'o', 'p', 'i', 'c', 0, 0, 0, 1, // 1 partition error 0, 0, 0, 2, // partition 2 0, 48, // error } func TestAddPartitionsToTxnResponse(t *testing.T) { resp := &AddPartitionsToTxnResponse{ ThrottleTime: 100 * time.Millisecond, Errors: map[string][]*PartitionError{ "topic": {{ Err: ErrInvalidTxnState, Partition: 2, }}, }, } testResponse(t, "", resp, addPartitionsToTxnResponse) } golang-github-ibm-sarama-1.43.2/admin.go000066400000000000000000001062331461256741300177740ustar00rootroot00000000000000package sarama import ( "errors" "fmt" "math/rand" "strconv" "sync" "time" ) // ClusterAdmin is the administrative client for Kafka, which supports managing and inspecting topics, // brokers, configurations and ACLs. The minimum broker version required is 0.10.0.0. // Methods with stricter requirements will specify the minimum broker version required. // You MUST call Close() on a client to avoid leaks type ClusterAdmin interface { // Creates a new topic. 
This operation is supported by brokers with version 0.10.1.0 or higher. // It may take several seconds after CreateTopic returns success for all the brokers // to become aware that the topic has been created. During this time, listTopics // may not return information about the new topic. The validateOnly option is supported from version 0.10.2.0. CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error // List the topics available in the cluster with the default options. ListTopics() (map[string]TopicDetail, error) // Describe some topics in the cluster. DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) // Delete a topic. It may take several seconds after DeleteTopic returns success // for all the brokers to become aware that the topic is gone. // During this time, listTopics may continue to return information about the deleted topic. // If delete.topic.enable is false on the brokers, deleteTopic will mark // the topic for deletion, but not actually delete it. // This operation is supported by brokers with version 0.10.1.0 or higher. DeleteTopic(topic string) error // Increase the number of partitions of the topic to the given count. // If partitions are increased for a topic that has a key, the partition logic or ordering of // the messages will be affected. It may take several seconds after this method returns // success for all the brokers to become aware that the partitions have been created. // During this time, ClusterAdmin#describeTopics may not return information about the // new partitions. This operation is supported by brokers with version 1.0.0 or higher. CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error // Alter the replica assignment for partitions. // This operation is supported by brokers with version 2.4.0.0 or higher. AlterPartitionReassignments(topic string, assignment [][]int32) error // Provides info on ongoing partition replica reassignments. // This operation is supported by brokers with version 2.4.0.0 or higher. ListPartitionReassignments(topics string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error) // Delete records whose offset is smaller than the given offset of the corresponding partition. // This operation is supported by brokers with version 0.11.0.0 or higher. DeleteRecords(topic string, partitionOffsets map[int32]int64) error // Get the configuration for the specified resources. // The returned configuration includes default values, and entries where Default is true // can be used to distinguish them from user-supplied values. // Config entries where ReadOnly is true cannot be updated. // The value of config entries where Sensitive is true is always nil so // sensitive information is not disclosed. // This operation is supported by brokers with version 0.11.0.0 or higher. DescribeConfig(resource ConfigResource) ([]ConfigEntry, error) // Update the configuration for the specified resources with the default options. // This operation is supported by brokers with version 0.11.0.0 or higher. // (Topic is currently the only resource type whose configs can be updated.) // Updates are not transactional, so they may succeed for some resources while // failing for others. The configs for a particular resource are updated atomically.
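// As a hedged example (the topic name and retention value are illustrative, and
// "admin" is assumed to be an existing ClusterAdmin):
//
//	retention := "86400000"
//	entries := map[string]*string{"retention.ms": &retention}
//	err := admin.AlterConfig(TopicResource, "example-topic", entries, false)
//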
AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error // IncrementalAlterConfig incrementally updates the configuration for the specified resources with the default options. // This operation is supported by brokers with version 2.3.0.0 or higher. // Updates are not transactional, so they may succeed for some resources while failing for others. // The configs for a particular resource are updated atomically. IncrementalAlterConfig(resourceType ConfigResourceType, name string, entries map[string]IncrementalAlterConfigsEntry, validateOnly bool) error // Creates an access control list (ACL) which is bound to a specific resource. // This operation is not transactional, so it may succeed or fail. // If you attempt to add an ACL that duplicates an existing ACL, no error will be raised, but // no changes will be made. This operation is supported by brokers with version 0.11.0.0 or higher. // Deprecated: Use CreateACLs instead. CreateACL(resource Resource, acl Acl) error // Creates access control lists (ACLs) which are bound to specific resources. // This operation is not transactional, so it may succeed for some ACLs while failing for others. // If you attempt to add an ACL that duplicates an existing ACL, no error will be raised, but // no changes will be made. This operation is supported by brokers with version 0.11.0.0 or higher. CreateACLs([]*ResourceAcls) error // Lists access control lists (ACLs) according to the supplied filter. // It may take some time for changes made by CreateACLs or DeleteACL to be reflected in the output of ListAcls. // This operation is supported by brokers with version 0.11.0.0 or higher. ListAcls(filter AclFilter) ([]ResourceAcls, error) // Deletes access control lists (ACLs) according to the supplied filters. // This operation is not transactional, so it may succeed for some ACLs while failing for others. // This operation is supported by brokers with version 0.11.0.0 or higher. DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error) // List the consumer groups available in the cluster. ListConsumerGroups() (map[string]string, error) // Describe the given consumer groups. DescribeConsumerGroups(groups []string) ([]*GroupDescription, error) // List the consumer group offsets available in the cluster. ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error) // Deletes a consumer group offset. DeleteConsumerGroupOffset(group string, topic string, partition int32) error // Delete a consumer group. DeleteConsumerGroup(group string) error // Get information about the nodes in the cluster. DescribeCluster() (brokers []*Broker, controllerID int32, err error) // Get information about all log directories on the given set of brokers. DescribeLogDirs(brokers []int32) (map[int32][]DescribeLogDirsResponseDirMetadata, error) // Get information about SCRAM users. DescribeUserScramCredentials(users []string) ([]*DescribeUserScramCredentialsResult, error) // Delete SCRAM users. DeleteUserScramCredentials(delete []AlterUserScramCredentialsDelete) ([]*AlterUserScramCredentialsResult, error) // Upsert SCRAM users. UpsertUserScramCredentials(upsert []AlterUserScramCredentialsUpsert) ([]*AlterUserScramCredentialsResult, error) // Get client quota configurations corresponding to the specified filter. // This operation is supported by brokers with version 2.6.0.0 or higher.
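// As a sketch (again assuming an existing ClusterAdmin named "admin"), passing a
// nil component slice with strict=false requests all quota configurations:
//
//	quotas, err := admin.DescribeClientQuotas(nil, false)
//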
DescribeClientQuotas(components []QuotaFilterComponent, strict bool) ([]DescribeClientQuotasEntry, error) // Alters client quota configurations with the specified alterations. // This operation is supported by brokers with version 2.6.0.0 or higher. AlterClientQuotas(entity []QuotaEntityComponent, op ClientQuotasOp, validateOnly bool) error // Controller returns the cluster controller broker. It will return a // locally cached value if it's available. Controller() (*Broker, error) // Remove members from the consumer group by given member identities. // This operation is supported by brokers with version 2.3 or higher // This is for static membership feature. KIP-345 RemoveMemberFromConsumerGroup(groupId string, groupInstanceIds []string) (*LeaveGroupResponse, error) // Close shuts down the admin and closes underlying client. Close() error } type clusterAdmin struct { client Client conf *Config } // NewClusterAdmin creates a new ClusterAdmin using the given broker addresses and configuration. func NewClusterAdmin(addrs []string, conf *Config) (ClusterAdmin, error) { client, err := NewClient(addrs, conf) if err != nil { return nil, err } admin, err := NewClusterAdminFromClient(client) if err != nil { client.Close() } return admin, err } // NewClusterAdminFromClient creates a new ClusterAdmin using the given client. // Note that underlying client will also be closed on admin's Close() call. func NewClusterAdminFromClient(client Client) (ClusterAdmin, error) { // make sure we can retrieve the controller _, err := client.Controller() if err != nil { return nil, err } ca := &clusterAdmin{ client: client, conf: client.Config(), } return ca, nil } func (ca *clusterAdmin) Close() error { return ca.client.Close() } func (ca *clusterAdmin) Controller() (*Broker, error) { return ca.client.Controller() } func (ca *clusterAdmin) refreshController() (*Broker, error) { return ca.client.RefreshController() } // isErrNotController returns `true` if the given error type unwraps to an // `ErrNotController` response from Kafka func isErrNotController(err error) bool { return errors.Is(err, ErrNotController) } // retryOnError will repeatedly call the given (error-returning) func in the // case that its response is non-nil and retryable (as determined by the // provided retryable func) up to the maximum number of tries permitted by // the admin client configuration func (ca *clusterAdmin) retryOnError(retryable func(error) bool, fn func() error) error { for attemptsRemaining := ca.conf.Admin.Retry.Max + 1; ; { err := fn() attemptsRemaining-- if err == nil || attemptsRemaining <= 0 || !retryable(err) { return err } Logger.Printf( "admin/request retrying after %dms... 
(%d attempts remaining)\n", ca.conf.Admin.Retry.Backoff/time.Millisecond, attemptsRemaining) time.Sleep(ca.conf.Admin.Retry.Backoff) } } func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error { if topic == "" { return ErrInvalidTopic } if detail == nil { return errors.New("you must specify topic details") } topicDetails := make(map[string]*TopicDetail) topicDetails[topic] = detail request := &CreateTopicsRequest{ TopicDetails: topicDetails, ValidateOnly: validateOnly, Timeout: ca.conf.Admin.Timeout, } if ca.conf.Version.IsAtLeast(V2_0_0_0) { // Version 3 is the same as version 2 (brokers response before throttling) request.Version = 3 } else if ca.conf.Version.IsAtLeast(V0_11_0_0) { // Version 2 is the same as version 1 (response has ThrottleTime) request.Version = 2 } else if ca.conf.Version.IsAtLeast(V0_10_2_0) { // Version 1 adds validateOnly. request.Version = 1 } return ca.retryOnError(isErrNotController, func() error { b, err := ca.Controller() if err != nil { return err } rsp, err := b.CreateTopics(request) if err != nil { return err } topicErr, ok := rsp.TopicErrors[topic] if !ok { return ErrIncompleteResponse } if !errors.Is(topicErr.Err, ErrNoError) { if errors.Is(topicErr.Err, ErrNotController) { _, _ = ca.refreshController() } return topicErr } return nil }) } func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) { var response *MetadataResponse err = ca.retryOnError(isErrNotController, func() error { controller, err := ca.Controller() if err != nil { return err } request := NewMetadataRequest(ca.conf.Version, topics) response, err = controller.GetMetadata(request) if isErrNotController(err) { _, _ = ca.refreshController() } return err }) if err != nil { return nil, err } return response.Topics, nil } func (ca *clusterAdmin) DescribeCluster() (brokers []*Broker, controllerID int32, err error) { var response *MetadataResponse err = ca.retryOnError(isErrNotController, func() error { controller, err := ca.Controller() if err != nil { return err } request := NewMetadataRequest(ca.conf.Version, nil) response, err = controller.GetMetadata(request) if isErrNotController(err) { _, _ = ca.refreshController() } return err }) if err != nil { return nil, int32(0), err } return response.Brokers, response.ControllerID, nil } func (ca *clusterAdmin) findBroker(id int32) (*Broker, error) { brokers := ca.client.Brokers() for _, b := range brokers { if b.ID() == id { return b, nil } } return nil, fmt.Errorf("could not find broker id %d", id) } func (ca *clusterAdmin) findAnyBroker() (*Broker, error) { brokers := ca.client.Brokers() if len(brokers) > 0 { index := rand.Intn(len(brokers)) return brokers[index], nil } return nil, errors.New("no available broker") } func (ca *clusterAdmin) ListTopics() (map[string]TopicDetail, error) { // In order to build TopicDetails we need to first get the list of all // topics using a MetadataRequest and then get their configs using a // DescribeConfigsRequest request. To avoid sending many requests to the // broker, we use a single DescribeConfigsRequest. 
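// The metadata response provides partition counts and replica assignments; the
// configs response then fills in the non-default, non-sensitive config entries.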
// Send the all-topic MetadataRequest b, err := ca.findAnyBroker() if err != nil { return nil, err } _ = b.Open(ca.client.Config()) metadataReq := NewMetadataRequest(ca.conf.Version, nil) metadataResp, err := b.GetMetadata(metadataReq) if err != nil { return nil, err } topicsDetailsMap := make(map[string]TopicDetail) var describeConfigsResources []*ConfigResource for _, topic := range metadataResp.Topics { topicDetails := TopicDetail{ NumPartitions: int32(len(topic.Partitions)), } if len(topic.Partitions) > 0 { topicDetails.ReplicaAssignment = map[int32][]int32{} for _, partition := range topic.Partitions { topicDetails.ReplicaAssignment[partition.ID] = partition.Replicas } topicDetails.ReplicationFactor = int16(len(topic.Partitions[0].Replicas)) } topicsDetailsMap[topic.Name] = topicDetails // we populate the resources we want to describe from the MetadataResponse topicResource := ConfigResource{ Type: TopicResource, Name: topic.Name, } describeConfigsResources = append(describeConfigsResources, &topicResource) } // Send the DescribeConfigsRequest describeConfigsReq := &DescribeConfigsRequest{ Resources: describeConfigsResources, } if ca.conf.Version.IsAtLeast(V1_1_0_0) { describeConfigsReq.Version = 1 } if ca.conf.Version.IsAtLeast(V2_0_0_0) { describeConfigsReq.Version = 2 } describeConfigsResp, err := b.DescribeConfigs(describeConfigsReq) if err != nil { return nil, err } for _, resource := range describeConfigsResp.Resources { topicDetails := topicsDetailsMap[resource.Name] topicDetails.ConfigEntries = make(map[string]*string) for _, entry := range resource.Configs { entry := entry // only include non-default non-sensitive config // (don't actually think topic config will ever be sensitive) if entry.Default || entry.Sensitive { continue } topicDetails.ConfigEntries[entry.Name] = &entry.Value } topicsDetailsMap[resource.Name] = topicDetails } return topicsDetailsMap, nil } func (ca *clusterAdmin) DeleteTopic(topic string) error { if topic == "" { return ErrInvalidTopic } request := &DeleteTopicsRequest{ Topics: []string{topic}, Timeout: ca.conf.Admin.Timeout, } // Versions 0, 1, 2, and 3 are the same. 
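// (Kafka >= 2.1.0 maps to request version 3, >= 2.0.0 to version 2, >= 0.11.0 to version 1, otherwise version 0.)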
if ca.conf.Version.IsAtLeast(V2_1_0_0) { request.Version = 3 } else if ca.conf.Version.IsAtLeast(V2_0_0_0) { request.Version = 2 } else if ca.conf.Version.IsAtLeast(V0_11_0_0) { request.Version = 1 } return ca.retryOnError(isErrNotController, func() error { b, err := ca.Controller() if err != nil { return err } rsp, err := b.DeleteTopics(request) if err != nil { return err } topicErr, ok := rsp.TopicErrorCodes[topic] if !ok { return ErrIncompleteResponse } if !errors.Is(topicErr, ErrNoError) { if errors.Is(topicErr, ErrNotController) { _, _ = ca.refreshController() } return topicErr } return nil }) } func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error { if topic == "" { return ErrInvalidTopic } topicPartitions := make(map[string]*TopicPartition) topicPartitions[topic] = &TopicPartition{Count: count, Assignment: assignment} request := &CreatePartitionsRequest{ TopicPartitions: topicPartitions, Timeout: ca.conf.Admin.Timeout, ValidateOnly: validateOnly, } if ca.conf.Version.IsAtLeast(V2_0_0_0) { request.Version = 1 } return ca.retryOnError(isErrNotController, func() error { b, err := ca.Controller() if err != nil { return err } rsp, err := b.CreatePartitions(request) if err != nil { return err } topicErr, ok := rsp.TopicPartitionErrors[topic] if !ok { return ErrIncompleteResponse } if !errors.Is(topicErr.Err, ErrNoError) { if errors.Is(topicErr.Err, ErrNotController) { _, _ = ca.refreshController() } return topicErr } return nil }) } func (ca *clusterAdmin) AlterPartitionReassignments(topic string, assignment [][]int32) error { if topic == "" { return ErrInvalidTopic } request := &AlterPartitionReassignmentsRequest{ TimeoutMs: int32(60000), Version: int16(0), } for i := 0; i < len(assignment); i++ { request.AddBlock(topic, int32(i), assignment[i]) } return ca.retryOnError(isErrNotController, func() error { b, err := ca.Controller() if err != nil { return err } errs := make([]error, 0) rsp, err := b.AlterPartitionReassignments(request) if err != nil { errs = append(errs, err) } else { if rsp.ErrorCode > 0 { errs = append(errs, rsp.ErrorCode) } for topic, topicErrors := range rsp.Errors { for partition, partitionError := range topicErrors { if !errors.Is(partitionError.errorCode, ErrNoError) { errs = append(errs, fmt.Errorf("[%s-%d]: %w", topic, partition, partitionError.errorCode)) } } } } if len(errs) > 0 { return Wrap(ErrReassignPartitions, errs...) 
} return nil }) } func (ca *clusterAdmin) ListPartitionReassignments(topic string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error) { if topic == "" { return nil, ErrInvalidTopic } request := &ListPartitionReassignmentsRequest{ TimeoutMs: int32(60000), Version: int16(0), } request.AddBlock(topic, partitions) var rsp *ListPartitionReassignmentsResponse err = ca.retryOnError(isErrNotController, func() error { b, err := ca.Controller() if err != nil { return err } _ = b.Open(ca.client.Config()) rsp, err = b.ListPartitionReassignments(request) if isErrNotController(err) { _, _ = ca.refreshController() } return err }) if err == nil && rsp != nil { return rsp.TopicStatus, nil } else { return nil, err } } func (ca *clusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]int64) error { if topic == "" { return ErrInvalidTopic } errs := make([]error, 0) partitionPerBroker := make(map[*Broker][]int32) for partition := range partitionOffsets { broker, err := ca.client.Leader(topic, partition) if err != nil { errs = append(errs, err) continue } partitionPerBroker[broker] = append(partitionPerBroker[broker], partition) } for broker, partitions := range partitionPerBroker { topics := make(map[string]*DeleteRecordsRequestTopic) recordsToDelete := make(map[int32]int64) for _, p := range partitions { recordsToDelete[p] = partitionOffsets[p] } topics[topic] = &DeleteRecordsRequestTopic{ PartitionOffsets: recordsToDelete, } request := &DeleteRecordsRequest{ Topics: topics, Timeout: ca.conf.Admin.Timeout, } if ca.conf.Version.IsAtLeast(V2_0_0_0) { request.Version = 1 } rsp, err := broker.DeleteRecords(request) if err != nil { errs = append(errs, err) continue } deleteRecordsResponseTopic, ok := rsp.Topics[topic] if !ok { errs = append(errs, ErrIncompleteResponse) continue } for _, deleteRecordsResponsePartition := range deleteRecordsResponseTopic.Partitions { if !errors.Is(deleteRecordsResponsePartition.Err, ErrNoError) { errs = append(errs, deleteRecordsResponsePartition.Err) continue } } } if len(errs) > 0 { return Wrap(ErrDeleteRecords, errs...) 
} // todo since we are dealing with couple of partitions it would be good if we return slice of errors // for each partition instead of one error return nil } // Returns a bool indicating whether the resource request needs to go to a // specific broker func dependsOnSpecificNode(resource ConfigResource) bool { return (resource.Type == BrokerResource && resource.Name != "") || resource.Type == BrokerLoggerResource } func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, error) { var entries []ConfigEntry var resources []*ConfigResource resources = append(resources, &resource) request := &DescribeConfigsRequest{ Resources: resources, } if ca.conf.Version.IsAtLeast(V1_1_0_0) { request.Version = 1 } if ca.conf.Version.IsAtLeast(V2_0_0_0) { request.Version = 2 } var ( b *Broker err error ) // DescribeConfig of broker/broker logger must be sent to the broker in question if dependsOnSpecificNode(resource) { var id int64 id, err = strconv.ParseInt(resource.Name, 10, 32) if err != nil { return nil, err } b, err = ca.findBroker(int32(id)) } else { b, err = ca.findAnyBroker() } if err != nil { return nil, err } _ = b.Open(ca.client.Config()) rsp, err := b.DescribeConfigs(request) if err != nil { return nil, err } for _, rspResource := range rsp.Resources { if rspResource.Name == resource.Name { if rspResource.ErrorCode != 0 { return nil, &DescribeConfigError{Err: KError(rspResource.ErrorCode), ErrMsg: rspResource.ErrorMsg} } for _, cfgEntry := range rspResource.Configs { entries = append(entries, *cfgEntry) } } } return entries, nil } func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error { var resources []*AlterConfigsResource resources = append(resources, &AlterConfigsResource{ Type: resourceType, Name: name, ConfigEntries: entries, }) request := &AlterConfigsRequest{ Resources: resources, ValidateOnly: validateOnly, } if ca.conf.Version.IsAtLeast(V2_0_0_0) { request.Version = 1 } var ( b *Broker err error ) // AlterConfig of broker/broker logger must be sent to the broker in question if dependsOnSpecificNode(ConfigResource{Name: name, Type: resourceType}) { var id int64 id, err = strconv.ParseInt(name, 10, 32) if err != nil { return err } b, err = ca.findBroker(int32(id)) } else { b, err = ca.findAnyBroker() } if err != nil { return err } _ = b.Open(ca.client.Config()) rsp, err := b.AlterConfigs(request) if err != nil { return err } for _, rspResource := range rsp.Resources { if rspResource.Name == name { if rspResource.ErrorCode != 0 { return &AlterConfigError{Err: KError(rspResource.ErrorCode), ErrMsg: rspResource.ErrorMsg} } } } return nil } func (ca *clusterAdmin) IncrementalAlterConfig(resourceType ConfigResourceType, name string, entries map[string]IncrementalAlterConfigsEntry, validateOnly bool) error { var resources []*IncrementalAlterConfigsResource resources = append(resources, &IncrementalAlterConfigsResource{ Type: resourceType, Name: name, ConfigEntries: entries, }) request := &IncrementalAlterConfigsRequest{ Resources: resources, ValidateOnly: validateOnly, } var ( b *Broker err error ) // AlterConfig of broker/broker logger must be sent to the broker in question if dependsOnSpecificNode(ConfigResource{Name: name, Type: resourceType}) { var id int64 id, err = strconv.ParseInt(name, 10, 32) if err != nil { return err } b, err = ca.findBroker(int32(id)) } else { b, err = ca.findAnyBroker() } if err != nil { return err } _ = b.Open(ca.client.Config()) rsp, err := 
b.IncrementalAlterConfigs(request) if err != nil { return err } for _, rspResource := range rsp.Resources { if rspResource.Name == name { if rspResource.ErrorMsg != "" { return errors.New(rspResource.ErrorMsg) } if rspResource.ErrorCode != 0 { return KError(rspResource.ErrorCode) } } } return nil } func (ca *clusterAdmin) CreateACL(resource Resource, acl Acl) error { var acls []*AclCreation acls = append(acls, &AclCreation{resource, acl}) request := &CreateAclsRequest{AclCreations: acls} if ca.conf.Version.IsAtLeast(V2_0_0_0) { request.Version = 1 } b, err := ca.Controller() if err != nil { return err } _, err = b.CreateAcls(request) return err } func (ca *clusterAdmin) CreateACLs(resourceACLs []*ResourceAcls) error { var acls []*AclCreation for _, resourceACL := range resourceACLs { for _, acl := range resourceACL.Acls { acls = append(acls, &AclCreation{resourceACL.Resource, *acl}) } } request := &CreateAclsRequest{AclCreations: acls} if ca.conf.Version.IsAtLeast(V2_0_0_0) { request.Version = 1 } b, err := ca.Controller() if err != nil { return err } _, err = b.CreateAcls(request) return err } func (ca *clusterAdmin) ListAcls(filter AclFilter) ([]ResourceAcls, error) { request := &DescribeAclsRequest{AclFilter: filter} if ca.conf.Version.IsAtLeast(V2_0_0_0) { request.Version = 1 } b, err := ca.Controller() if err != nil { return nil, err } rsp, err := b.DescribeAcls(request) if err != nil { return nil, err } var lAcls []ResourceAcls for _, rAcl := range rsp.ResourceAcls { lAcls = append(lAcls, *rAcl) } return lAcls, nil } func (ca *clusterAdmin) DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error) { var filters []*AclFilter filters = append(filters, &filter) request := &DeleteAclsRequest{Filters: filters} if ca.conf.Version.IsAtLeast(V2_0_0_0) { request.Version = 1 } b, err := ca.Controller() if err != nil { return nil, err } rsp, err := b.DeleteAcls(request) if err != nil { return nil, err } var mAcls []MatchingAcl for _, fr := range rsp.FilterResponses { for _, mACL := range fr.MatchingAcls { mAcls = append(mAcls, *mACL) } } return mAcls, nil } func (ca *clusterAdmin) DescribeConsumerGroups(groups []string) (result []*GroupDescription, err error) { groupsPerBroker := make(map[*Broker][]string) for _, group := range groups { controller, err := ca.client.Coordinator(group) if err != nil { return nil, err } groupsPerBroker[controller] = append(groupsPerBroker[controller], group) } for broker, brokerGroups := range groupsPerBroker { describeReq := &DescribeGroupsRequest{ Groups: brokerGroups, } if ca.conf.Version.IsAtLeast(V2_4_0_0) { // Starting in version 4, the response will include group.instance.id info for members. describeReq.Version = 4 } else if ca.conf.Version.IsAtLeast(V2_3_0_0) { // Starting in version 3, authorized operations can be requested. describeReq.Version = 3 } else if ca.conf.Version.IsAtLeast(V2_0_0_0) { // Version 2 is the same as version 0. describeReq.Version = 2 } else if ca.conf.Version.IsAtLeast(V1_1_0_0) { // Version 1 is the same as version 0. describeReq.Version = 1 } response, err := broker.DescribeGroups(describeReq) if err != nil { return nil, err } result = append(result, response.Groups...) 
} return result, nil } func (ca *clusterAdmin) ListConsumerGroups() (allGroups map[string]string, err error) { allGroups = make(map[string]string) // Query brokers in parallel, since we have to query *all* brokers brokers := ca.client.Brokers() groupMaps := make(chan map[string]string, len(brokers)) errChan := make(chan error, len(brokers)) wg := sync.WaitGroup{} for _, b := range brokers { wg.Add(1) go func(b *Broker, conf *Config) { defer wg.Done() _ = b.Open(conf) // Ensure that broker is opened request := &ListGroupsRequest{} if ca.conf.Version.IsAtLeast(V2_6_0_0) { // Version 4 adds the StatesFilter field (KIP-518). request.Version = 4 } else if ca.conf.Version.IsAtLeast(V2_4_0_0) { // Version 3 is the first flexible version. request.Version = 3 } else if ca.conf.Version.IsAtLeast(V2_0_0_0) { // Version 2 is the same as version 0. request.Version = 2 } else if ca.conf.Version.IsAtLeast(V0_11_0_0) { // Version 1 is the same as version 0. request.Version = 1 } response, err := b.ListGroups(request) if err != nil { errChan <- err return } groups := make(map[string]string) for group, typ := range response.Groups { groups[group] = typ } groupMaps <- groups }(b, ca.conf) } wg.Wait() close(groupMaps) close(errChan) for groupMap := range groupMaps { for group, protocolType := range groupMap { allGroups[group] = protocolType } } // Intentionally return only the first error for simplicity err = <-errChan return } func (ca *clusterAdmin) ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error) { coordinator, err := ca.client.Coordinator(group) if err != nil { return nil, err } request := NewOffsetFetchRequest(ca.conf.Version, group, topicPartitions) return coordinator.FetchOffset(request) } func (ca *clusterAdmin) DeleteConsumerGroupOffset(group string, topic string, partition int32) error { coordinator, err := ca.client.Coordinator(group) if err != nil { return err } request := &DeleteOffsetsRequest{ Group: group, partitions: map[string][]int32{ topic: {partition}, }, } resp, err := coordinator.DeleteOffsets(request) if err != nil { return err } if !errors.Is(resp.ErrorCode, ErrNoError) { return resp.ErrorCode } if !errors.Is(resp.Errors[topic][partition], ErrNoError) { return resp.Errors[topic][partition] } return nil } func (ca *clusterAdmin) DeleteConsumerGroup(group string) error { coordinator, err := ca.client.Coordinator(group) if err != nil { return err } request := &DeleteGroupsRequest{ Groups: []string{group}, } if ca.conf.Version.IsAtLeast(V2_0_0_0) { request.Version = 1 } resp, err := coordinator.DeleteGroups(request) if err != nil { return err } groupErr, ok := resp.GroupErrorCodes[group] if !ok { return ErrIncompleteResponse } if !errors.Is(groupErr, ErrNoError) { return groupErr } return nil } func (ca *clusterAdmin) DescribeLogDirs(brokerIds []int32) (allLogDirs map[int32][]DescribeLogDirsResponseDirMetadata, err error) { allLogDirs = make(map[int32][]DescribeLogDirsResponseDirMetadata) // Query brokers in parallel, since we may have to query multiple brokers logDirsMaps := make(chan map[int32][]DescribeLogDirsResponseDirMetadata, len(brokerIds)) errChan := make(chan error, len(brokerIds)) wg := sync.WaitGroup{} for _, b := range brokerIds { broker, err := ca.findBroker(b) if err != nil { Logger.Printf("Unable to find broker with ID = %v\n", b) continue } wg.Add(1) go func(b *Broker, conf *Config) { defer wg.Done() _ = b.Open(conf) // Ensure that broker is opened request := &DescribeLogDirsRequest{} if 
ca.conf.Version.IsAtLeast(V2_0_0_0) { request.Version = 1 } response, err := b.DescribeLogDirs(request) if err != nil { errChan <- err return } logDirs := make(map[int32][]DescribeLogDirsResponseDirMetadata) logDirs[b.ID()] = response.LogDirs logDirsMaps <- logDirs }(broker, ca.conf) } wg.Wait() close(logDirsMaps) close(errChan) for logDirsMap := range logDirsMaps { for id, logDirs := range logDirsMap { allLogDirs[id] = logDirs } } // Intentionally return only the first error for simplicity err = <-errChan return } func (ca *clusterAdmin) DescribeUserScramCredentials(users []string) ([]*DescribeUserScramCredentialsResult, error) { req := &DescribeUserScramCredentialsRequest{} for _, u := range users { req.DescribeUsers = append(req.DescribeUsers, DescribeUserScramCredentialsRequestUser{ Name: u, }) } b, err := ca.Controller() if err != nil { return nil, err } rsp, err := b.DescribeUserScramCredentials(req) if err != nil { return nil, err } return rsp.Results, nil } func (ca *clusterAdmin) UpsertUserScramCredentials(upsert []AlterUserScramCredentialsUpsert) ([]*AlterUserScramCredentialsResult, error) { res, err := ca.AlterUserScramCredentials(upsert, nil) if err != nil { return nil, err } return res, nil } func (ca *clusterAdmin) DeleteUserScramCredentials(delete []AlterUserScramCredentialsDelete) ([]*AlterUserScramCredentialsResult, error) { res, err := ca.AlterUserScramCredentials(nil, delete) if err != nil { return nil, err } return res, nil } func (ca *clusterAdmin) AlterUserScramCredentials(u []AlterUserScramCredentialsUpsert, d []AlterUserScramCredentialsDelete) ([]*AlterUserScramCredentialsResult, error) { req := &AlterUserScramCredentialsRequest{ Deletions: d, Upsertions: u, } var rsp *AlterUserScramCredentialsResponse err := ca.retryOnError(isErrNotController, func() error { b, err := ca.Controller() if err != nil { return err } rsp, err = b.AlterUserScramCredentials(req) return err }) if err != nil { return nil, err } return rsp.Results, nil } // Describe All : use an empty/nil components slice + strict = false // Contains components: strict = false // Contains only components: strict = true func (ca *clusterAdmin) DescribeClientQuotas(components []QuotaFilterComponent, strict bool) ([]DescribeClientQuotasEntry, error) { request := &DescribeClientQuotasRequest{ Components: components, Strict: strict, } b, err := ca.Controller() if err != nil { return nil, err } rsp, err := b.DescribeClientQuotas(request) if err != nil { return nil, err } if rsp.ErrorMsg != nil && len(*rsp.ErrorMsg) > 0 { return nil, errors.New(*rsp.ErrorMsg) } if !errors.Is(rsp.ErrorCode, ErrNoError) { return nil, rsp.ErrorCode } return rsp.Entries, nil } func (ca *clusterAdmin) AlterClientQuotas(entity []QuotaEntityComponent, op ClientQuotasOp, validateOnly bool) error { entry := AlterClientQuotasEntry{ Entity: entity, Ops: []ClientQuotasOp{op}, } request := &AlterClientQuotasRequest{ Entries: []AlterClientQuotasEntry{entry}, ValidateOnly: validateOnly, } b, err := ca.Controller() if err != nil { return err } rsp, err := b.AlterClientQuotas(request) if err != nil { return err } for _, entry := range rsp.Entries { if entry.ErrorMsg != nil && len(*entry.ErrorMsg) > 0 { return errors.New(*entry.ErrorMsg) } if !errors.Is(entry.ErrorCode, ErrNoError) { return entry.ErrorCode } } return nil } func (ca *clusterAdmin) RemoveMemberFromConsumerGroup(groupId string, groupInstanceIds []string) (*LeaveGroupResponse, error) { if !ca.conf.Version.IsAtLeast(V2_4_0_0) { return nil, ConfigurationError("Removing members from a 
consumer group headers requires Kafka version of at least v2.4.0") } controller, err := ca.client.Coordinator(groupId) if err != nil { return nil, err } request := &LeaveGroupRequest{ Version: 3, GroupId: groupId, } for _, instanceId := range groupInstanceIds { groupInstanceId := instanceId request.Members = append(request.Members, MemberIdentity{ GroupInstanceId: &groupInstanceId, }) } return controller.LeaveGroup(request) } golang-github-ibm-sarama-1.43.2/admin_test.go000066400000000000000000001431521461256741300210340ustar00rootroot00000000000000package sarama import ( "errors" "strings" "testing" "time" ) func TestClusterAdmin(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), }) config := NewTestConfig() config.Version = V1_0_0_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } err = admin.Close() if err != nil { t.Fatal(err) } } func TestClusterAdminInvalidController(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), }) config := NewTestConfig() config.Version = V1_0_0_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if admin != nil { defer safeClose(t, admin) } if err == nil { t.Fatal(errors.New("controller not set still cluster admin was created")) } if !errors.Is(err, ErrControllerNotAvailable) { t.Fatal(err) } } func TestClusterAdminCreateTopic(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), "CreateTopicsRequest": NewMockCreateTopicsResponse(t), }) config := NewTestConfig() config.Version = V0_10_2_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } err = admin.CreateTopic("my_topic", &TopicDetail{NumPartitions: 1, ReplicationFactor: 1}, false) if err != nil { t.Fatal(err) } err = admin.Close() if err != nil { t.Fatal(err) } } func TestClusterAdminCreateTopicWithInvalidTopicDetail(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), "CreateTopicsRequest": NewMockCreateTopicsResponse(t), }) config := NewTestConfig() config.Version = V0_10_2_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } err = admin.CreateTopic("my_topic", nil, false) if err.Error() != "you must specify topic details" { t.Fatal(err) } err = admin.Close() if err != nil { t.Fatal(err) } } func TestClusterAdminCreateTopicWithoutAuthorization(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). 
SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), "CreateTopicsRequest": NewMockCreateTopicsResponse(t), }) config := NewTestConfig() config.Version = V0_11_0_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } err = admin.CreateTopic("_internal_topic", &TopicDetail{NumPartitions: 1, ReplicationFactor: 1}, false) want := "insufficient permissions to create topic with reserved prefix" if !strings.HasSuffix(err.Error(), want) { t.Fatal(err) } err = admin.Close() if err != nil { t.Fatal(err) } } func TestClusterAdminListTopics(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()). SetLeader("my_topic", 0, seedBroker.BrokerID()), "DescribeConfigsRequest": NewMockDescribeConfigsResponse(t), }) config := NewTestConfig() config.Version = V1_1_0_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } entries, err := admin.ListTopics() if err != nil { t.Fatal(err) } if len(entries) == 0 { t.Fatal(errors.New("no resource present")) } topic, found := entries["my_topic"] if !found { t.Fatal(errors.New("topic not found in response")) } _, found = topic.ConfigEntries["max.message.bytes"] if found { t.Fatal(errors.New("default topic config entry incorrectly found in response")) } value := topic.ConfigEntries["retention.ms"] if value == nil || *value != "5000" { t.Fatal(errors.New("non-default topic config entry not found in response")) } err = admin.Close() if err != nil { t.Fatal(err) } if topic.ReplicaAssignment == nil || topic.ReplicaAssignment[0][0] != 1 { t.Fatal(errors.New("replica assignment not found in response")) } } func TestClusterAdminDeleteTopic(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), "DeleteTopicsRequest": NewMockDeleteTopicsResponse(t), }) config := NewTestConfig() config.Version = V0_10_2_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } err = admin.DeleteTopic("my_topic") if err != nil { t.Fatal(err) } err = admin.Close() if err != nil { t.Fatal(err) } } func TestClusterAdminDeleteEmptyTopic(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), "DeleteTopicsRequest": NewMockDeleteTopicsResponse(t), }) config := NewTestConfig() config.Version = V0_10_2_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } err = admin.DeleteTopic("") if !errors.Is(err, ErrInvalidTopic) { t.Fatal(err) } err = admin.Close() if err != nil { t.Fatal(err) } } func TestClusterAdminDeleteTopicError(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). 
SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), "DeleteTopicsRequest": NewMockDeleteTopicsResponse(t).SetError(ErrTopicDeletionDisabled), }) config := NewTestConfig() config.Version = V0_10_2_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } err = admin.DeleteTopic("my_topic") if !errors.Is(err, ErrTopicDeletionDisabled) { t.Fatal(err) } err = admin.Close() if err != nil { t.Fatal(err) } } func TestClusterAdminCreatePartitions(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), "CreatePartitionsRequest": NewMockCreatePartitionsResponse(t), }) config := NewTestConfig() config.Version = V1_0_0_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } err = admin.CreatePartitions("my_topic", 3, nil, false) if err != nil { t.Fatal(err) } err = admin.Close() if err != nil { t.Fatal(err) } } func TestClusterAdminCreatePartitionsWithDiffVersion(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), "CreatePartitionsRequest": NewMockCreatePartitionsResponse(t), }) config := NewTestConfig() config.Version = V0_10_2_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } err = admin.CreatePartitions("my_topic", 3, nil, false) if !errors.Is(err, ErrUnsupportedVersion) { t.Fatal(err) } err = admin.Close() if err != nil { t.Fatal(err) } } func TestClusterAdminCreatePartitionsWithoutAuthorization(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), "CreatePartitionsRequest": NewMockCreatePartitionsResponse(t), }) config := NewTestConfig() config.Version = V1_0_0_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } err = admin.CreatePartitions("_internal_topic", 3, nil, false) want := "insufficient permissions to create partition on topic with reserved prefix" if !strings.HasSuffix(err.Error(), want) { t.Fatal(err) } err = admin.Close() if err != nil { t.Fatal(err) } } func TestClusterAdminAlterPartitionReassignments(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() secondBroker := NewMockBroker(t, 2) defer secondBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "ApiVersionsRequest": NewMockApiVersionsResponse(t), "MetadataRequest": NewMockMetadataResponse(t). SetController(secondBroker.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()). 
SetBroker(secondBroker.Addr(), secondBroker.BrokerID()), }) secondBroker.SetHandlerByMap(map[string]MockResponse{ "ApiVersionsRequest": NewMockApiVersionsResponse(t), "AlterPartitionReassignmentsRequest": NewMockAlterPartitionReassignmentsResponse(t), }) config := NewTestConfig() config.Version = V2_4_0_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } topicAssignment := make([][]int32, 0, 3) err = admin.AlterPartitionReassignments("my_topic", topicAssignment) if err != nil { t.Fatal(err) } err = admin.Close() if err != nil { t.Fatal(err) } } func TestClusterAdminAlterPartitionReassignmentsWithDiffVersion(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() secondBroker := NewMockBroker(t, 2) defer secondBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(secondBroker.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()). SetBroker(secondBroker.Addr(), secondBroker.BrokerID()), }) secondBroker.SetHandlerByMap(map[string]MockResponse{ "AlterPartitionReassignmentsRequest": NewMockAlterPartitionReassignmentsResponse(t), }) config := NewTestConfig() config.Version = V2_3_0_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } topicAssignment := make([][]int32, 0, 3) err = admin.AlterPartitionReassignments("my_topic", topicAssignment) if !strings.ContainsAny(err.Error(), ErrUnsupportedVersion.Error()) { t.Fatal(err) } err = admin.Close() if err != nil { t.Fatal(err) } } func TestClusterAdminListPartitionReassignments(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() secondBroker := NewMockBroker(t, 2) defer secondBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "ApiVersionsRequest": NewMockApiVersionsResponse(t), "MetadataRequest": NewMockMetadataResponse(t). SetController(secondBroker.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()). SetBroker(secondBroker.Addr(), secondBroker.BrokerID()), }) secondBroker.SetHandlerByMap(map[string]MockResponse{ "ApiVersionsRequest": NewMockApiVersionsResponse(t), "ListPartitionReassignmentsRequest": NewMockListPartitionReassignmentsResponse(t), }) config := NewTestConfig() config.Version = V2_4_0_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } response, err := admin.ListPartitionReassignments("my_topic", []int32{0, 1}) if err != nil { t.Fatal(err) } partitionStatus, ok := response["my_topic"] if !ok { t.Fatalf("topic missing in response") } if len(partitionStatus) != 2 { t.Fatalf("partition missing in response") } err = admin.Close() if err != nil { t.Fatal(err) } } func TestClusterAdminListPartitionReassignmentsWithDiffVersion(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() secondBroker := NewMockBroker(t, 2) defer secondBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(secondBroker.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()). 
SetBroker(secondBroker.Addr(), secondBroker.BrokerID()), }) secondBroker.SetHandlerByMap(map[string]MockResponse{ "ListPartitionReassignmentsRequest": NewMockListPartitionReassignmentsResponse(t), }) config := NewTestConfig() config.Version = V2_3_0_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } partitions := make([]int32, 0) _, err = admin.ListPartitionReassignments("my_topic", partitions) if !strings.ContainsAny(err.Error(), ErrUnsupportedVersion.Error()) { t.Fatal(err) } err = admin.Close() if err != nil { t.Fatal(err) } } func TestClusterAdminDeleteRecords(t *testing.T) { topicName := "my_topic" seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()). SetLeader(topicName, 1, 1). SetLeader(topicName, 2, 1). SetLeader(topicName, 3, 1), "DeleteRecordsRequest": NewMockDeleteRecordsResponse(t), }) config := NewTestConfig() config.Version = V1_0_0_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } partitionOffsetFake := make(map[int32]int64) partitionOffsetFake[4] = 1000 errFake := admin.DeleteRecords(topicName, partitionOffsetFake) if errFake == nil { t.Fatal(err) } partitionOffset := make(map[int32]int64) partitionOffset[1] = 1000 partitionOffset[2] = 1000 partitionOffset[3] = 1000 err = admin.DeleteRecords(topicName, partitionOffset) if err != nil { t.Fatal(err) } err = admin.Close() if err != nil { t.Fatal(err) } } func TestClusterAdminDeleteRecordsWithInCorrectBroker(t *testing.T) { topicName := "my_topic" seedBroker := NewMockBroker(t, 1) secondBroker := NewMockBroker(t, 2) defer seedBroker.Close() defer secondBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()). SetBroker(secondBroker.Addr(), secondBroker.brokerID). SetLeader(topicName, 1, 1). SetLeader(topicName, 2, 1). SetLeader(topicName, 3, 2), "DeleteRecordsRequest": NewMockDeleteRecordsResponse(t), }) secondBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()). SetBroker(secondBroker.Addr(), secondBroker.brokerID). SetLeader(topicName, 1, 1). SetLeader(topicName, 2, 1). SetLeader(topicName, 3, 2), "DeleteRecordsRequest": NewMockDeleteRecordsResponse(t), }) config := NewTestConfig() config.Version = V1_0_0_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } partitionOffset := make(map[int32]int64) partitionOffset[1] = 1000 partitionOffset[2] = 1000 partitionOffset[3] = 1000 err = admin.DeleteRecords(topicName, partitionOffset) if err != nil { t.Fatal(err) } err = admin.Close() if err != nil { t.Fatal(err) } } func TestClusterAdminDeleteRecordsWithUnsupportedVersion(t *testing.T) { topicName := "my_topic" seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()). SetLeader(topicName, 1, 1). SetLeader(topicName, 2, 1). 
SetLeader(topicName, 3, 1), "DeleteRecordsRequest": NewMockDeleteRecordsResponse(t), }) config := NewTestConfig() config.Version = V0_10_2_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } partitionOffset := make(map[int32]int64) partitionOffset[1] = 1000 partitionOffset[2] = 1000 partitionOffset[3] = 1000 err = admin.DeleteRecords(topicName, partitionOffset) if err == nil { t.Fatal("expected an ErrDeleteRecords") } if !strings.HasPrefix(err.Error(), "kafka server: failed to delete records") { t.Fatal(err) } if !errors.Is(err, ErrDeleteRecords) { t.Fatal(err) } if !errors.Is(err, ErrUnsupportedVersion) { t.Fatal(err) } err = admin.Close() if err != nil { t.Fatal(err) } } func TestClusterAdminDeleteRecordsWithLeaderNotAvailable(t *testing.T) { topicName := "my_topic" seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetLeader("my_topic", 1, -1). SetController(seedBroker.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), }) config := NewTestConfig() config.Version = V1_0_0_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } partitionOffset := make(map[int32]int64) partitionOffset[1] = 1000 err = admin.DeleteRecords(topicName, partitionOffset) if err == nil { t.Fatal("expected an ErrDeleteRecords") } if !strings.HasPrefix(err.Error(), "kafka server: failed to delete records") { t.Fatal(err) } if !errors.Is(err, ErrDeleteRecords) { t.Fatal(err) } if !errors.Is(err, ErrLeaderNotAvailable) { t.Fatal(err) } err = admin.Close() if err != nil { t.Fatal(err) } } func TestClusterAdminDescribeConfig(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), "DescribeConfigsRequest": NewMockDescribeConfigsResponse(t), }) tests := []struct { saramaVersion KafkaVersion requestVersion int16 includeSynonyms bool }{ {V1_0_0_0, 0, false}, {V1_1_0_0, 1, true}, {V1_1_1_0, 1, true}, {V2_0_0_0, 2, true}, } for _, tt := range tests { t.Run(tt.saramaVersion.String(), func(t *testing.T) { config := NewTestConfig() config.Version = tt.saramaVersion admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } defer func() { _ = admin.Close() }() resource := ConfigResource{ Name: "r1", Type: TopicResource, ConfigNames: []string{"my_topic"}, } entries, err := admin.DescribeConfig(resource) if err != nil { t.Fatal(err) } history := seedBroker.History() describeReq, ok := history[len(history)-1].Request.(*DescribeConfigsRequest) if !ok { t.Fatal("failed to find DescribeConfigsRequest in mockBroker history") } if describeReq.Version != tt.requestVersion { t.Fatalf( "requestVersion %v did not match expected %v", describeReq.Version, tt.requestVersion) } if len(entries) == 0 { t.Fatal(errors.New("no resource present")) } if tt.includeSynonyms { if len(entries[0].Synonyms) == 0 { t.Fatal("expected synonyms to have been included") } } }) } } func TestClusterAdminDescribeConfigWithErrorCode(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). 
SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), "DescribeConfigsRequest": NewMockDescribeConfigsResponseWithErrorCode(t), }) config := NewTestConfig() config.Version = V1_1_0_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } defer func() { _ = admin.Close() }() resource := ConfigResource{ Name: "r1", Type: TopicResource, ConfigNames: []string{"my_topic"}, } _, err = admin.DescribeConfig(resource) if err == nil { t.Fatal(errors.New("ErrorCode present but no Error returned")) } } // TestClusterAdminDescribeBrokerConfig ensures that a describe broker config // is sent to the broker in the resource struct, _not_ the controller func TestClusterAdminDescribeBrokerConfig(t *testing.T) { controllerBroker := NewMockBroker(t, 1) defer controllerBroker.Close() configBroker := NewMockBroker(t, 2) defer configBroker.Close() controllerBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(controllerBroker.BrokerID()). SetBroker(controllerBroker.Addr(), controllerBroker.BrokerID()). SetBroker(configBroker.Addr(), configBroker.BrokerID()), }) configBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(controllerBroker.BrokerID()). SetBroker(controllerBroker.Addr(), controllerBroker.BrokerID()). SetBroker(configBroker.Addr(), configBroker.BrokerID()), "DescribeConfigsRequest": NewMockDescribeConfigsResponse(t), }) config := NewTestConfig() config.Version = V1_0_0_0 admin, err := NewClusterAdmin( []string{ controllerBroker.Addr(), configBroker.Addr(), }, config) if err != nil { t.Fatal(err) } for _, resourceType := range []ConfigResourceType{BrokerResource, BrokerLoggerResource} { resource := ConfigResource{Name: "2", Type: resourceType} entries, err := admin.DescribeConfig(resource) if err != nil { t.Fatal(err) } if len(entries) == 0 { t.Fatal(errors.New("no resource present")) } } err = admin.Close() if err != nil { t.Fatal(err) } } func TestClusterAdminAlterConfig(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), "AlterConfigsRequest": NewMockAlterConfigsResponse(t), }) config := NewTestConfig() config.Version = V1_0_0_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } var value string entries := make(map[string]*string) value = "60000" entries["retention.ms"] = &value err = admin.AlterConfig(TopicResource, "my_topic", entries, false) if err != nil { t.Fatal(err) } err = admin.Close() if err != nil { t.Fatal(err) } } func TestClusterAdminAlterConfigWithErrorCode(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). 
SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), "AlterConfigsRequest": NewMockAlterConfigsResponseWithErrorCode(t), }) config := NewTestConfig() config.Version = V1_0_0_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } defer func() { _ = admin.Close() }() var value string entries := make(map[string]*string) value = "60000" entries["retention.ms"] = &value err = admin.AlterConfig(TopicResource, "my_topic", entries, false) if err == nil { t.Fatal(errors.New("ErrorCode present but no Error returned")) } } func TestClusterAdminAlterBrokerConfig(t *testing.T) { controllerBroker := NewMockBroker(t, 1) defer controllerBroker.Close() configBroker := NewMockBroker(t, 2) defer configBroker.Close() controllerBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(controllerBroker.BrokerID()). SetBroker(controllerBroker.Addr(), controllerBroker.BrokerID()). SetBroker(configBroker.Addr(), configBroker.BrokerID()), }) configBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(controllerBroker.BrokerID()). SetBroker(controllerBroker.Addr(), controllerBroker.BrokerID()). SetBroker(configBroker.Addr(), configBroker.BrokerID()), "AlterConfigsRequest": NewMockAlterConfigsResponse(t), }) config := NewTestConfig() config.Version = V1_0_0_0 admin, err := NewClusterAdmin( []string{ controllerBroker.Addr(), configBroker.Addr(), }, config) if err != nil { t.Fatal(err) } var value string entries := make(map[string]*string) value = "3" entries["min.insync.replicas"] = &value for _, resourceType := range []ConfigResourceType{BrokerResource, BrokerLoggerResource} { resource := ConfigResource{Name: "2", Type: resourceType} err = admin.AlterConfig( resource.Type, resource.Name, entries, false) if err != nil { t.Fatal(err) } } err = admin.Close() if err != nil { t.Fatal(err) } } func TestClusterAdminIncrementalAlterConfig(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), "IncrementalAlterConfigsRequest": NewMockIncrementalAlterConfigsResponse(t), }) config := NewTestConfig() config.Version = V2_3_0_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } var value string entries := make(map[string]IncrementalAlterConfigsEntry) value = "60000" entries["retention.ms"] = IncrementalAlterConfigsEntry{ Operation: IncrementalAlterConfigsOperationSet, Value: &value, } value = "1073741824" entries["segment.bytes"] = IncrementalAlterConfigsEntry{ Operation: IncrementalAlterConfigsOperationDelete, Value: &value, } err = admin.IncrementalAlterConfig(TopicResource, "my_topic", entries, false) if err != nil { t.Fatal(err) } err = admin.Close() if err != nil { t.Fatal(err) } } func TestClusterAdminIncrementalAlterConfigWithErrorCode(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). 
SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), "IncrementalAlterConfigsRequest": NewMockIncrementalAlterConfigsResponseWithErrorCode(t), }) config := NewTestConfig() config.Version = V2_3_0_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } defer func() { _ = admin.Close() }() var value string entries := make(map[string]IncrementalAlterConfigsEntry) value = "60000" entries["retention.ms"] = IncrementalAlterConfigsEntry{ Operation: IncrementalAlterConfigsOperationSet, Value: &value, } value = "1073741824" entries["segment.bytes"] = IncrementalAlterConfigsEntry{ Operation: IncrementalAlterConfigsOperationDelete, Value: &value, } err = admin.IncrementalAlterConfig(TopicResource, "my_topic", entries, false) if err == nil { t.Fatal(errors.New("ErrorCode present but no Error returned")) } } func TestClusterAdminIncrementalAlterBrokerConfig(t *testing.T) { controllerBroker := NewMockBroker(t, 1) defer controllerBroker.Close() configBroker := NewMockBroker(t, 2) defer configBroker.Close() controllerBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(controllerBroker.BrokerID()). SetBroker(controllerBroker.Addr(), controllerBroker.BrokerID()). SetBroker(configBroker.Addr(), configBroker.BrokerID()), }) configBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(controllerBroker.BrokerID()). SetBroker(controllerBroker.Addr(), controllerBroker.BrokerID()). SetBroker(configBroker.Addr(), configBroker.BrokerID()), "IncrementalAlterConfigsRequest": NewMockIncrementalAlterConfigsResponse(t), }) config := NewTestConfig() config.Version = V2_3_0_0 admin, err := NewClusterAdmin( []string{ controllerBroker.Addr(), configBroker.Addr(), }, config) if err != nil { t.Fatal(err) } var value string entries := make(map[string]IncrementalAlterConfigsEntry) value = "3" entries["min.insync.replicas"] = IncrementalAlterConfigsEntry{ Operation: IncrementalAlterConfigsOperationSet, Value: &value, } value = "2" entries["log.cleaner.threads"] = IncrementalAlterConfigsEntry{ Operation: IncrementalAlterConfigsOperationDelete, Value: &value, } for _, resourceType := range []ConfigResourceType{BrokerResource, BrokerLoggerResource} { resource := ConfigResource{Name: "2", Type: resourceType} err = admin.IncrementalAlterConfig( resource.Type, resource.Name, entries, false) if err != nil { t.Fatal(err) } } err = admin.Close() if err != nil { t.Fatal(err) } } func TestClusterAdminCreateAcl(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), "CreateAclsRequest": NewMockCreateAclsResponse(t), }) config := NewTestConfig() config.Version = V1_0_0_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } r := Resource{ResourceType: AclResourceTopic, ResourceName: "my_topic"} a := Acl{Host: "localhost", Operation: AclOperationAlter, PermissionType: AclPermissionAny} err = admin.CreateACL(r, a) if err != nil { t.Fatal(err) } err = admin.Close() if err != nil { t.Fatal(err) } } func TestClusterAdminCreateAclErrorHandling(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). 
SetController(seedBroker.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), "CreateAclsRequest": NewMockCreateAclsResponseWithError(t), }) config := NewTestConfig() config.Version = V1_0_0_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } r := Resource{ResourceType: AclResourceTopic, ResourceName: "my_topic"} a := Acl{Host: "localhost", Operation: AclOperationAlter, PermissionType: AclPermissionAny} err = admin.CreateACL(r, a) if err == nil { t.Fatal(errors.New("error should have been thrown")) } err = admin.Close() if err != nil { t.Fatal(err) } } func TestClusterAdminCreateAcls(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), "CreateAclsRequest": NewMockCreateAclsResponse(t), }) config := NewTestConfig() config.Version = V1_0_0_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } rACLs := []*ResourceAcls{ { Resource: Resource{ResourceType: AclResourceTopic, ResourceName: "my_topic"}, Acls: []*Acl{ {Host: "localhost", Operation: AclOperationAlter, PermissionType: AclPermissionAny}, }, }, { Resource: Resource{ResourceType: AclResourceTopic, ResourceName: "your_topic"}, Acls: []*Acl{ {Host: "localhost", Operation: AclOperationAlter, PermissionType: AclPermissionAny}, }, }, } err = admin.CreateACLs(rACLs) if err != nil { t.Fatal(err) } err = admin.Close() if err != nil { t.Fatal(err) } } func TestClusterAdminListAcls(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), "DescribeAclsRequest": NewMockListAclsResponse(t), "CreateAclsRequest": NewMockCreateAclsResponse(t), }) config := NewTestConfig() config.Version = V1_0_0_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } r := Resource{ResourceType: AclResourceTopic, ResourceName: "my_topic"} a := Acl{Host: "localhost", Operation: AclOperationAlter, PermissionType: AclPermissionAny} err = admin.CreateACL(r, a) if err != nil { t.Fatal(err) } resourceName := "my_topic" filter := AclFilter{ ResourceType: AclResourceTopic, Operation: AclOperationRead, ResourceName: &resourceName, } rAcls, err := admin.ListAcls(filter) if err != nil { t.Fatal(err) } if len(rAcls) == 0 { t.Fatal("no acls present") } err = admin.Close() if err != nil { t.Fatal(err) } } func TestClusterAdminDeleteAcl(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). 
SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), "DeleteAclsRequest": NewMockDeleteAclsResponse(t), }) config := NewTestConfig() config.Version = V1_0_0_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } resourceName := "my_topic" filter := AclFilter{ ResourceType: AclResourceTopic, Operation: AclOperationAlter, ResourceName: &resourceName, } _, err = admin.DeleteACL(filter, false) if err != nil { t.Fatal(err) } err = admin.Close() if err != nil { t.Fatal(err) } } func TestDescribeTopic(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). SetLeader("my_topic", 0, seedBroker.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), }) config := NewTestConfig() config.Version = V1_0_0_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } topics, err := admin.DescribeTopics([]string{"my_topic"}) if err != nil { t.Fatal(err) } if len(topics) != 1 { t.Fatalf("Expected 1 result, got %v", len(topics)) } if topics[0].Name != "my_topic" { t.Fatalf("Incorrect topic name: %v", topics[0].Name) } err = admin.Close() if err != nil { t.Fatal(err) } } func TestDescribeTopicWithVersion0_11(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). SetLeader("my_topic", 0, seedBroker.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), }) config := NewTestConfig() config.Version = V0_11_0_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } topics, err := admin.DescribeTopics([]string{"my_topic"}) if err != nil { t.Fatal(err) } if len(topics) != 1 { t.Fatalf("Expected 1 result, got %v", len(topics)) } if topics[0].Name != "my_topic" { t.Fatalf("Incorrect topic name: %v", topics[0].Name) } err = admin.Close() if err != nil { t.Fatal(err) } } func TestDescribeConsumerGroup(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() expectedGroupID := "my-group" seedBroker.SetHandlerByMap(map[string]MockResponse{ "DescribeGroupsRequest": NewMockDescribeGroupsResponse(t).AddGroupDescription(expectedGroupID, &GroupDescription{ GroupId: expectedGroupID, }), "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), "FindCoordinatorRequest": NewMockFindCoordinatorResponse(t).SetCoordinator(CoordinatorGroup, expectedGroupID, seedBroker), }) config := NewTestConfig() config.Version = V1_0_0_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } result, err := admin.DescribeConsumerGroups([]string{expectedGroupID}) if err != nil { t.Fatal(err) } if len(result) != 1 { t.Fatalf("Expected 1 result, got %v", len(result)) } if result[0].GroupId != expectedGroupID { t.Fatalf("Expected groupID %v, got %v", expectedGroupID, result[0].GroupId) } err = admin.Close() if err != nil { t.Fatal(err) } } func TestListConsumerGroups(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). 
SetBroker(seedBroker.Addr(), seedBroker.BrokerID()),
		"ListGroupsRequest": NewMockListGroupsResponse(t).
			AddGroup("my-group", "consumer"),
	})

	config := NewTestConfig()
	config.Version = V1_0_0_0
	admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	groups, err := admin.ListConsumerGroups()
	if err != nil {
		t.Fatal(err)
	}

	if len(groups) != 1 {
		t.Fatalf("Expected %v results, got %v", 1, len(groups))
	}

	protocolType, ok := groups["my-group"]
	if !ok {
		t.Fatal("Expected group to be returned, but it was not")
	}

	if protocolType != "consumer" {
		t.Fatalf("Expected protocolType %v, got %v", "consumer", protocolType)
	}

	err = admin.Close()
	if err != nil {
		t.Fatal(err)
	}
}

func TestListConsumerGroupsMultiBroker(t *testing.T) {
	seedBroker := NewMockBroker(t, 1)
	defer seedBroker.Close()
	secondBroker := NewMockBroker(t, 2)
	defer secondBroker.Close()

	firstGroup := "first"
	secondGroup := "second"
	nonExistingGroup := "non-existing-group"

	seedBroker.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetController(seedBroker.BrokerID()).
			SetBroker(seedBroker.Addr(), seedBroker.BrokerID()).
			SetBroker(secondBroker.Addr(), secondBroker.BrokerID()),
		"ListGroupsRequest": NewMockListGroupsResponse(t).
			AddGroup(firstGroup, "consumer"),
	})

	secondBroker.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetController(seedBroker.BrokerID()).
			SetBroker(seedBroker.Addr(), seedBroker.BrokerID()).
			SetBroker(secondBroker.Addr(), secondBroker.BrokerID()),
		"ListGroupsRequest": NewMockListGroupsResponse(t).
			AddGroup(secondGroup, "consumer"),
	})

	config := NewTestConfig()
	config.Version = V1_0_0_0
	admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	groups, err := admin.ListConsumerGroups()
	if err != nil {
		t.Fatal(err)
	}

	if len(groups) != 2 {
		t.Fatalf("Expected %v results, got %v", 2, len(groups))
	}

	if _, found := groups[firstGroup]; !found {
		t.Fatalf("Expected group %v to be present in result set, but it isn't", firstGroup)
	}

	if _, found := groups[secondGroup]; !found {
		t.Fatalf("Expected group %v to be present in result set, but it isn't", secondGroup)
	}

	if _, found := groups[nonExistingGroup]; found {
		t.Fatalf("Expected group %v to not exist, but it exists", nonExistingGroup)
	}

	err = admin.Close()
	if err != nil {
		t.Fatal(err)
	}
}

func TestListConsumerGroupOffsets(t *testing.T) {
	seedBroker := NewMockBroker(t, 1)
	defer seedBroker.Close()

	group := "my-group"
	topic := "my-topic"
	partition := int32(0)
	expectedOffset := int64(0)

	seedBroker.SetHandlerByMap(map[string]MockResponse{
		"OffsetFetchRequest": NewMockOffsetFetchResponse(t).SetOffset(group, "my-topic", partition, expectedOffset, "", ErrNoError).SetError(ErrNoError),
		"MetadataRequest": NewMockMetadataResponse(t).
			SetController(seedBroker.BrokerID()).
SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), "FindCoordinatorRequest": NewMockFindCoordinatorResponse(t).SetCoordinator(CoordinatorGroup, group, seedBroker), }) config := NewTestConfig() config.Version = V1_0_0_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } response, err := admin.ListConsumerGroupOffsets(group, map[string][]int32{ topic: {0}, }) if err != nil { t.Fatalf("ListConsumerGroupOffsets failed with error %v", err) } block := response.GetBlock(topic, partition) if block == nil { t.Fatalf("Expected block for topic %v and partition %v to exist, but it doesn't", topic, partition) } if block.Offset != expectedOffset { t.Fatalf("Expected offset %v, got %v", expectedOffset, block.Offset) } err = admin.Close() if err != nil { t.Fatal(err) } } func TestDeleteConsumerGroup(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() group := "my-group" seedBroker.SetHandlerByMap(map[string]MockResponse{ // "OffsetFetchRequest": NewMockOffsetFetchResponse(t).SetOffset(group, "my-topic", partition, expectedOffset, "", ErrNoError), "DeleteGroupsRequest": NewMockDeleteGroupsRequest(t).SetDeletedGroups([]string{group}), "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), "FindCoordinatorRequest": NewMockFindCoordinatorResponse(t).SetCoordinator(CoordinatorGroup, group, seedBroker), }) config := NewTestConfig() config.Version = V1_1_0_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } defer admin.Close() err = admin.DeleteConsumerGroup(group) if err != nil { t.Fatalf("DeleteConsumerGroup failed with error %v", err) } } func TestDeleteOffset(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() group := "group-delete-offset" topic := "topic-delete-offset" partition := int32(0) handlerMap := map[string]MockResponse{ "ApiVersionsRequest": NewMockApiVersionsResponse(t), "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). 
SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), "FindCoordinatorRequest": NewMockFindCoordinatorResponse(t).SetCoordinator(CoordinatorGroup, group, seedBroker), } seedBroker.SetHandlerByMap(handlerMap) config := NewTestConfig() config.Version = V2_4_0_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } // Test NoError handlerMap["DeleteOffsetsRequest"] = NewMockDeleteOffsetRequest(t).SetDeletedOffset(ErrNoError, topic, partition, ErrNoError) seedBroker.SetHandlerByMap(handlerMap) err = admin.DeleteConsumerGroupOffset(group, topic, partition) if err != nil { t.Fatalf("DeleteConsumerGroupOffset failed with error %v", err) } defer admin.Close() // Test Error handlerMap["DeleteOffsetsRequest"] = NewMockDeleteOffsetRequest(t).SetDeletedOffset(ErrNotCoordinatorForConsumer, topic, partition, ErrNoError) seedBroker.SetHandlerByMap(handlerMap) err = admin.DeleteConsumerGroupOffset(group, topic, partition) if !errors.Is(err, ErrNotCoordinatorForConsumer) { t.Fatalf("DeleteConsumerGroupOffset should have failed with error %v", ErrNotCoordinatorForConsumer) } // Test Error for partition handlerMap["DeleteOffsetsRequest"] = NewMockDeleteOffsetRequest(t).SetDeletedOffset(ErrNoError, topic, partition, ErrGroupSubscribedToTopic) seedBroker.SetHandlerByMap(handlerMap) err = admin.DeleteConsumerGroupOffset(group, topic, partition) if !errors.Is(err, ErrGroupSubscribedToTopic) { t.Fatalf("DeleteConsumerGroupOffset should have failed with error %v", ErrGroupSubscribedToTopic) } } // TestRefreshMetaDataWithDifferentController ensures that the cached // controller can be forcibly updated from Metadata by the admin client func TestRefreshMetaDataWithDifferentController(t *testing.T) { seedBroker1 := NewMockBroker(t, 1) seedBroker2 := NewMockBroker(t, 2) defer seedBroker1.Close() defer seedBroker2.Close() seedBroker1.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker1.BrokerID()). SetBroker(seedBroker1.Addr(), seedBroker1.BrokerID()). SetBroker(seedBroker2.Addr(), seedBroker2.BrokerID()), }) config := NewTestConfig() config.Version = V1_1_0_0 client, err := NewClient([]string{seedBroker1.Addr()}, config) if err != nil { t.Fatal(err) } defer client.Close() ca := clusterAdmin{client: client, conf: config} if b, _ := ca.Controller(); seedBroker1.BrokerID() != b.ID() { t.Fatalf("expected cached controller to be %d rather than %d", seedBroker1.BrokerID(), b.ID()) } metadataResponse := NewMockMetadataResponse(t). SetController(seedBroker2.BrokerID()). SetBroker(seedBroker1.Addr(), seedBroker1.BrokerID()). SetBroker(seedBroker2.Addr(), seedBroker2.BrokerID()) seedBroker1.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": metadataResponse, }) seedBroker2.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": metadataResponse, }) if b, _ := ca.refreshController(); seedBroker2.BrokerID() != b.ID() { t.Fatalf("expected refreshed controller to be %d rather than %d", seedBroker2.BrokerID(), b.ID()) } if b, _ := ca.Controller(); seedBroker2.BrokerID() != b.ID() { t.Fatalf("expected cached controller to be %d rather than %d", seedBroker2.BrokerID(), b.ID()) } } func TestDescribeLogDirs(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). 
SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), "DescribeLogDirsRequest": NewMockDescribeLogDirsResponse(t). SetLogDirs("/tmp/logs", map[string]int{"topic1": 2, "topic2": 2}), }) config := NewTestConfig() config.Version = V1_0_0_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } logDirsPerBroker, err := admin.DescribeLogDirs([]int32{seedBroker.BrokerID()}) if err != nil { t.Fatal(err) } if len(logDirsPerBroker) != 1 { t.Fatalf("Expected %v results, got %v", 1, len(logDirsPerBroker)) } logDirs := logDirsPerBroker[seedBroker.BrokerID()] if len(logDirs) != 1 { t.Fatalf("Expected log dirs for broker %v to be returned, but it did not, got %v", seedBroker.BrokerID(), len(logDirs)) } logDirsBroker := logDirs[0] if !errors.Is(logDirsBroker.ErrorCode, ErrNoError) { t.Fatalf("Expected no error for broker %v, but it was %v", seedBroker.BrokerID(), logDirsBroker.ErrorCode) } if logDirsBroker.Path != "/tmp/logs" { t.Fatalf("Expected log dirs for broker %v to be '/tmp/logs', but it was %v", seedBroker.BrokerID(), logDirsBroker.Path) } if len(logDirsBroker.Topics) != 2 { t.Fatalf("Expected log dirs for broker %v to have 2 topics, but it had %v", seedBroker.BrokerID(), len(logDirsBroker.Topics)) } err = admin.Close() if err != nil { t.Fatal(err) } } func TestDescribeLogDirsUnknownBroker(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), "DescribeLogDirsRequest": NewMockDescribeLogDirsResponse(t). SetLogDirs("/tmp/logs", map[string]int{"topic1": 2, "topic2": 2}), }) config := NewTestConfig() config.Version = V1_0_0_0 admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } defer safeClose(t, admin) type result struct { metadata map[int32][]DescribeLogDirsResponseDirMetadata err error } res := make(chan result) go func() { metadata, err := admin.DescribeLogDirs([]int32{seedBroker.BrokerID() + 1}) res <- result{metadata, err} }() select { case <-time.After(time.Second): t.Fatalf("DescribeLogDirs timed out") case returned := <-res: if len(returned.metadata) != 0 { t.Fatalf("Expected no results, got %v", len(returned.metadata)) } if returned.err != nil { t.Fatalf("Expected no error, got %v", returned.err) } } } func Test_retryOnError(t *testing.T) { testBackoffTime := 100 * time.Millisecond config := NewTestConfig() config.Version = V1_0_0_0 config.Admin.Retry.Max = 3 config.Admin.Retry.Backoff = testBackoffTime admin := &clusterAdmin{conf: config} t.Run("immediate success", func(t *testing.T) { startTime := time.Now() attempts := 0 err := admin.retryOnError( func(error) bool { return true }, func() error { attempts++ return nil }) if err != nil { t.Fatalf("expected no error but was %v", err) } if attempts != 1 { t.Fatalf("expected 1 attempt to have been made but was %d", attempts) } if time.Since(startTime) >= testBackoffTime { t.Fatalf("single attempt should take less than backoff time") } }) t.Run("immediate failure", func(t *testing.T) { startTime := time.Now() attempts := 0 err := admin.retryOnError( func(error) bool { return false }, func() error { attempts++ return errors.New("mock error") }) if err == nil { t.Fatalf("expected error but was nil") } if attempts != 1 { t.Fatalf("expected 1 attempt to have been made but was %d", attempts) } if time.Since(startTime) >= testBackoffTime { t.Fatalf("single 
attempt should take less than backoff time") } }) t.Run("failing all attempts", func(t *testing.T) { startTime := time.Now() attempts := 0 err := admin.retryOnError( func(error) bool { return true }, func() error { attempts++ return errors.New("mock error") }) if err == nil { t.Errorf("expected error but was nil") } if attempts != 4 { t.Errorf("expected 4 attempts to have been made but was %d", attempts) } if time.Since(startTime) >= 4*testBackoffTime { t.Errorf("attempt+sleep+retry+sleep+retry+sleep+retry should take less than 4 * backoff time") } }) } golang-github-ibm-sarama-1.43.2/alter_client_quotas_request.go000066400000000000000000000101101461256741300245010ustar00rootroot00000000000000package sarama // AlterClientQuotas Request (Version: 0) => [entries] validate_only // entries => [entity] [ops] // entity => entity_type entity_name // entity_type => STRING // entity_name => NULLABLE_STRING // ops => key value remove // key => STRING // value => FLOAT64 // remove => BOOLEAN // validate_only => BOOLEAN type AlterClientQuotasRequest struct { Version int16 Entries []AlterClientQuotasEntry // The quota configuration entries to alter. ValidateOnly bool // Whether the alteration should be validated, but not performed. } type AlterClientQuotasEntry struct { Entity []QuotaEntityComponent // The quota entity to alter. Ops []ClientQuotasOp // An individual quota configuration entry to alter. } type ClientQuotasOp struct { Key string // The quota configuration key. Value float64 // The value to set, otherwise ignored if the value is to be removed. Remove bool // Whether the quota configuration value should be removed, otherwise set. } func (a *AlterClientQuotasRequest) encode(pe packetEncoder) error { // Entries if err := pe.putArrayLength(len(a.Entries)); err != nil { return err } for _, e := range a.Entries { if err := e.encode(pe); err != nil { return err } } // ValidateOnly pe.putBool(a.ValidateOnly) return nil } func (a *AlterClientQuotasRequest) decode(pd packetDecoder, version int16) error { // Entries entryCount, err := pd.getArrayLength() if err != nil { return err } if entryCount > 0 { a.Entries = make([]AlterClientQuotasEntry, entryCount) for i := range a.Entries { e := AlterClientQuotasEntry{} if err = e.decode(pd, version); err != nil { return err } a.Entries[i] = e } } else { a.Entries = []AlterClientQuotasEntry{} } // ValidateOnly validateOnly, err := pd.getBool() if err != nil { return err } a.ValidateOnly = validateOnly return nil } func (a *AlterClientQuotasEntry) encode(pe packetEncoder) error { // Entity if err := pe.putArrayLength(len(a.Entity)); err != nil { return err } for _, component := range a.Entity { if err := component.encode(pe); err != nil { return err } } // Ops if err := pe.putArrayLength(len(a.Ops)); err != nil { return err } for _, o := range a.Ops { if err := o.encode(pe); err != nil { return err } } return nil } func (a *AlterClientQuotasEntry) decode(pd packetDecoder, version int16) error { // Entity componentCount, err := pd.getArrayLength() if err != nil { return err } if componentCount > 0 { a.Entity = make([]QuotaEntityComponent, componentCount) for i := 0; i < componentCount; i++ { component := QuotaEntityComponent{} if err := component.decode(pd, version); err != nil { return err } a.Entity[i] = component } } else { a.Entity = []QuotaEntityComponent{} } // Ops opCount, err := pd.getArrayLength() if err != nil { return err } if opCount > 0 { a.Ops = make([]ClientQuotasOp, opCount) for i := range a.Ops { c := ClientQuotasOp{} if err = c.decode(pd, 
version); err != nil { return err } a.Ops[i] = c } } else { a.Ops = []ClientQuotasOp{} } return nil } func (c *ClientQuotasOp) encode(pe packetEncoder) error { // Key if err := pe.putString(c.Key); err != nil { return err } // Value pe.putFloat64(c.Value) // Remove pe.putBool(c.Remove) return nil } func (c *ClientQuotasOp) decode(pd packetDecoder, version int16) error { // Key key, err := pd.getString() if err != nil { return err } c.Key = key // Value value, err := pd.getFloat64() if err != nil { return err } c.Value = value // Remove remove, err := pd.getBool() if err != nil { return err } c.Remove = remove return nil } func (a *AlterClientQuotasRequest) key() int16 { return 49 } func (a *AlterClientQuotasRequest) version() int16 { return a.Version } func (a *AlterClientQuotasRequest) headerVersion() int16 { return 1 } func (a *AlterClientQuotasRequest) isValidVersion() bool { return a.Version == 0 } func (a *AlterClientQuotasRequest) requiredVersion() KafkaVersion { return V2_6_0_0 } golang-github-ibm-sarama-1.43.2/alter_client_quotas_request_test.go000066400000000000000000000111361461256741300255510ustar00rootroot00000000000000package sarama import "testing" var ( alterClientQuotasRequestSingleOp = []byte{ 0, 0, 0, 1, // entries len 0, 0, 0, 1, // entity len 0, 4, 'u', 's', 'e', 'r', // entity type 255, 255, // entity value 0, 0, 0, 1, // ops len 0, 18, 'p', 'r', 'o', 'd', 'u', 'c', 'e', 'r', '_', 'b', 'y', 't', 'e', '_', 'r', 'a', 't', 'e', // op key 65, 46, 132, 128, 0, 0, 0, 0, // op value (1000000) 0, // remove 0, // validate only } alterClientQuotasRequestRemoveSingleOp = []byte{ 0, 0, 0, 1, // entries len 0, 0, 0, 1, // entity len 0, 4, 'u', 's', 'e', 'r', // entity type 255, 255, // entity value 0, 0, 0, 1, // ops len 0, 18, 'p', 'r', 'o', 'd', 'u', 'c', 'e', 'r', '_', 'b', 'y', 't', 'e', '_', 'r', 'a', 't', 'e', // op key 0, 0, 0, 0, 0, 0, 0, 0, // op value (ignored) 1, // remove 1, // validate only } alterClientQuotasRequestMultipleOps = []byte{ 0, 0, 0, 1, // entries len 0, 0, 0, 1, // entity len 0, 4, 'u', 's', 'e', 'r', // entity type 255, 255, // entity value 0, 0, 0, 2, // ops len 0, 18, 'p', 'r', 'o', 'd', 'u', 'c', 'e', 'r', '_', 'b', 'y', 't', 'e', '_', 'r', 'a', 't', 'e', // op key 65, 46, 132, 128, 0, 0, 0, 0, // op value (1000000) 0, // remove 0, 18, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', '_', 'b', 'y', 't', 'e', '_', 'r', 'a', 't', 'e', // op key 65, 46, 132, 128, 0, 0, 0, 0, // op value (1000000) 0, // remove 0, // validate only } alterClientQuotasRequestMultipleQuotasEntries = []byte{ 0, 0, 0, 2, // entries len 0, 0, 0, 1, // entity len 0, 4, 'u', 's', 'e', 'r', // entity type 255, 255, // entity value 0, 0, 0, 1, // ops len 0, 18, 'p', 'r', 'o', 'd', 'u', 'c', 'e', 'r', '_', 'b', 'y', 't', 'e', '_', 'r', 'a', 't', 'e', // op key 65, 46, 132, 128, 0, 0, 0, 0, // op value (1000000) 0, // remove 0, 0, 0, 1, // entity len 0, 9, 'c', 'l', 'i', 'e', 'n', 't', '-', 'i', 'd', // entity type 255, 255, // entity value 0, 0, 0, 1, // ops len 0, 18, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', '_', 'b', 'y', 't', 'e', '_', 'r', 'a', 't', 'e', // op key 65, 46, 132, 128, 0, 0, 0, 0, // op value (1000000) 0, // remove 0, // validate only } ) func TestAlterClientQuotasRequest(t *testing.T) { // default user defaultUserComponent := QuotaEntityComponent{ EntityType: QuotaEntityUser, MatchType: QuotaMatchDefault, } // default client-id defaultClientIDComponent := QuotaEntityComponent{ EntityType: QuotaEntityClientID, MatchType: QuotaMatchDefault, } // Add Quota to default user op := 
ClientQuotasOp{ Key: "producer_byte_rate", Value: 1000000, Remove: false, } entry := AlterClientQuotasEntry{ Entity: []QuotaEntityComponent{defaultUserComponent}, Ops: []ClientQuotasOp{op}, } req := &AlterClientQuotasRequest{ Entries: []AlterClientQuotasEntry{entry}, ValidateOnly: false, } testRequest(t, "Add single Quota op", req, alterClientQuotasRequestSingleOp) // Remove Quota from default user op = ClientQuotasOp{ Key: "producer_byte_rate", Remove: true, } entry = AlterClientQuotasEntry{ Entity: []QuotaEntityComponent{defaultUserComponent}, Ops: []ClientQuotasOp{op}, } req = &AlterClientQuotasRequest{ Entries: []AlterClientQuotasEntry{entry}, ValidateOnly: true, } testRequest(t, "Remove single Quota op", req, alterClientQuotasRequestRemoveSingleOp) // Add multiple Quotas ops op1 := ClientQuotasOp{ Key: "producer_byte_rate", Value: 1000000, Remove: false, } op2 := ClientQuotasOp{ Key: "consumer_byte_rate", Value: 1000000, Remove: false, } entry = AlterClientQuotasEntry{ Entity: []QuotaEntityComponent{defaultUserComponent}, Ops: []ClientQuotasOp{op1, op2}, } req = &AlterClientQuotasRequest{ Entries: []AlterClientQuotasEntry{entry}, ValidateOnly: false, } testRequest(t, "Add multiple Quota ops", req, alterClientQuotasRequestMultipleOps) // Add multiple Quotas Entries op1 = ClientQuotasOp{ Key: "producer_byte_rate", Value: 1000000, Remove: false, } entry1 := AlterClientQuotasEntry{ Entity: []QuotaEntityComponent{defaultUserComponent}, Ops: []ClientQuotasOp{op1}, } op2 = ClientQuotasOp{ Key: "consumer_byte_rate", Value: 1000000, Remove: false, } entry2 := AlterClientQuotasEntry{ Entity: []QuotaEntityComponent{defaultClientIDComponent}, Ops: []ClientQuotasOp{op2}, } req = &AlterClientQuotasRequest{ Entries: []AlterClientQuotasEntry{entry1, entry2}, ValidateOnly: false, } testRequest(t, "Add multiple Quotas Entries", req, alterClientQuotasRequestMultipleQuotasEntries) } golang-github-ibm-sarama-1.43.2/alter_client_quotas_response.go000066400000000000000000000070601461256741300246610ustar00rootroot00000000000000package sarama import ( "time" ) // AlterClientQuotas Response (Version: 0) => throttle_time_ms [entries] // throttle_time_ms => INT32 // entries => error_code error_message [entity] // error_code => INT16 // error_message => NULLABLE_STRING // entity => entity_type entity_name // entity_type => STRING // entity_name => NULLABLE_STRING type AlterClientQuotasResponse struct { Version int16 ThrottleTime time.Duration // The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. Entries []AlterClientQuotasEntryResponse // The quota configuration entries altered. } type AlterClientQuotasEntryResponse struct { ErrorCode KError // The error code, or `0` if the quota alteration succeeded. ErrorMsg *string // The error message, or `null` if the quota alteration succeeded. Entity []QuotaEntityComponent // The quota entity altered. 
} func (a *AlterClientQuotasResponse) encode(pe packetEncoder) error { // ThrottleTime pe.putInt32(int32(a.ThrottleTime / time.Millisecond)) // Entries if err := pe.putArrayLength(len(a.Entries)); err != nil { return err } for _, e := range a.Entries { if err := e.encode(pe); err != nil { return err } } return nil } func (a *AlterClientQuotasResponse) decode(pd packetDecoder, version int16) error { // ThrottleTime throttleTime, err := pd.getInt32() if err != nil { return err } a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond // Entries entryCount, err := pd.getArrayLength() if err != nil { return err } if entryCount > 0 { a.Entries = make([]AlterClientQuotasEntryResponse, entryCount) for i := range a.Entries { e := AlterClientQuotasEntryResponse{} if err = e.decode(pd, version); err != nil { return err } a.Entries[i] = e } } else { a.Entries = []AlterClientQuotasEntryResponse{} } return nil } func (a *AlterClientQuotasEntryResponse) encode(pe packetEncoder) error { // ErrorCode pe.putInt16(int16(a.ErrorCode)) // ErrorMsg if err := pe.putNullableString(a.ErrorMsg); err != nil { return err } // Entity if err := pe.putArrayLength(len(a.Entity)); err != nil { return err } for _, component := range a.Entity { if err := component.encode(pe); err != nil { return err } } return nil } func (a *AlterClientQuotasEntryResponse) decode(pd packetDecoder, version int16) error { // ErrorCode errCode, err := pd.getInt16() if err != nil { return err } a.ErrorCode = KError(errCode) // ErrorMsg errMsg, err := pd.getNullableString() if err != nil { return err } a.ErrorMsg = errMsg // Entity componentCount, err := pd.getArrayLength() if err != nil { return err } if componentCount > 0 { a.Entity = make([]QuotaEntityComponent, componentCount) for i := 0; i < componentCount; i++ { component := QuotaEntityComponent{} if err := component.decode(pd, version); err != nil { return err } a.Entity[i] = component } } else { a.Entity = []QuotaEntityComponent{} } return nil } func (a *AlterClientQuotasResponse) key() int16 { return 49 } func (a *AlterClientQuotasResponse) version() int16 { return a.Version } func (a *AlterClientQuotasResponse) headerVersion() int16 { return 0 } func (a *AlterClientQuotasResponse) isValidVersion() bool { return a.Version == 0 } func (a *AlterClientQuotasResponse) requiredVersion() KafkaVersion { return V2_6_0_0 } func (r *AlterClientQuotasResponse) throttleTime() time.Duration { return r.ThrottleTime } golang-github-ibm-sarama-1.43.2/alter_client_quotas_response_test.go000066400000000000000000000054701461256741300257230ustar00rootroot00000000000000package sarama import "testing" var ( alterClientQuotasResponseError = []byte{ 0, 0, 0, 0, // ThrottleTime 0, 0, 0, 1, // Entries len 0, 42, // ErrorCode (ErrInvalidRequest) 0, 42, 'U', 'n', 'h', 'a', 'n', 'd', 'l', 'e', 'd', ' ', 'c', 'l', 'i', 'e', 'n', 't', ' ', 'q', 'u', 'o', 't', 'a', ' ', 'e', 'n', 't', 'i', 't', 'y', ' ', 't', 'y', 'p', 'e', ':', ' ', 'f', 'a', 'u', 'l', 't', 'y', // ErrorMsg 0, 0, 0, 1, // Entity len 0, 6, 'f', 'a', 'u', 'l', 't', 'y', // entityType 255, 255, // entityName } alterClientQuotasResponseSingleEntry = []byte{ 0, 0, 0, 0, // ThrottleTime 0, 0, 0, 1, // Entries len 0, 0, // ErrorCode 255, 255, // ErrorMsg 0, 0, 0, 1, // Entity len 0, 4, 'u', 's', 'e', 'r', // entityType 255, 255, // entityName } alterClientQuotasResponseMultipleEntries = []byte{ 0, 0, 0, 0, // ThrottleTime 0, 0, 0, 2, // Entries len 0, 0, // ErrorCode 255, 255, // ErrorMsg 0, 0, 0, 1, // Entity len 0, 4, 'u', 's', 'e', 'r', // 
entityType 255, 255, // entityName 0, 0, // ErrorCode 255, 255, // ErrorMsg 0, 0, 0, 1, // Entity len 0, 9, 'c', 'l', 'i', 'e', 'n', 't', '-', 'i', 'd', // entityType 255, 255, // entityName } ) func TestAlterClientQuotasResponse(t *testing.T) { // default user defaultUserComponent := QuotaEntityComponent{ EntityType: QuotaEntityUser, MatchType: QuotaMatchDefault, } // default client-id defaultClientIDComponent := QuotaEntityComponent{ EntityType: QuotaEntityClientID, MatchType: QuotaMatchDefault, } // Response with error errMsg := "Unhandled client quota entity type: faulty" faultEntityComponent := QuotaEntityComponent{ EntityType: QuotaEntityType("faulty"), MatchType: QuotaMatchDefault, } entry := AlterClientQuotasEntryResponse{ ErrorCode: KError(42), ErrorMsg: &errMsg, Entity: []QuotaEntityComponent{faultEntityComponent}, } res := &AlterClientQuotasResponse{ ThrottleTime: 0, Entries: []AlterClientQuotasEntryResponse{entry}, } testResponse(t, "Response With Error", res, alterClientQuotasResponseError) // Response Altered single entry entry = AlterClientQuotasEntryResponse{ Entity: []QuotaEntityComponent{defaultUserComponent}, } res = &AlterClientQuotasResponse{ ThrottleTime: 0, Entries: []AlterClientQuotasEntryResponse{entry}, } testResponse(t, "Altered single entry", res, alterClientQuotasResponseSingleEntry) // Response Altered multiple entries entry1 := AlterClientQuotasEntryResponse{ Entity: []QuotaEntityComponent{defaultUserComponent}, } entry2 := AlterClientQuotasEntryResponse{ Entity: []QuotaEntityComponent{defaultClientIDComponent}, } res = &AlterClientQuotasResponse{ ThrottleTime: 0, Entries: []AlterClientQuotasEntryResponse{entry1, entry2}, } testResponse(t, "Altered multiple entries", res, alterClientQuotasResponseMultipleEntries) } golang-github-ibm-sarama-1.43.2/alter_configs_request.go000066400000000000000000000051221461256741300232660ustar00rootroot00000000000000package sarama // AlterConfigsRequest is an alter config request type type AlterConfigsRequest struct { Version int16 Resources []*AlterConfigsResource ValidateOnly bool } // AlterConfigsResource is an alter config resource type type AlterConfigsResource struct { Type ConfigResourceType Name string ConfigEntries map[string]*string } func (a *AlterConfigsRequest) encode(pe packetEncoder) error { if err := pe.putArrayLength(len(a.Resources)); err != nil { return err } for _, r := range a.Resources { if err := r.encode(pe); err != nil { return err } } pe.putBool(a.ValidateOnly) return nil } func (a *AlterConfigsRequest) decode(pd packetDecoder, version int16) error { resourceCount, err := pd.getArrayLength() if err != nil { return err } a.Resources = make([]*AlterConfigsResource, resourceCount) for i := range a.Resources { r := &AlterConfigsResource{} err = r.decode(pd, version) if err != nil { return err } a.Resources[i] = r } validateOnly, err := pd.getBool() if err != nil { return err } a.ValidateOnly = validateOnly return nil } func (a *AlterConfigsResource) encode(pe packetEncoder) error { pe.putInt8(int8(a.Type)) if err := pe.putString(a.Name); err != nil { return err } if err := pe.putArrayLength(len(a.ConfigEntries)); err != nil { return err } for configKey, configValue := range a.ConfigEntries { if err := pe.putString(configKey); err != nil { return err } if err := pe.putNullableString(configValue); err != nil { return err } } return nil } func (a *AlterConfigsResource) decode(pd packetDecoder, version int16) error { t, err := pd.getInt8() if err != nil { return err } a.Type = ConfigResourceType(t) name, err 
:= pd.getString()
	if err != nil {
		return err
	}
	a.Name = name

	n, err := pd.getArrayLength()
	if err != nil {
		return err
	}

	if n > 0 {
		a.ConfigEntries = make(map[string]*string, n)
		for i := 0; i < n; i++ {
			configKey, err := pd.getString()
			if err != nil {
				return err
			}
			if a.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
				return err
			}
		}
	}
	return err
}

func (a *AlterConfigsRequest) key() int16 {
	return 33
}

func (a *AlterConfigsRequest) version() int16 {
	return a.Version
}

func (a *AlterConfigsRequest) headerVersion() int16 {
	return 1
}

func (a *AlterConfigsRequest) isValidVersion() bool {
	return a.Version >= 0 && a.Version <= 1
}

func (a *AlterConfigsRequest) requiredVersion() KafkaVersion {
	switch a.Version {
	case 1:
		return V2_0_0_0
	case 0:
		return V0_11_0_0
	default:
		return V2_0_0_0
	}
}

golang-github-ibm-sarama-1.43.2/alter_configs_request_test.go

package sarama

import "testing"

var (
	emptyAlterConfigsRequest = []byte{
		0, 0, 0, 0, // 0 configs
		0, // don't Validate
	}

	singleAlterConfigsRequest = []byte{
		0, 0, 0, 1, // 1 config
		2,                   // a topic
		0, 3, 'f', 'o', 'o', // topic name: foo
		0, 0, 0, 1, // 1 config name
		0, 10, // 10 chars
		's', 'e', 'g', 'm', 'e', 'n', 't', '.', 'm', 's',
		0, 4, '1', '0', '0', '0',
		0, // don't validate
	}

	doubleAlterConfigsRequest = []byte{
		0, 0, 0, 2, // 2 config
		2,                   // a topic
		0, 3, 'f', 'o', 'o', // topic name: foo
		0, 0, 0, 1, // 1 config name
		0, 10, // 10 chars
		's', 'e', 'g', 'm', 'e', 'n', 't', '.', 'm', 's',
		0, 4, '1', '0', '0', '0',
		2,                   // a topic
		0, 3, 'b', 'a', 'r', // topic name: bar
		0, 0, 0, 1, // 1 config name
		0, 12, // 12 chars
		'r', 'e', 't', 'e', 'n', 't', 'i', 'o', 'n', '.', 'm', 's',
		0, 4, '1', '0', '0', '0',
		0, // don't validate
	}
)

func TestAlterConfigsRequest(t *testing.T) {
	var request *AlterConfigsRequest

	request = &AlterConfigsRequest{
		Resources: []*AlterConfigsResource{},
	}
	testRequest(t, "no requests", request, emptyAlterConfigsRequest)

	configValue := "1000"
	request = &AlterConfigsRequest{
		Resources: []*AlterConfigsResource{
			{
				Type: TopicResource,
				Name: "foo",
				ConfigEntries: map[string]*string{
					"segment.ms": &configValue,
				},
			},
		},
	}
	testRequest(t, "one config", request, singleAlterConfigsRequest)

	request = &AlterConfigsRequest{
		Resources: []*AlterConfigsResource{
			{
				Type: TopicResource,
				Name: "foo",
				ConfigEntries: map[string]*string{
					"segment.ms": &configValue,
				},
			},
			{
				Type: TopicResource,
				Name: "bar",
				ConfigEntries: map[string]*string{
					"retention.ms": &configValue,
				},
			},
		},
	}
	testRequest(t, "two configs", request, doubleAlterConfigsRequest)
}

golang-github-ibm-sarama-1.43.2/alter_configs_response.go

package sarama

import (
	"fmt"
	"time"
)

// AlterConfigsResponse is a response type for alter config
type AlterConfigsResponse struct {
	Version      int16
	ThrottleTime time.Duration
	Resources    []*AlterConfigsResourceResponse
}

type AlterConfigError struct {
	Err    KError
	ErrMsg string
}

func (c *AlterConfigError) Error() string {
	text := c.Err.Error()
	if c.ErrMsg != "" {
		text = fmt.Sprintf("%s - %s", text, c.ErrMsg)
	}
	return text
}

// AlterConfigsResourceResponse is a response type for alter config resource
type AlterConfigsResourceResponse struct {
	ErrorCode int16
	ErrorMsg  string
	Type      ConfigResourceType
	Name      string
}

func (a *AlterConfigsResponse) encode(pe packetEncoder) error {
	pe.putInt32(int32(a.ThrottleTime / time.Millisecond))

	if err := pe.putArrayLength(len(a.Resources)); err != nil {
		return err
} for _, v := range a.Resources { if err := v.encode(pe); err != nil { return err } } return nil } func (a *AlterConfigsResponse) decode(pd packetDecoder, version int16) error { throttleTime, err := pd.getInt32() if err != nil { return err } a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond responseCount, err := pd.getArrayLength() if err != nil { return err } a.Resources = make([]*AlterConfigsResourceResponse, responseCount) for i := range a.Resources { a.Resources[i] = new(AlterConfigsResourceResponse) if err := a.Resources[i].decode(pd, version); err != nil { return err } } return nil } func (a *AlterConfigsResourceResponse) encode(pe packetEncoder) error { pe.putInt16(a.ErrorCode) err := pe.putString(a.ErrorMsg) if err != nil { return err } pe.putInt8(int8(a.Type)) err = pe.putString(a.Name) if err != nil { return err } return nil } func (a *AlterConfigsResourceResponse) decode(pd packetDecoder, version int16) error { errCode, err := pd.getInt16() if err != nil { return err } a.ErrorCode = errCode e, err := pd.getString() if err != nil { return err } a.ErrorMsg = e t, err := pd.getInt8() if err != nil { return err } a.Type = ConfigResourceType(t) name, err := pd.getString() if err != nil { return err } a.Name = name return nil } func (a *AlterConfigsResponse) key() int16 { return 33 } func (a *AlterConfigsResponse) version() int16 { return a.Version } func (a *AlterConfigsResponse) headerVersion() int16 { return 0 } func (a *AlterConfigsResponse) isValidVersion() bool { return a.Version >= 0 && a.Version <= 1 } func (a *AlterConfigsResponse) requiredVersion() KafkaVersion { switch a.Version { case 1: return V2_0_0_0 case 0: return V0_11_0_0 default: return V2_0_0_0 } } func (r *AlterConfigsResponse) throttleTime() time.Duration { return r.ThrottleTime } golang-github-ibm-sarama-1.43.2/alter_configs_response_test.go000066400000000000000000000015511461256741300244750ustar00rootroot00000000000000package sarama import ( "testing" ) var ( alterResponseEmpty = []byte{ 0, 0, 0, 0, // throttle 0, 0, 0, 0, // no configs } alterResponsePopulated = []byte{ 0, 0, 0, 0, // throttle 0, 0, 0, 1, // response 0, 0, // errorcode 0, 0, // string 2, // topic 0, 3, 'f', 'o', 'o', } ) func TestAlterConfigsResponse(t *testing.T) { var response *AlterConfigsResponse response = &AlterConfigsResponse{ Resources: []*AlterConfigsResourceResponse{}, } testVersionDecodable(t, "empty", response, alterResponseEmpty, 0) if len(response.Resources) != 0 { t.Error("Expected no groups") } response = &AlterConfigsResponse{ Resources: []*AlterConfigsResourceResponse{ { ErrorCode: 0, ErrorMsg: "", Type: TopicResource, Name: "foo", }, }, } testResponse(t, "response with error", response, alterResponsePopulated) } golang-github-ibm-sarama-1.43.2/alter_partition_reassignments_request.go000066400000000000000000000061101461256741300266070ustar00rootroot00000000000000package sarama type alterPartitionReassignmentsBlock struct { replicas []int32 } func (b *alterPartitionReassignmentsBlock) encode(pe packetEncoder) error { if err := pe.putNullableCompactInt32Array(b.replicas); err != nil { return err } pe.putEmptyTaggedFieldArray() return nil } func (b *alterPartitionReassignmentsBlock) decode(pd packetDecoder) (err error) { if b.replicas, err = pd.getCompactInt32Array(); err != nil { return err } return nil } type AlterPartitionReassignmentsRequest struct { TimeoutMs int32 blocks map[string]map[int32]*alterPartitionReassignmentsBlock Version int16 } func (r *AlterPartitionReassignmentsRequest) encode(pe packetEncoder) 
error { pe.putInt32(r.TimeoutMs) pe.putCompactArrayLength(len(r.blocks)) for topic, partitions := range r.blocks { if err := pe.putCompactString(topic); err != nil { return err } pe.putCompactArrayLength(len(partitions)) for partition, block := range partitions { pe.putInt32(partition) if err := block.encode(pe); err != nil { return err } } pe.putEmptyTaggedFieldArray() } pe.putEmptyTaggedFieldArray() return nil } func (r *AlterPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) { r.Version = version if r.TimeoutMs, err = pd.getInt32(); err != nil { return err } topicCount, err := pd.getCompactArrayLength() if err != nil { return err } if topicCount > 0 { r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock) for i := 0; i < topicCount; i++ { topic, err := pd.getCompactString() if err != nil { return err } partitionCount, err := pd.getCompactArrayLength() if err != nil { return err } r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock) for j := 0; j < partitionCount; j++ { partition, err := pd.getInt32() if err != nil { return err } block := &alterPartitionReassignmentsBlock{} if err := block.decode(pd); err != nil { return err } r.blocks[topic][partition] = block if _, err := pd.getEmptyTaggedFieldArray(); err != nil { return err } } if _, err := pd.getEmptyTaggedFieldArray(); err != nil { return err } } } if _, err := pd.getEmptyTaggedFieldArray(); err != nil { return err } return } func (r *AlterPartitionReassignmentsRequest) key() int16 { return 45 } func (r *AlterPartitionReassignmentsRequest) version() int16 { return r.Version } func (r *AlterPartitionReassignmentsRequest) headerVersion() int16 { return 2 } func (r *AlterPartitionReassignmentsRequest) isValidVersion() bool { return r.Version == 0 } func (r *AlterPartitionReassignmentsRequest) requiredVersion() KafkaVersion { return V2_4_0_0 } func (r *AlterPartitionReassignmentsRequest) AddBlock(topic string, partitionID int32, replicas []int32) { if r.blocks == nil { r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock) } if r.blocks[topic] == nil { r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock) } r.blocks[topic][partitionID] = &alterPartitionReassignmentsBlock{replicas} } golang-github-ibm-sarama-1.43.2/alter_partition_reassignments_request_test.go000066400000000000000000000031421461256741300276500ustar00rootroot00000000000000package sarama import "testing" var ( alterPartitionReassignmentsRequestNoBlock = []byte{ 0, 0, 39, 16, // timeout 10000 1, // 1-1=0 blocks 0, // empty tagged fields } alterPartitionReassignmentsRequestOneBlock = []byte{ 0, 0, 39, 16, // timeout 10000 2, // 2-1=1 block 6, 116, 111, 112, 105, 99, // topic name "topic" as compact string 2, // 2-1=1 partitions 0, 0, 0, 0, // partitionId 3, // 3-1=2 replica array size 0, 0, 3, 232, // replica 1000 0, 0, 3, 233, // replica 1001 0, 0, 0, // empty tagged fields } alterPartitionReassignmentsAbortRequest = []byte{ 0, 0, 39, 16, // timeout 10000 2, // 2-1=1 block 6, 116, 111, 112, 105, 99, // topic name "topic" as compact string 2, // 2-1=1 partitions 0, 0, 0, 0, // partitionId 0, // replica array is null (indicates that a pending reassignment should be aborted) 0, 0, 0, // empty tagged fields } ) func TestAlterPartitionReassignmentRequest(t *testing.T) { var request *AlterPartitionReassignmentsRequest request = &AlterPartitionReassignmentsRequest{ TimeoutMs: int32(10000), Version: int16(0), } testRequest(t, "no block", request, 
alterPartitionReassignmentsRequestNoBlock) request.AddBlock("topic", 0, []int32{1000, 1001}) testRequest(t, "one block", request, alterPartitionReassignmentsRequestOneBlock) request = &AlterPartitionReassignmentsRequest{ TimeoutMs: int32(10000), Version: int16(0), } request.AddBlock("topic", 0, nil) testRequest(t, "abort assignment", request, alterPartitionReassignmentsAbortRequest) } golang-github-ibm-sarama-1.43.2/alter_partition_reassignments_response.go000066400000000000000000000076531461256741300267720ustar00rootroot00000000000000package sarama import "time" type alterPartitionReassignmentsErrorBlock struct { errorCode KError errorMessage *string } func (b *alterPartitionReassignmentsErrorBlock) encode(pe packetEncoder) error { pe.putInt16(int16(b.errorCode)) if err := pe.putNullableCompactString(b.errorMessage); err != nil { return err } pe.putEmptyTaggedFieldArray() return nil } func (b *alterPartitionReassignmentsErrorBlock) decode(pd packetDecoder) (err error) { errorCode, err := pd.getInt16() if err != nil { return err } b.errorCode = KError(errorCode) b.errorMessage, err = pd.getCompactNullableString() if _, err := pd.getEmptyTaggedFieldArray(); err != nil { return err } return err } type AlterPartitionReassignmentsResponse struct { Version int16 ThrottleTimeMs int32 ErrorCode KError ErrorMessage *string Errors map[string]map[int32]*alterPartitionReassignmentsErrorBlock } func (r *AlterPartitionReassignmentsResponse) AddError(topic string, partition int32, kerror KError, message *string) { if r.Errors == nil { r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock) } partitions := r.Errors[topic] if partitions == nil { partitions = make(map[int32]*alterPartitionReassignmentsErrorBlock) r.Errors[topic] = partitions } partitions[partition] = &alterPartitionReassignmentsErrorBlock{errorCode: kerror, errorMessage: message} } func (r *AlterPartitionReassignmentsResponse) encode(pe packetEncoder) error { pe.putInt32(r.ThrottleTimeMs) pe.putInt16(int16(r.ErrorCode)) if err := pe.putNullableCompactString(r.ErrorMessage); err != nil { return err } pe.putCompactArrayLength(len(r.Errors)) for topic, partitions := range r.Errors { if err := pe.putCompactString(topic); err != nil { return err } pe.putCompactArrayLength(len(partitions)) for partition, block := range partitions { pe.putInt32(partition) if err := block.encode(pe); err != nil { return err } } pe.putEmptyTaggedFieldArray() } pe.putEmptyTaggedFieldArray() return nil } func (r *AlterPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) { r.Version = version if r.ThrottleTimeMs, err = pd.getInt32(); err != nil { return err } kerr, err := pd.getInt16() if err != nil { return err } r.ErrorCode = KError(kerr) if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil { return err } numTopics, err := pd.getCompactArrayLength() if err != nil { return err } if numTopics > 0 { r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock, numTopics) for i := 0; i < numTopics; i++ { topic, err := pd.getCompactString() if err != nil { return err } ongoingPartitionReassignments, err := pd.getCompactArrayLength() if err != nil { return err } r.Errors[topic] = make(map[int32]*alterPartitionReassignmentsErrorBlock, ongoingPartitionReassignments) for j := 0; j < ongoingPartitionReassignments; j++ { partition, err := pd.getInt32() if err != nil { return err } block := &alterPartitionReassignmentsErrorBlock{} if err := block.decode(pd); err != nil { return err } 
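// Index the decoded per-partition error block under its topic and partition.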
r.Errors[topic][partition] = block } if _, err = pd.getEmptyTaggedFieldArray(); err != nil { return err } } } if _, err = pd.getEmptyTaggedFieldArray(); err != nil { return err } return nil } func (r *AlterPartitionReassignmentsResponse) key() int16 { return 45 } func (r *AlterPartitionReassignmentsResponse) version() int16 { return r.Version } func (r *AlterPartitionReassignmentsResponse) headerVersion() int16 { return 1 } func (r *AlterPartitionReassignmentsResponse) isValidVersion() bool { return r.Version == 0 } func (r *AlterPartitionReassignmentsResponse) requiredVersion() KafkaVersion { return V2_4_0_0 } func (r *AlterPartitionReassignmentsResponse) throttleTime() time.Duration { return time.Duration(r.ThrottleTimeMs) * time.Millisecond } golang-github-ibm-sarama-1.43.2/alter_partition_reassignments_response_test.go000066400000000000000000000023501461256741300300160ustar00rootroot00000000000000package sarama import "testing" var ( alterPartitionReassignmentsResponseNoError = []byte{ 0, 0, 39, 16, // ThrottleTimeMs 10000 0, 0, // errorcode 0, // null string 1, // empty errors array 0, // empty tagged fields } alterPartitionReassignmentsResponseWithError = []byte{ 0, 0, 39, 16, // ThrottleTimeMs 10000 0, 12, // errorcode 6, 101, 114, 114, 111, 114, // error string "error" 2, // errors array length 1 6, 116, 111, 112, 105, 99, // topic name "topic" 2, // partition array length 1 0, 0, 0, 1, // partitionId 0, 3, // kerror 7, 101, 114, 114, 111, 114, 50, // error string "error2" 0, 0, 0, // empty tagged fields } ) func TestAlterPartitionReassignmentResponse(t *testing.T) { var response *AlterPartitionReassignmentsResponse = &AlterPartitionReassignmentsResponse{ ThrottleTimeMs: int32(10000), Version: int16(0), } testResponse(t, "no error", response, alterPartitionReassignmentsResponseNoError) errorMessage := "error" partitionError := "error2" response.ErrorCode = 12 response.ErrorMessage = &errorMessage response.AddError("topic", 1, 3, &partitionError) testResponse(t, "with error", response, alterPartitionReassignmentsResponseWithError) } golang-github-ibm-sarama-1.43.2/alter_user_scram_credentials_request.go000066400000000000000000000070011461256741300263540ustar00rootroot00000000000000package sarama type AlterUserScramCredentialsRequest struct { Version int16 // Deletions represent list of SCRAM credentials to remove Deletions []AlterUserScramCredentialsDelete // Upsertions represent list of SCRAM credentials to update/insert Upsertions []AlterUserScramCredentialsUpsert } type AlterUserScramCredentialsDelete struct { Name string Mechanism ScramMechanismType } type AlterUserScramCredentialsUpsert struct { Name string Mechanism ScramMechanismType Iterations int32 Salt []byte saltedPassword []byte // This field is never transmitted over the wire // @see: https://tools.ietf.org/html/rfc5802 Password []byte } func (r *AlterUserScramCredentialsRequest) encode(pe packetEncoder) error { pe.putCompactArrayLength(len(r.Deletions)) for _, d := range r.Deletions { if err := pe.putCompactString(d.Name); err != nil { return err } pe.putInt8(int8(d.Mechanism)) pe.putEmptyTaggedFieldArray() } pe.putCompactArrayLength(len(r.Upsertions)) for _, u := range r.Upsertions { if err := pe.putCompactString(u.Name); err != nil { return err } pe.putInt8(int8(u.Mechanism)) pe.putInt32(u.Iterations) if err := pe.putCompactBytes(u.Salt); err != nil { return err } // do not transmit the password over the wire formatter := scramFormatter{mechanism: u.Mechanism} salted, err := formatter.saltedPassword(u.Password, 
u.Salt, int(u.Iterations)) if err != nil { return err } if err := pe.putCompactBytes(salted); err != nil { return err } pe.putEmptyTaggedFieldArray() } pe.putEmptyTaggedFieldArray() return nil } func (r *AlterUserScramCredentialsRequest) decode(pd packetDecoder, version int16) error { numDeletions, err := pd.getCompactArrayLength() if err != nil { return err } r.Deletions = make([]AlterUserScramCredentialsDelete, numDeletions) for i := 0; i < numDeletions; i++ { r.Deletions[i] = AlterUserScramCredentialsDelete{} if r.Deletions[i].Name, err = pd.getCompactString(); err != nil { return err } mechanism, err := pd.getInt8() if err != nil { return err } r.Deletions[i].Mechanism = ScramMechanismType(mechanism) if _, err = pd.getEmptyTaggedFieldArray(); err != nil { return err } } numUpsertions, err := pd.getCompactArrayLength() if err != nil { return err } r.Upsertions = make([]AlterUserScramCredentialsUpsert, numUpsertions) for i := 0; i < numUpsertions; i++ { r.Upsertions[i] = AlterUserScramCredentialsUpsert{} if r.Upsertions[i].Name, err = pd.getCompactString(); err != nil { return err } mechanism, err := pd.getInt8() if err != nil { return err } r.Upsertions[i].Mechanism = ScramMechanismType(mechanism) if r.Upsertions[i].Iterations, err = pd.getInt32(); err != nil { return err } if r.Upsertions[i].Salt, err = pd.getCompactBytes(); err != nil { return err } if r.Upsertions[i].saltedPassword, err = pd.getCompactBytes(); err != nil { return err } if _, err = pd.getEmptyTaggedFieldArray(); err != nil { return err } } if _, err = pd.getEmptyTaggedFieldArray(); err != nil { return err } return nil } func (r *AlterUserScramCredentialsRequest) key() int16 { return 51 } func (r *AlterUserScramCredentialsRequest) version() int16 { return r.Version } func (r *AlterUserScramCredentialsRequest) headerVersion() int16 { return 2 } func (r *AlterUserScramCredentialsRequest) isValidVersion() bool { return r.Version == 0 } func (r *AlterUserScramCredentialsRequest) requiredVersion() KafkaVersion { return V2_7_0_0 } golang-github-ibm-sarama-1.43.2/alter_user_scram_credentials_request_test.go000066400000000000000000000034231461256741300274170ustar00rootroot00000000000000package sarama import "testing" var ( emptyAlterUserScramCredentialsRequest = []byte{ 1, // Deletions 1, // Upsertions 0, // empty tagged fields } userAlterUserScramCredentialsRequest = []byte{ 2, // Deletions array, length 1 7, // User name length 6 'd', 'e', 'l', 'e', 't', 'e', // User name 2, // SCRAM_SHA_512 0, // empty tagged fields 2, // Upsertions array, length 1 7, // User name length 6 'u', 'p', 's', 'e', 'r', 't', 1, // SCRAM_SHA_256 0, 0, 16, 0, // iterations: 4096 // salt bytes: 6, 119, 111, 114, 108, 100, // saltedPassword: 33, 193, 85, 83, 3, 218, 48, 159, 107, 125, 30, 143, 228, 86, 54, 191, 221, 220, 75, 245, 100, 5, 231, 233, 78, 157, 21, 240, 231, 185, 203, 211, 128, 0, // empty tagged fields 0, // empty tagged fields } ) func TestAlterUserScramCredentialsRequest(t *testing.T) { request := &AlterUserScramCredentialsRequest{ Version: 0, Deletions: []AlterUserScramCredentialsDelete{}, Upsertions: []AlterUserScramCredentialsUpsert{}, } // Password is not transmitted, will fail with `testRequest` and `DeepEqual` check testRequestEncode(t, "no upsertions/deletions", request, emptyAlterUserScramCredentialsRequest) request.Deletions = []AlterUserScramCredentialsDelete{ { Name: "delete", Mechanism: SCRAM_MECHANISM_SHA_512, }, } request.Upsertions = []AlterUserScramCredentialsUpsert{ { Name: "upsert", Mechanism: SCRAM_MECHANISM_SHA_256, 
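// The iteration count and salt below correspond to the fixture bytes above
// (0, 0, 16, 0 encodes 4096 iterations; the compact bytes spell "world");
// only the derived salted password is encoded, never the raw password.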
Iterations: 4096, Salt: []byte("world"), Password: []byte("hello"), }, } // Password is not transmitted, will fail with `testRequest` and `DeepEqual` check testRequestEncode(t, "single deletion and upsertion", request, userAlterUserScramCredentialsRequest) } golang-github-ibm-sarama-1.43.2/alter_user_scram_credentials_response.go000066400000000000000000000043121461256741300265240ustar00rootroot00000000000000package sarama import "time" type AlterUserScramCredentialsResponse struct { Version int16 ThrottleTime time.Duration Results []*AlterUserScramCredentialsResult } type AlterUserScramCredentialsResult struct { User string ErrorCode KError ErrorMessage *string } func (r *AlterUserScramCredentialsResponse) encode(pe packetEncoder) error { pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) pe.putCompactArrayLength(len(r.Results)) for _, u := range r.Results { if err := pe.putCompactString(u.User); err != nil { return err } pe.putInt16(int16(u.ErrorCode)) if err := pe.putNullableCompactString(u.ErrorMessage); err != nil { return err } pe.putEmptyTaggedFieldArray() } pe.putEmptyTaggedFieldArray() return nil } func (r *AlterUserScramCredentialsResponse) decode(pd packetDecoder, version int16) error { throttleTime, err := pd.getInt32() if err != nil { return err } r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond numResults, err := pd.getCompactArrayLength() if err != nil { return err } if numResults > 0 { r.Results = make([]*AlterUserScramCredentialsResult, numResults) for i := 0; i < numResults; i++ { r.Results[i] = &AlterUserScramCredentialsResult{} if r.Results[i].User, err = pd.getCompactString(); err != nil { return err } kerr, err := pd.getInt16() if err != nil { return err } r.Results[i].ErrorCode = KError(kerr) if r.Results[i].ErrorMessage, err = pd.getCompactNullableString(); err != nil { return err } if _, err := pd.getEmptyTaggedFieldArray(); err != nil { return err } } } if _, err := pd.getEmptyTaggedFieldArray(); err != nil { return err } return nil } func (r *AlterUserScramCredentialsResponse) key() int16 { return 51 } func (r *AlterUserScramCredentialsResponse) version() int16 { return r.Version } func (r *AlterUserScramCredentialsResponse) headerVersion() int16 { return 2 } func (r *AlterUserScramCredentialsResponse) isValidVersion() bool { return r.Version == 0 } func (r *AlterUserScramCredentialsResponse) requiredVersion() KafkaVersion { return V2_7_0_0 } func (r *AlterUserScramCredentialsResponse) throttleTime() time.Duration { return r.ThrottleTime } golang-github-ibm-sarama-1.43.2/alter_user_scram_credentials_response_test.go000066400000000000000000000020311461256741300275570ustar00rootroot00000000000000package sarama import ( "testing" "time" ) var ( emptyAlterUserScramCredentialsResponse = []byte{ 0, 0, 11, 184, // throttle time 1, // empty results array 0, // empty tagged fields } userAlterUserScramCredentialsResponse = []byte{ 0, 0, 11, 184, // throttle time 2, // results array length 7, 'n', 'o', 'b', 'o', 'd', 'y', // User 0, 11, // ErrorCode 6, 'e', 'r', 'r', 'o', 'r', // ErrorMessage 0, // empty tagged fields 0, // empty tagged fields } ) func TestAlterUserScramCredentialsResponse(t *testing.T) { response := &AlterUserScramCredentialsResponse{ Version: 0, ThrottleTime: time.Second * 3, } testResponse(t, "empty response", response, emptyAlterUserScramCredentialsResponse) resultErrorMessage := "error" response.Results = append(response.Results, &AlterUserScramCredentialsResult{ User: "nobody", ErrorCode: 11, ErrorMessage: &resultErrorMessage, }) 
testResponse(t, "single user response", response, userAlterUserScramCredentialsResponse) } golang-github-ibm-sarama-1.43.2/api_versions_request.go000066400000000000000000000031211461256741300231450ustar00rootroot00000000000000package sarama const defaultClientSoftwareName = "sarama" type ApiVersionsRequest struct { // Version defines the protocol version to use for encode and decode Version int16 // ClientSoftwareName contains the name of the client. ClientSoftwareName string // ClientSoftwareVersion contains the version of the client. ClientSoftwareVersion string } func (r *ApiVersionsRequest) encode(pe packetEncoder) (err error) { if r.Version >= 3 { if err := pe.putCompactString(r.ClientSoftwareName); err != nil { return err } if err := pe.putCompactString(r.ClientSoftwareVersion); err != nil { return err } pe.putEmptyTaggedFieldArray() } return nil } func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) { r.Version = version if r.Version >= 3 { if r.ClientSoftwareName, err = pd.getCompactString(); err != nil { return err } if r.ClientSoftwareVersion, err = pd.getCompactString(); err != nil { return err } if _, err := pd.getEmptyTaggedFieldArray(); err != nil { return err } } return nil } func (r *ApiVersionsRequest) key() int16 { return 18 } func (r *ApiVersionsRequest) version() int16 { return r.Version } func (r *ApiVersionsRequest) headerVersion() int16 { if r.Version >= 3 { return 2 } return 1 } func (r *ApiVersionsRequest) isValidVersion() bool { return r.Version >= 0 && r.Version <= 3 } func (r *ApiVersionsRequest) requiredVersion() KafkaVersion { switch r.Version { case 3: return V2_4_0_0 case 2: return V2_0_0_0 case 1: return V0_11_0_0 case 0: return V0_10_0_0 default: return V2_4_0_0 } } golang-github-ibm-sarama-1.43.2/api_versions_request_test.go000066400000000000000000000010621461256741300242060ustar00rootroot00000000000000package sarama import "testing" var ( apiVersionRequest []byte apiVersionRequestV3 = []byte{ 0x07, 's', 'a', 'r', 'a', 'm', 'a', 0x07, '0', '.', '1', '0', '.', '0', 0x00, } ) func TestApiVersionsRequest(t *testing.T) { request := new(ApiVersionsRequest) testRequest(t, "basic", request, apiVersionRequest) } func TestApiVersionsRequestV3(t *testing.T) { request := new(ApiVersionsRequest) request.Version = 3 request.ClientSoftwareName = "sarama" request.ClientSoftwareVersion = "0.10.0" testRequest(t, "v3", request, apiVersionRequestV3) } golang-github-ibm-sarama-1.43.2/api_versions_response.go000066400000000000000000000070611461256741300233220ustar00rootroot00000000000000package sarama import "time" // ApiVersionsResponseKey contains the APIs supported by the broker. type ApiVersionsResponseKey struct { // Version defines the protocol version to use for encode and decode Version int16 // ApiKey contains the API index. ApiKey int16 // MinVersion contains the minimum supported version, inclusive. MinVersion int16 // MaxVersion contains the maximum supported version, inclusive. 
MaxVersion int16 } func (a *ApiVersionsResponseKey) encode(pe packetEncoder, version int16) (err error) { a.Version = version pe.putInt16(a.ApiKey) pe.putInt16(a.MinVersion) pe.putInt16(a.MaxVersion) if version >= 3 { pe.putEmptyTaggedFieldArray() } return nil } func (a *ApiVersionsResponseKey) decode(pd packetDecoder, version int16) (err error) { a.Version = version if a.ApiKey, err = pd.getInt16(); err != nil { return err } if a.MinVersion, err = pd.getInt16(); err != nil { return err } if a.MaxVersion, err = pd.getInt16(); err != nil { return err } if version >= 3 { if _, err := pd.getEmptyTaggedFieldArray(); err != nil { return err } } return nil } type ApiVersionsResponse struct { // Version defines the protocol version to use for encode and decode Version int16 // ErrorCode contains the top-level error code. ErrorCode int16 // ApiKeys contains the APIs supported by the broker. ApiKeys []ApiVersionsResponseKey // ThrottleTimeMs contains the duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. ThrottleTimeMs int32 } func (r *ApiVersionsResponse) encode(pe packetEncoder) (err error) { pe.putInt16(r.ErrorCode) if r.Version >= 3 { pe.putCompactArrayLength(len(r.ApiKeys)) } else { if err := pe.putArrayLength(len(r.ApiKeys)); err != nil { return err } } for _, block := range r.ApiKeys { if err := block.encode(pe, r.Version); err != nil { return err } } if r.Version >= 1 { pe.putInt32(r.ThrottleTimeMs) } if r.Version >= 3 { pe.putEmptyTaggedFieldArray() } return nil } func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) (err error) { r.Version = version if r.ErrorCode, err = pd.getInt16(); err != nil { return err } var numApiKeys int if r.Version >= 3 { numApiKeys, err = pd.getCompactArrayLength() if err != nil { return err } } else { numApiKeys, err = pd.getArrayLength() if err != nil { return err } } r.ApiKeys = make([]ApiVersionsResponseKey, numApiKeys) for i := 0; i < numApiKeys; i++ { var block ApiVersionsResponseKey if err = block.decode(pd, r.Version); err != nil { return err } r.ApiKeys[i] = block } if r.Version >= 1 { if r.ThrottleTimeMs, err = pd.getInt32(); err != nil { return err } } if r.Version >= 3 { if _, err = pd.getEmptyTaggedFieldArray(); err != nil { return err } } return nil } func (r *ApiVersionsResponse) key() int16 { return 18 } func (r *ApiVersionsResponse) version() int16 { return r.Version } func (r *ApiVersionsResponse) headerVersion() int16 { // ApiVersionsResponse always includes a v0 header. 
// See KIP-511 for details return 0 } func (r *ApiVersionsResponse) isValidVersion() bool { return r.Version >= 0 && r.Version <= 3 } func (r *ApiVersionsResponse) requiredVersion() KafkaVersion { switch r.Version { case 3: return V2_4_0_0 case 2: return V2_0_0_0 case 1: return V0_11_0_0 case 0: return V0_10_0_0 default: return V2_4_0_0 } } func (r *ApiVersionsResponse) throttleTime() time.Duration { return time.Duration(r.ThrottleTimeMs) * time.Millisecond } golang-github-ibm-sarama-1.43.2/api_versions_response_test.go000066400000000000000000000035121461256741300243560ustar00rootroot00000000000000package sarama import "testing" var ( apiVersionResponse = []byte{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x03, 0x00, 0x02, 0x00, 0x01, } apiVersionResponseV3 = []byte{ 0x00, 0x00, // no error 0x02, // compact array length 1 0x00, 0x03, 0x00, 0x02, 0x00, 0x01, 0x00, // tagged fields 0x00, 0x00, 0x00, 0x00, // throttle time 0x01, 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // tagged fields (empty SupportedFeatures) } ) func TestApiVersionsResponse(t *testing.T) { response := new(ApiVersionsResponse) testVersionDecodable(t, "no error", response, apiVersionResponse, 0) if response.ErrorCode != int16(ErrNoError) { t.Error("Decoding error failed: no error expected but found", response.ErrorCode) } if response.ApiKeys[0].ApiKey != 0x03 { t.Error("Decoding error: expected 0x03 but got", response.ApiKeys[0].ApiKey) } if response.ApiKeys[0].MinVersion != 0x02 { t.Error("Decoding error: expected 0x02 but got", response.ApiKeys[0].MinVersion) } if response.ApiKeys[0].MaxVersion != 0x01 { t.Error("Decoding error: expected 0x01 but got", response.ApiKeys[0].MaxVersion) } } func TestApiVersionsResponseV3(t *testing.T) { response := new(ApiVersionsResponse) response.Version = 3 testVersionDecodable(t, "no error", response, apiVersionResponseV3, 3) if response.ErrorCode != int16(ErrNoError) { t.Error("Decoding error failed: no error expected but found", response.ErrorCode) } if response.ApiKeys[0].ApiKey != 0x03 { t.Error("Decoding error: expected 0x03 but got", response.ApiKeys[0].ApiKey) } if response.ApiKeys[0].MinVersion != 0x02 { t.Error("Decoding error: expected 0x02 but got", response.ApiKeys[0].MinVersion) } if response.ApiKeys[0].MaxVersion != 0x01 { t.Error("Decoding error: expected 0x01 but got", response.ApiKeys[0].MaxVersion) } } golang-github-ibm-sarama-1.43.2/async_producer.go000066400000000000000000001210641461256741300217230ustar00rootroot00000000000000package sarama import ( "encoding/binary" "errors" "fmt" "math" "sync" "time" "github.com/eapache/go-resiliency/breaker" "github.com/eapache/queue" "github.com/rcrowley/go-metrics" ) // AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages // to the correct broker for the provided topic-partition, refreshing metadata as appropriate, // and parses responses for errors. You must read from the Errors() channel or the // producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid // leaks and message lost: it will not be garbage-collected automatically when it passes // out of scope and buffered messages may not be flushed. type AsyncProducer interface { // AsyncClose triggers a shutdown of the producer. The shutdown has completed // when both the Errors and Successes channels have been closed. When calling // AsyncClose, you *must* continue to read from those channels in order to // drain the results of any messages in flight. 
AsyncClose() // Close shuts down the producer and waits for any buffered messages to be // flushed. You must call this function before a producer object passes out of // scope, as it may otherwise leak memory. You must call this before the process // shuts down, or you may lose messages. You must call this before calling // Close on the underlying client. Close() error // Input is the input channel for the user to write messages to that they // wish to send. Input() chan<- *ProducerMessage // Successes is the success output channel back to the user when Return.Successes is // enabled. If Return.Successes is true, you MUST read from this channel or the // Producer will deadlock. It is suggested that you send and read messages // together in a single select statement. Successes() <-chan *ProducerMessage // Errors is the error output channel back to the user. You MUST read from this // channel or the Producer will deadlock when the channel is full. Alternatively, // you can set Producer.Return.Errors in your config to false, which prevents // errors from being returned. Errors() <-chan *ProducerError // IsTransactional returns true when the current producer is transactional. IsTransactional() bool // TxnStatus returns the current producer transaction status. TxnStatus() ProducerTxnStatusFlag // BeginTxn marks the current transaction as ready. BeginTxn() error // CommitTxn commits the current transaction. CommitTxn() error // AbortTxn aborts the current transaction. AbortTxn() error // AddOffsetsToTxn adds the associated offsets to the current transaction. AddOffsetsToTxn(offsets map[string][]*PartitionOffsetMetadata, groupId string) error // AddMessageToTxn adds the message's offset to the current transaction. AddMessageToTxn(msg *ConsumerMessage, groupId string, metadata *string) error } type asyncProducer struct { client Client conf *Config errors chan *ProducerError input, successes, retries chan *ProducerMessage inFlight sync.WaitGroup brokers map[*Broker]*brokerProducer brokerRefs map[*brokerProducer]int brokerLock sync.Mutex txnmgr *transactionManager txLock sync.Mutex metricsRegistry metrics.Registry } // NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration. func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) { client, err := NewClient(addrs, conf) if err != nil { return nil, err } return newAsyncProducer(client) } // NewAsyncProducerFromClient creates a new Producer using the given client. It is still // necessary to call Close() on the underlying client when shutting down this producer. func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) { // For clients passed in by the caller, ensure we don't // call Close() on it.
cli := &nopCloserClient{client} return newAsyncProducer(cli) } func newAsyncProducer(client Client) (AsyncProducer, error) { // Check that we are not dealing with a closed Client before processing any other arguments if client.Closed() { return nil, ErrClosedClient } txnmgr, err := newTransactionManager(client.Config(), client) if err != nil { return nil, err } p := &asyncProducer{ client: client, conf: client.Config(), errors: make(chan *ProducerError), input: make(chan *ProducerMessage), successes: make(chan *ProducerMessage), retries: make(chan *ProducerMessage), brokers: make(map[*Broker]*brokerProducer), brokerRefs: make(map[*brokerProducer]int), txnmgr: txnmgr, metricsRegistry: newCleanupRegistry(client.Config().MetricRegistry), } // launch our singleton dispatchers go withRecover(p.dispatcher) go withRecover(p.retryHandler) return p, nil } type flagSet int8 const ( syn flagSet = 1 << iota // first message from partitionProducer to brokerProducer fin // final message from partitionProducer to brokerProducer and back shutdown // start the shutdown process endtxn // endtxn committxn // endtxn aborttxn // endtxn ) // ProducerMessage is the collection of elements passed to the Producer in order to send a message. type ProducerMessage struct { Topic string // The Kafka topic for this message. // The partitioning key for this message. Pre-existing Encoders include // StringEncoder and ByteEncoder. Key Encoder // The actual message to store in Kafka. Pre-existing Encoders include // StringEncoder and ByteEncoder. Value Encoder // The headers are key-value pairs that are transparently passed // by Kafka between producers and consumers. Headers []RecordHeader // This field is used to hold arbitrary data you wish to include so it // will be available when receiving on the Successes and Errors channels. // Sarama completely ignores this field and is only to be used for // pass-through data. Metadata interface{} // Below this point are filled in by the producer as the message is processed // Offset is the offset of the message stored on the broker. This is only // guaranteed to be defined if the message was successfully delivered and // RequiredAcks is not NoResponse. Offset int64 // Partition is the partition that the message was sent to. This is only // guaranteed to be defined if the message was successfully delivered. Partition int32 // Timestamp can vary in behavior depending on broker configuration, being // in either one of the CreateTime or LogAppendTime modes (default CreateTime), // and requiring version at least 0.10.0. // // When configured to CreateTime, the timestamp is specified by the producer // either by explicitly setting this field, or when the message is added // to a produce set. // // When configured to LogAppendTime, the timestamp assigned to the message // by the broker. This is only guaranteed to be defined if the message was // successfully delivered and RequiredAcks is not NoResponse. Timestamp time.Time retries int flags flagSet expectation chan *ProducerError sequenceNumber int32 producerEpoch int16 hasSequence bool } const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc. 
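// NOTE: illustrative sketch only, not part of the upstream 1.43.2 source. It shows
// the contract documented on AsyncProducer above: send on Input() and drain
// Successes()/Errors() in the same select so the producer can never deadlock
// (Successes only delivers when Producer.Return.Successes is enabled). The topic
// name, payload, and the done channel are assumptions made for the example.
func exampleAsyncProducerSendLoop(p AsyncProducer, done <-chan struct{}) {
	for {
		select {
		case p.Input() <- &ProducerMessage{Topic: "example-topic", Value: StringEncoder("hello")}:
			// queued; the same message comes back on Successes or Errors once resolved
		case msg := <-p.Successes():
			Logger.Printf("delivered %s/%d@%d\n", msg.Topic, msg.Partition, msg.Offset)
		case perr := <-p.Errors():
			Logger.Println("delivery failed:", perr.Err)
		case <-done:
			return
		}
	}
}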
func (m *ProducerMessage) ByteSize(version int) int { var size int if version >= 2 { size = maximumRecordOverhead for _, h := range m.Headers { size += len(h.Key) + len(h.Value) + 2*binary.MaxVarintLen32 } } else { size = producerMessageOverhead } if m.Key != nil { size += m.Key.Length() } if m.Value != nil { size += m.Value.Length() } return size } func (m *ProducerMessage) clear() { m.flags = 0 m.retries = 0 m.sequenceNumber = 0 m.producerEpoch = 0 m.hasSequence = false } // ProducerError is the type of error generated when the producer fails to deliver a message. // It contains the original ProducerMessage as well as the actual error value. type ProducerError struct { Msg *ProducerMessage Err error } func (pe ProducerError) Error() string { return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err) } func (pe ProducerError) Unwrap() error { return pe.Err } // ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface. // It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel // when closing a producer. type ProducerErrors []*ProducerError func (pe ProducerErrors) Error() string { return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe)) } func (p *asyncProducer) IsTransactional() bool { return p.txnmgr.isTransactional() } func (p *asyncProducer) AddMessageToTxn(msg *ConsumerMessage, groupId string, metadata *string) error { offsets := make(map[string][]*PartitionOffsetMetadata) offsets[msg.Topic] = []*PartitionOffsetMetadata{ { Partition: msg.Partition, Offset: msg.Offset + 1, Metadata: metadata, }, } return p.AddOffsetsToTxn(offsets, groupId) } func (p *asyncProducer) AddOffsetsToTxn(offsets map[string][]*PartitionOffsetMetadata, groupId string) error { p.txLock.Lock() defer p.txLock.Unlock() if !p.IsTransactional() { DebugLogger.Printf("producer/txnmgr [%s] attempt to call AddOffsetsToTxn on a non-transactional producer\n", p.txnmgr.transactionalID) return ErrNonTransactedProducer } DebugLogger.Printf("producer/txnmgr [%s] add offsets to transaction\n", p.txnmgr.transactionalID) return p.txnmgr.addOffsetsToTxn(offsets, groupId) } func (p *asyncProducer) TxnStatus() ProducerTxnStatusFlag { return p.txnmgr.currentTxnStatus() } func (p *asyncProducer) BeginTxn() error { p.txLock.Lock() defer p.txLock.Unlock() if !p.IsTransactional() { DebugLogger.Println("producer/txnmgr attempt to call BeginTxn on a non-transactional producer") return ErrNonTransactedProducer } return p.txnmgr.transitionTo(ProducerTxnFlagInTransaction, nil) } func (p *asyncProducer) CommitTxn() error { p.txLock.Lock() defer p.txLock.Unlock() if !p.IsTransactional() { DebugLogger.Printf("producer/txnmgr [%s] attempt to call CommitTxn on a non-transactional producer\n", p.txnmgr.transactionalID) return ErrNonTransactedProducer } DebugLogger.Printf("producer/txnmgr [%s] committing transaction\n", p.txnmgr.transactionalID) err := p.finishTransaction(true) if err != nil { return err } DebugLogger.Printf("producer/txnmgr [%s] transaction committed\n", p.txnmgr.transactionalID) return nil } func (p *asyncProducer) AbortTxn() error { p.txLock.Lock() defer p.txLock.Unlock() if !p.IsTransactional() { DebugLogger.Printf("producer/txnmgr [%s] attempt to call AbortTxn on a non-transactional producer\n", p.txnmgr.transactionalID) return ErrNonTransactedProducer } DebugLogger.Printf("producer/txnmgr [%s] aborting transaction\n", p.txnmgr.transactionalID) err := p.finishTransaction(false) if err != 
nil { return err } DebugLogger.Printf("producer/txnmgr [%s] transaction aborted\n", p.txnmgr.transactionalID) return nil } func (p *asyncProducer) finishTransaction(commit bool) error { p.inFlight.Add(1) if commit { p.input <- &ProducerMessage{flags: endtxn | committxn} } else { p.input <- &ProducerMessage{flags: endtxn | aborttxn} } p.inFlight.Wait() return p.txnmgr.finishTransaction(commit) } func (p *asyncProducer) Errors() <-chan *ProducerError { return p.errors } func (p *asyncProducer) Successes() <-chan *ProducerMessage { return p.successes } func (p *asyncProducer) Input() chan<- *ProducerMessage { return p.input } func (p *asyncProducer) Close() error { p.AsyncClose() if p.conf.Producer.Return.Successes { go withRecover(func() { for range p.successes { } }) } var pErrs ProducerErrors if p.conf.Producer.Return.Errors { for event := range p.errors { pErrs = append(pErrs, event) } } else { <-p.errors } if len(pErrs) > 0 { return pErrs } return nil } func (p *asyncProducer) AsyncClose() { go withRecover(p.shutdown) } // singleton // dispatches messages by topic func (p *asyncProducer) dispatcher() { handlers := make(map[string]chan<- *ProducerMessage) shuttingDown := false for msg := range p.input { if msg == nil { Logger.Println("Something tried to send a nil message, it was ignored.") continue } if msg.flags&endtxn != 0 { var err error if msg.flags&committxn != 0 { err = p.txnmgr.transitionTo(ProducerTxnFlagEndTransaction|ProducerTxnFlagCommittingTransaction, nil) } else { err = p.txnmgr.transitionTo(ProducerTxnFlagEndTransaction|ProducerTxnFlagAbortingTransaction, nil) } if err != nil { Logger.Printf("producer/txnmgr unable to end transaction %s", err) } p.inFlight.Done() continue } if msg.flags&shutdown != 0 { shuttingDown = true p.inFlight.Done() continue } if msg.retries == 0 { if shuttingDown { // we can't just call returnError here because that decrements the wait group, // which hasn't been incremented yet for this message, and shouldn't be pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown} if p.conf.Producer.Return.Errors { p.errors <- pErr } else { Logger.Println(pErr) } continue } p.inFlight.Add(1) // Ignore retried msg, there are already in txn. // Can't produce new record when transaction is not started. 
if p.IsTransactional() && p.txnmgr.currentTxnStatus()&ProducerTxnFlagInTransaction == 0 { Logger.Printf("attempt to send message when transaction is not started or is in ending state, got %d, expect %d\n", p.txnmgr.currentTxnStatus(), ProducerTxnFlagInTransaction) p.returnError(msg, ErrTransactionNotReady) continue } } for _, interceptor := range p.conf.Producer.Interceptors { msg.safelyApplyInterceptor(interceptor) } version := 1 if p.conf.Version.IsAtLeast(V0_11_0_0) { version = 2 } else if msg.Headers != nil { p.returnError(msg, ConfigurationError("Producing headers requires Kafka at least v0.11")) continue } size := msg.ByteSize(version) if size > p.conf.Producer.MaxMessageBytes { p.returnError(msg, ConfigurationError(fmt.Sprintf("Attempt to produce message larger than configured Producer.MaxMessageBytes: %d > %d", size, p.conf.Producer.MaxMessageBytes))) continue } handler := handlers[msg.Topic] if handler == nil { handler = p.newTopicProducer(msg.Topic) handlers[msg.Topic] = handler } handler <- msg } for _, handler := range handlers { close(handler) } } // one per topic // partitions messages, then dispatches them by partition type topicProducer struct { parent *asyncProducer topic string input <-chan *ProducerMessage breaker *breaker.Breaker handlers map[int32]chan<- *ProducerMessage partitioner Partitioner } func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage { input := make(chan *ProducerMessage, p.conf.ChannelBufferSize) tp := &topicProducer{ parent: p, topic: topic, input: input, breaker: breaker.New(3, 1, 10*time.Second), handlers: make(map[int32]chan<- *ProducerMessage), partitioner: p.conf.Producer.Partitioner(topic), } go withRecover(tp.dispatch) return input } func (tp *topicProducer) dispatch() { for msg := range tp.input { if msg.retries == 0 { if err := tp.partitionMessage(msg); err != nil { tp.parent.returnError(msg, err) continue } } handler := tp.handlers[msg.Partition] if handler == nil { handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition) tp.handlers[msg.Partition] = handler } handler <- msg } for _, handler := range tp.handlers { close(handler) } } func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error { var partitions []int32 err := tp.breaker.Run(func() (err error) { requiresConsistency := false if ep, ok := tp.partitioner.(DynamicConsistencyPartitioner); ok { requiresConsistency = ep.MessageRequiresConsistency(msg) } else { requiresConsistency = tp.partitioner.RequiresConsistency() } if requiresConsistency { partitions, err = tp.parent.client.Partitions(msg.Topic) } else { partitions, err = tp.parent.client.WritablePartitions(msg.Topic) } return }) if err != nil { return err } numPartitions := int32(len(partitions)) if numPartitions == 0 { return ErrLeaderNotAvailable } choice, err := tp.partitioner.Partition(msg, numPartitions) if err != nil { return err } else if choice < 0 || choice >= numPartitions { return ErrInvalidPartition } msg.Partition = partitions[choice] return nil } // one per partition per topic // dispatches messages to the appropriate broker // also responsible for maintaining message order during retries type partitionProducer struct { parent *asyncProducer topic string partition int32 input <-chan *ProducerMessage leader *Broker breaker *breaker.Breaker brokerProducer *brokerProducer // highWatermark tracks the "current" retry level, which is the only one where we actually let messages through, // all other messages get buffered in retryState[msg.retries].buf to preserve ordering // 
retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and // therefore whether our buffer is complete and safe to flush) highWatermark int retryState []partitionRetryState } type partitionRetryState struct { buf []*ProducerMessage expectChaser bool } func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage { input := make(chan *ProducerMessage, p.conf.ChannelBufferSize) pp := &partitionProducer{ parent: p, topic: topic, partition: partition, input: input, breaker: breaker.New(3, 1, 10*time.Second), retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1), } go withRecover(pp.dispatch) return input } func (pp *partitionProducer) backoff(retries int) { var backoff time.Duration if pp.parent.conf.Producer.Retry.BackoffFunc != nil { maxRetries := pp.parent.conf.Producer.Retry.Max backoff = pp.parent.conf.Producer.Retry.BackoffFunc(retries, maxRetries) } else { backoff = pp.parent.conf.Producer.Retry.Backoff } if backoff > 0 { time.Sleep(backoff) } } func (pp *partitionProducer) updateLeaderIfBrokerProducerIsNil(msg *ProducerMessage) error { if pp.brokerProducer == nil { if err := pp.updateLeader(); err != nil { pp.parent.returnError(msg, err) pp.backoff(msg.retries) return err } Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID()) } return nil } func (pp *partitionProducer) dispatch() { // try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader` // on the first message pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition) if pp.leader != nil { pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader) pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn} } defer func() { if pp.brokerProducer != nil { pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer) } }() for msg := range pp.input { if pp.brokerProducer != nil && pp.brokerProducer.abandoned != nil { select { case <-pp.brokerProducer.abandoned: // a message on the abandoned channel means that our current broker selection is out of date Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID()) pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer) pp.brokerProducer = nil time.Sleep(pp.parent.conf.Producer.Retry.Backoff) default: // producer connection is still open. 
} } if msg.retries > pp.highWatermark { if err := pp.updateLeaderIfBrokerProducerIsNil(msg); err != nil { continue } // a new, higher, retry level; handle it and then back off pp.newHighWatermark(msg.retries) pp.backoff(msg.retries) } else if pp.highWatermark > 0 { // we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level if msg.retries < pp.highWatermark { // in fact this message is not even the current retry level, so buffer it for now (unless it's a just a fin) if msg.flags&fin == fin { pp.retryState[msg.retries].expectChaser = false pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected } else { pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg) } continue } else if msg.flags&fin == fin { // this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set, // meaning this retry level is done and we can go down (at least) one level and flush that pp.retryState[pp.highWatermark].expectChaser = false pp.flushRetryBuffers() pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected continue } } // if we made it this far then the current msg contains real data, and can be sent to the next goroutine // without breaking any of our ordering guarantees if err := pp.updateLeaderIfBrokerProducerIsNil(msg); err != nil { continue } // Now that we know we have a broker to actually try and send this message to, generate the sequence // number for it. // All messages being retried (sent or not) have already had their retry count updated // Also, ignore "special" syn/fin messages used to sync the brokerProducer and the topicProducer. if pp.parent.conf.Producer.Idempotent && msg.retries == 0 && msg.flags == 0 { msg.sequenceNumber, msg.producerEpoch = pp.parent.txnmgr.getAndIncrementSequenceNumber(msg.Topic, msg.Partition) msg.hasSequence = true } if pp.parent.IsTransactional() { pp.parent.txnmgr.maybeAddPartitionToCurrentTxn(pp.topic, pp.partition) } pp.brokerProducer.input <- msg } } func (pp *partitionProducer) newHighWatermark(hwm int) { Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm) pp.highWatermark = hwm // send off a fin so that we know when everything "in between" has made it // back to us and we can safely flush the backlog (otherwise we risk re-ordering messages) pp.retryState[pp.highWatermark].expectChaser = true pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1} // a new HWM means that our current broker selection is out of date Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID()) pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer) pp.brokerProducer = nil } func (pp *partitionProducer) flushRetryBuffers() { Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark) for { pp.highWatermark-- if pp.brokerProducer == nil { if err := pp.updateLeader(); err != nil { pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err) goto flushDone } Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID()) } for _, msg := range pp.retryState[pp.highWatermark].buf { pp.brokerProducer.input <- msg } flushDone: pp.retryState[pp.highWatermark].buf = nil if 
pp.retryState[pp.highWatermark].expectChaser { Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark) break } else if pp.highWatermark == 0 { Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition) break } } } func (pp *partitionProducer) updateLeader() error { return pp.breaker.Run(func() (err error) { if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil { return err } if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil { return err } pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader) pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn} return nil }) } // one per broker; also constructs an associated flusher func (p *asyncProducer) newBrokerProducer(broker *Broker) *brokerProducer { var ( input = make(chan *ProducerMessage) bridge = make(chan *produceSet) pending = make(chan *brokerProducerResponse) responses = make(chan *brokerProducerResponse) ) bp := &brokerProducer{ parent: p, broker: broker, input: input, output: bridge, responses: responses, buffer: newProduceSet(p), currentRetries: make(map[string]map[int32]error), } go withRecover(bp.run) // minimal bridge to make the network response `select`able go withRecover(func() { // Use a wait group to know if we still have in flight requests var wg sync.WaitGroup for set := range bridge { request := set.buildRequest() // Count the in flight requests to know when we can close the pending channel safely wg.Add(1) // Capture the current set to forward in the callback sendResponse := func(set *produceSet) ProduceCallback { return func(response *ProduceResponse, err error) { // Forward the response to make sure we do not block the responseReceiver pending <- &brokerProducerResponse{ set: set, err: err, res: response, } wg.Done() } }(set) if p.IsTransactional() { // Add partition to tx before sending current batch err := p.txnmgr.publishTxnPartitions() if err != nil { // Request failed to be sent sendResponse(nil, err) continue } } // Use AsyncProduce vs Produce to not block waiting for the response // so that we can pipeline multiple produce requests and achieve higher throughput, see: // https://kafka.apache.org/protocol#protocol_network err := broker.AsyncProduce(request, sendResponse) if err != nil { // Request failed to be sent sendResponse(nil, err) continue } // Callback is not called when using NoResponse if p.conf.Producer.RequiredAcks == NoResponse { // Provide the expected nil response sendResponse(nil, nil) } } // Wait for all in flight requests to close the pending channel safely wg.Wait() close(pending) }) // In order to avoid a deadlock when closing the broker on network or malformed response error // we use an intermediate channel to buffer and send pending responses in order // This is because the AsyncProduce callback inside the bridge is invoked from the broker // responseReceiver goroutine and closing the broker requires such goroutine to be finished go withRecover(func() { buf := queue.New() for { if buf.Length() == 0 { res, ok := <-pending if !ok { // We are done forwarding the last pending response close(responses) return } buf.Add(res) } // Send the head pending response or buffer another one // so that we never block the callback headRes := buf.Peek().(*brokerProducerResponse) select { case res, ok := <-pending: if !ok { 
continue } buf.Add(res) continue case responses <- headRes: buf.Remove() continue } } }) if p.conf.Producer.Retry.Max <= 0 { bp.abandoned = make(chan struct{}) } return bp } type brokerProducerResponse struct { set *produceSet err error res *ProduceResponse } // groups messages together into appropriately-sized batches for sending to the broker // handles state related to retries etc type brokerProducer struct { parent *asyncProducer broker *Broker input chan *ProducerMessage output chan<- *produceSet responses <-chan *brokerProducerResponse abandoned chan struct{} buffer *produceSet timer *time.Timer timerFired bool closing error currentRetries map[string]map[int32]error } func (bp *brokerProducer) run() { var output chan<- *produceSet var timerChan <-chan time.Time Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID()) for { select { case msg, ok := <-bp.input: if !ok { Logger.Printf("producer/broker/%d input chan closed\n", bp.broker.ID()) bp.shutdown() return } if msg == nil { continue } if msg.flags&syn == syn { Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n", bp.broker.ID(), msg.Topic, msg.Partition) if bp.currentRetries[msg.Topic] == nil { bp.currentRetries[msg.Topic] = make(map[int32]error) } bp.currentRetries[msg.Topic][msg.Partition] = nil bp.parent.inFlight.Done() continue } if reason := bp.needsRetry(msg); reason != nil { bp.parent.retryMessage(msg, reason) if bp.closing == nil && msg.flags&fin == fin { // we were retrying this partition but we can start processing again delete(bp.currentRetries[msg.Topic], msg.Partition) Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n", bp.broker.ID(), msg.Topic, msg.Partition) } continue } if msg.flags&fin == fin { // New broker producer that was caught up by the retry loop bp.parent.retryMessage(msg, ErrShuttingDown) DebugLogger.Printf("producer/broker/%d state change to [dying-%d] on %s/%d\n", bp.broker.ID(), msg.retries, msg.Topic, msg.Partition) continue } if bp.buffer.wouldOverflow(msg) { Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID()) if err := bp.waitForSpace(msg, false); err != nil { bp.parent.retryMessage(msg, err) continue } } if bp.parent.txnmgr.producerID != noProducerID && bp.buffer.producerEpoch != msg.producerEpoch { // The epoch was reset, need to roll the buffer over Logger.Printf("producer/broker/%d detected epoch rollover, waiting for new buffer\n", bp.broker.ID()) if err := bp.waitForSpace(msg, true); err != nil { bp.parent.retryMessage(msg, err) continue } } if err := bp.buffer.add(msg); err != nil { bp.parent.returnError(msg, err) continue } if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil { bp.timer = time.NewTimer(bp.parent.conf.Producer.Flush.Frequency) timerChan = bp.timer.C } case <-timerChan: bp.timerFired = true case output <- bp.buffer: bp.rollOver() timerChan = nil case response, ok := <-bp.responses: if ok { bp.handleResponse(response) } } if bp.timerFired || bp.buffer.readyToFlush() { output = bp.output } else { output = nil } } } func (bp *brokerProducer) shutdown() { for !bp.buffer.empty() { select { case response := <-bp.responses: bp.handleResponse(response) case bp.output <- bp.buffer: bp.rollOver() } } close(bp.output) // Drain responses from the bridge goroutine for response := range bp.responses { bp.handleResponse(response) } // No more brokerProducer related goroutine should be running Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID()) } func (bp *brokerProducer) 
needsRetry(msg *ProducerMessage) error { if bp.closing != nil { return bp.closing } return bp.currentRetries[msg.Topic][msg.Partition] } func (bp *brokerProducer) waitForSpace(msg *ProducerMessage, forceRollover bool) error { for { select { case response := <-bp.responses: bp.handleResponse(response) // handling a response can change our state, so re-check some things if reason := bp.needsRetry(msg); reason != nil { return reason } else if !bp.buffer.wouldOverflow(msg) && !forceRollover { return nil } case bp.output <- bp.buffer: bp.rollOver() return nil } } } func (bp *brokerProducer) rollOver() { if bp.timer != nil { bp.timer.Stop() } bp.timer = nil bp.timerFired = false bp.buffer = newProduceSet(bp.parent) } func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) { if response.err != nil { bp.handleError(response.set, response.err) } else { bp.handleSuccess(response.set, response.res) } if bp.buffer.empty() { bp.rollOver() // this can happen if the response invalidated our buffer } } func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) { // we iterate through the blocks in the request set, not the response, so that we notice // if the response is missing a block completely var retryTopics []string sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) { if response == nil { // this only happens when RequiredAcks is NoResponse, so we have to assume success bp.parent.returnSuccesses(pSet.msgs) return } block := response.GetBlock(topic, partition) if block == nil { bp.parent.returnErrors(pSet.msgs, ErrIncompleteResponse) return } switch block.Err { // Success case ErrNoError: if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() { for _, msg := range pSet.msgs { msg.Timestamp = block.Timestamp } } for i, msg := range pSet.msgs { msg.Offset = block.Offset + int64(i) } bp.parent.returnSuccesses(pSet.msgs) // Duplicate case ErrDuplicateSequenceNumber: bp.parent.returnSuccesses(pSet.msgs) // Retriable errors case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition, ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend: if bp.parent.conf.Producer.Retry.Max <= 0 { bp.parent.abandonBrokerConnection(bp.broker) bp.parent.returnErrors(pSet.msgs, block.Err) } else { retryTopics = append(retryTopics, topic) } // Other non-retriable errors default: if bp.parent.conf.Producer.Retry.Max <= 0 { bp.parent.abandonBrokerConnection(bp.broker) } bp.parent.returnErrors(pSet.msgs, block.Err) } }) if len(retryTopics) > 0 { if bp.parent.conf.Producer.Idempotent { err := bp.parent.client.RefreshMetadata(retryTopics...) 
if err != nil { Logger.Printf("Failed refreshing metadata because of %v\n", err) } } sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) { block := response.GetBlock(topic, partition) if block == nil { // handled in the previous "eachPartition" loop return } switch block.Err { case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition, ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend: Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n", bp.broker.ID(), topic, partition, block.Err) if bp.currentRetries[topic] == nil { bp.currentRetries[topic] = make(map[int32]error) } bp.currentRetries[topic][partition] = block.Err if bp.parent.conf.Producer.Idempotent { go bp.parent.retryBatch(topic, partition, pSet, block.Err) } else { bp.parent.retryMessages(pSet.msgs, block.Err) } // dropping the following messages has the side effect of incrementing their retry count bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err) } }) } } func (p *asyncProducer) retryBatch(topic string, partition int32, pSet *partitionSet, kerr KError) { Logger.Printf("Retrying batch for %v-%d because of %s\n", topic, partition, kerr) produceSet := newProduceSet(p) produceSet.msgs[topic] = make(map[int32]*partitionSet) produceSet.msgs[topic][partition] = pSet produceSet.bufferBytes += pSet.bufferBytes produceSet.bufferCount += len(pSet.msgs) for _, msg := range pSet.msgs { if msg.retries >= p.conf.Producer.Retry.Max { p.returnErrors(pSet.msgs, kerr) return } msg.retries++ } // it's expected that a metadata refresh has been requested prior to calling retryBatch leader, err := p.client.Leader(topic, partition) if err != nil { Logger.Printf("Failed retrying batch for %v-%d because of %v while looking up for new leader\n", topic, partition, err) for _, msg := range pSet.msgs { p.returnError(msg, kerr) } return } bp := p.getBrokerProducer(leader) bp.output <- produceSet p.unrefBrokerProducer(leader, bp) } func (bp *brokerProducer) handleError(sent *produceSet, err error) { var target PacketEncodingError if errors.As(err, &target) { sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) { bp.parent.returnErrors(pSet.msgs, err) }) } else { Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err) bp.parent.abandonBrokerConnection(bp.broker) _ = bp.broker.Close() bp.closing = err sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) { bp.parent.retryMessages(pSet.msgs, err) }) bp.buffer.eachPartition(func(topic string, partition int32, pSet *partitionSet) { bp.parent.retryMessages(pSet.msgs, err) }) bp.rollOver() } } // singleton // effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock // based on https://godoc.org/github.com/eapache/channels#InfiniteChannel func (p *asyncProducer) retryHandler() { var msg *ProducerMessage buf := queue.New() for { if buf.Length() == 0 { msg = <-p.retries } else { select { case msg = <-p.retries: case p.input <- buf.Peek().(*ProducerMessage): buf.Remove() continue } } if msg == nil { return } buf.Add(msg) } } // utility functions func (p *asyncProducer) shutdown() { Logger.Println("Producer shutting down.") p.inFlight.Add(1) p.input <- &ProducerMessage{flags: shutdown} p.inFlight.Wait() err := p.client.Close() if err != nil { Logger.Println("producer/shutdown failed to close the embedded client:", err) } close(p.input) close(p.retries) 
close(p.errors) close(p.successes) p.metricsRegistry.UnregisterAll() } func (p *asyncProducer) bumpIdempotentProducerEpoch() { _, epoch := p.txnmgr.getProducerID() if epoch == math.MaxInt16 { Logger.Println("producer/txnmanager epoch exhausted, requesting new producer ID") txnmgr, err := newTransactionManager(p.conf, p.client) if err != nil { Logger.Println(err) return } p.txnmgr = txnmgr } else { p.txnmgr.bumpEpoch() } } func (p *asyncProducer) maybeTransitionToErrorState(err error) error { if errors.Is(err, ErrClusterAuthorizationFailed) || errors.Is(err, ErrProducerFenced) || errors.Is(err, ErrUnsupportedVersion) || errors.Is(err, ErrTransactionalIDAuthorizationFailed) { return p.txnmgr.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, err) } if p.txnmgr.coordinatorSupportsBumpingEpoch && p.txnmgr.currentTxnStatus()&ProducerTxnFlagEndTransaction == 0 { p.txnmgr.epochBumpRequired = true } return p.txnmgr.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, err) } func (p *asyncProducer) returnError(msg *ProducerMessage, err error) { if p.IsTransactional() { _ = p.maybeTransitionToErrorState(err) } // We need to reset the producer ID epoch if we set a sequence number on it, because the broker // will never see a message with this number, so we can never continue the sequence. if !p.IsTransactional() && msg.hasSequence { Logger.Printf("producer/txnmanager rolling over epoch due to publish failure on %s/%d", msg.Topic, msg.Partition) p.bumpIdempotentProducerEpoch() } msg.clear() pErr := &ProducerError{Msg: msg, Err: err} if p.conf.Producer.Return.Errors { p.errors <- pErr } else { Logger.Println(pErr) } p.inFlight.Done() } func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) { for _, msg := range batch { p.returnError(msg, err) } } func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) { for _, msg := range batch { if p.conf.Producer.Return.Successes { msg.clear() p.successes <- msg } p.inFlight.Done() } } func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) { if msg.retries >= p.conf.Producer.Retry.Max { p.returnError(msg, err) } else { msg.retries++ p.retries <- msg } } func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) { for _, msg := range batch { p.retryMessage(msg, err) } } func (p *asyncProducer) getBrokerProducer(broker *Broker) *brokerProducer { p.brokerLock.Lock() defer p.brokerLock.Unlock() bp := p.brokers[broker] if bp == nil { bp = p.newBrokerProducer(broker) p.brokers[broker] = bp p.brokerRefs[bp] = 0 } p.brokerRefs[bp]++ return bp } func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp *brokerProducer) { p.brokerLock.Lock() defer p.brokerLock.Unlock() p.brokerRefs[bp]-- if p.brokerRefs[bp] == 0 { close(bp.input) delete(p.brokerRefs, bp) if p.brokers[broker] == bp { delete(p.brokers, broker) } } } func (p *asyncProducer) abandonBrokerConnection(broker *Broker) { p.brokerLock.Lock() defer p.brokerLock.Unlock() bc, ok := p.brokers[broker] if ok && bc.abandoned != nil { close(bc.abandoned) } delete(p.brokers, broker) } golang-github-ibm-sarama-1.43.2/async_producer_test.go000066400000000000000000002077541461256741300227750ustar00rootroot00000000000000package sarama import ( "errors" "log" "math" "os" "os/signal" "strconv" "sync" "sync/atomic" "testing" "time" "github.com/fortytw2/leaktest" "github.com/rcrowley/go-metrics" "github.com/stretchr/testify/require" ) const TestMessage = "ABC THE MESSAGE" func closeProducerWithTimeout(t *testing.T, p AsyncProducer, timeout 
time.Duration) { var wg sync.WaitGroup p.AsyncClose() closer := make(chan struct{}) timer := time.AfterFunc(timeout, func() { t.Error("timeout") close(closer) }) defer timer.Stop() wg.Add(2) go func() { defer wg.Done() for { select { case <-closer: return case _, ok := <-p.Successes(): if !ok { return } t.Error("Unexpected message on Successes()") } } }() go func() { defer wg.Done() for { select { case <-closer: return case msg, ok := <-p.Errors(): if !ok { return } t.Error(msg.Err) } } }() wg.Wait() } func closeProducer(t *testing.T, p AsyncProducer) { closeProducerWithTimeout(t, p, 5*time.Minute) } func expectResultsWithTimeout(t *testing.T, p AsyncProducer, successCount, errorCount int, timeout time.Duration) { t.Helper() expect := successCount + errorCount defer func() { if successCount != 0 || errorCount != 0 { t.Error("Unexpected successes", successCount, "or errors", errorCount) } }() timer := time.NewTimer(timeout) defer timer.Stop() for expect > 0 { select { case <-timer.C: return case msg := <-p.Errors(): if msg.Msg.flags != 0 { t.Error("Message had flags set") } errorCount-- expect-- if errorCount < 0 { t.Error(msg.Err) } case msg := <-p.Successes(): if msg.flags != 0 { t.Error("Message had flags set") } successCount-- expect-- if successCount < 0 { t.Error("Too many successes") } } } } func expectResults(t *testing.T, p AsyncProducer, successCount, errorCount int) { expectResultsWithTimeout(t, p, successCount, errorCount, 5*time.Minute) } type testPartitioner chan *int32 func (p testPartitioner) Partition(msg *ProducerMessage, numPartitions int32) (int32, error) { part := <-p if part == nil { return 0, errors.New("BOOM") } return *part, nil } func (p testPartitioner) RequiresConsistency() bool { return true } func (p testPartitioner) feed(partition int32) { p <- &partition } type flakyEncoder bool func (f flakyEncoder) Length() int { return len(TestMessage) } func (f flakyEncoder) Encode() ([]byte, error) { if !f { return nil, errors.New("flaky encoding error") } return []byte(TestMessage), nil } func TestAsyncProducer(t *testing.T) { seedBroker := NewMockBroker(t, 1) leader := NewMockBroker(t, 2) metadataResponse := new(MetadataResponse) metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, nil, ErrNoError) seedBroker.Returns(metadataResponse) prodSuccess := new(ProduceResponse) prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) leader.Returns(prodSuccess) config := NewTestConfig() config.Producer.Flush.Messages = 10 config.Producer.Return.Successes = true producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } for i := 0; i < 10; i++ { producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Metadata: i} } for i := 0; i < 10; i++ { select { case msg := <-producer.Errors(): t.Error(msg.Err) if msg.Msg.flags != 0 { t.Error("Message had flags set") } case msg := <-producer.Successes(): if msg.flags != 0 { t.Error("Message had flags set") } if msg.Metadata.(int) != i { t.Error("Message metadata did not match") } case <-time.After(time.Second): t.Errorf("Timeout waiting for msg #%d", i) goto done } } done: closeProducer(t, producer) leader.Close() seedBroker.Close() } func TestAsyncProducerMultipleFlushes(t *testing.T) { seedBroker := NewMockBroker(t, 1) leader := NewMockBroker(t, 2) metadataResponse := new(MetadataResponse) metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) 
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, nil, ErrNoError) seedBroker.Returns(metadataResponse) prodSuccess := new(ProduceResponse) prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) leader.Returns(prodSuccess) leader.Returns(prodSuccess) leader.Returns(prodSuccess) config := NewTestConfig() config.Producer.Flush.Messages = 5 config.Producer.Return.Successes = true producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } for flush := 0; flush < 3; flush++ { for i := 0; i < 5; i++ { producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} } expectResults(t, producer, 5, 0) } closeProducer(t, producer) leader.Close() seedBroker.Close() } func TestAsyncProducerMultipleBrokers(t *testing.T) { seedBroker := NewMockBroker(t, 1) leader0 := NewMockBroker(t, 2) leader1 := NewMockBroker(t, 3) metadataResponse := new(MetadataResponse) metadataResponse.AddBroker(leader0.Addr(), leader0.BrokerID()) metadataResponse.AddBroker(leader1.Addr(), leader1.BrokerID()) metadataResponse.AddTopicPartition("my_topic", 0, leader0.BrokerID(), nil, nil, nil, ErrNoError) metadataResponse.AddTopicPartition("my_topic", 1, leader1.BrokerID(), nil, nil, nil, ErrNoError) seedBroker.Returns(metadataResponse) prodResponse0 := new(ProduceResponse) prodResponse0.AddTopicPartition("my_topic", 0, ErrNoError) leader0.Returns(prodResponse0) prodResponse1 := new(ProduceResponse) prodResponse1.AddTopicPartition("my_topic", 1, ErrNoError) leader1.Returns(prodResponse1) config := NewTestConfig() config.Producer.Flush.Messages = 5 config.Producer.Return.Successes = true config.Producer.Partitioner = NewRoundRobinPartitioner producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } for i := 0; i < 10; i++ { producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} } expectResults(t, producer, 10, 0) closeProducer(t, producer) leader1.Close() leader0.Close() seedBroker.Close() } func TestAsyncProducerCustomPartitioner(t *testing.T) { seedBroker := NewMockBroker(t, 1) leader := NewMockBroker(t, 2) metadataResponse := new(MetadataResponse) metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, nil, ErrNoError) seedBroker.Returns(metadataResponse) prodResponse := new(ProduceResponse) prodResponse.AddTopicPartition("my_topic", 0, ErrNoError) leader.Returns(prodResponse) config := NewTestConfig() config.Producer.Flush.Messages = 2 config.Producer.Return.Successes = true config.Producer.Partitioner = func(topic string) Partitioner { p := make(testPartitioner) go func() { p.feed(0) p <- nil p <- nil p <- nil p.feed(0) }() return p } producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } for i := 0; i < 5; i++ { producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} } expectResults(t, producer, 2, 3) closeProducer(t, producer) leader.Close() seedBroker.Close() } func TestAsyncProducerFailureRetry(t *testing.T) { seedBroker := NewMockBroker(t, 1) leader1 := NewMockBroker(t, 2) leader2 := NewMockBroker(t, 3) metadataLeader1 := new(MetadataResponse) metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID()) metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, nil, ErrNoError) seedBroker.Returns(metadataLeader1) config := 
NewTestConfig() config.Producer.Flush.Messages = 10 config.Producer.Return.Successes = true config.Producer.Retry.Backoff = 0 producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } seedBroker.Close() for i := 0; i < 10; i++ { producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} } prodNotLeader := new(ProduceResponse) prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition) leader1.Returns(prodNotLeader) metadataLeader2 := new(MetadataResponse) metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID()) metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, nil, ErrNoError) leader1.Returns(metadataLeader2) prodSuccess := new(ProduceResponse) prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) leader2.Returns(prodSuccess) expectResults(t, producer, 10, 0) leader1.Close() for i := 0; i < 10; i++ { producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} } leader2.Returns(prodSuccess) expectResults(t, producer, 10, 0) leader2.Close() closeProducer(t, producer) } func TestAsyncProducerRecoveryWithRetriesDisabled(t *testing.T) { tt := func(t *testing.T, kErr KError) { seedBroker := NewMockBroker(t, 0) broker1 := NewMockBroker(t, 1) broker2 := NewMockBroker(t, 2) mockLeader := func(leaderID int32) *MockMetadataResponse { return NewMockMetadataResponse(t). SetController(seedBroker.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()). SetBroker(broker1.Addr(), broker1.BrokerID()). SetBroker(broker2.Addr(), broker2.BrokerID()). SetLeader("my_topic", 0, leaderID). SetLeader("my_topic", 1, leaderID) } seedBroker.SetHandlerByMap( map[string]MockResponse{ "MetadataRequest": mockLeader(broker1.BrokerID()), }, ) config := NewTestConfig() config.ClientID = "TestAsyncProducerRecoveryWithRetriesDisabled" config.Producer.Flush.Messages = 2 config.Producer.Flush.Frequency = 100 * time.Millisecond config.Producer.Return.Successes = true config.Producer.Retry.Max = 0 // disable! config.Producer.Retry.Backoff = 0 config.Producer.Partitioner = NewManualPartitioner producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } broker1.SetHandlerByMap( map[string]MockResponse{ "MetadataRequest": mockLeader(broker1.BrokerID()), "ProduceRequest": NewMockProduceResponse(t). SetError("my_topic", 0, kErr). SetError("my_topic", 1, kErr), }, ) producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: 0} producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: 1} expectResults(t, producer, 0, 2) seedBroker.SetHandlerByMap( map[string]MockResponse{ "MetadataRequest": mockLeader(broker2.BrokerID()), }, ) broker1.SetHandlerByMap( map[string]MockResponse{ "MetadataRequest": mockLeader(broker2.BrokerID()), }, ) broker2.SetHandlerByMap( map[string]MockResponse{ "MetadataRequest": mockLeader(broker2.BrokerID()), "ProduceRequest": NewMockProduceResponse(t). SetError("my_topic", 0, ErrNoError). 
SetError("my_topic", 1, ErrNoError), }, ) producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: 0} producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: 1} expectResults(t, producer, 2, 0) closeProducer(t, producer) seedBroker.Close() broker1.Close() broker2.Close() } t.Run("retriable error", func(t *testing.T) { tt(t, ErrNotLeaderForPartition) }) t.Run("non-retriable error", func(t *testing.T) { tt(t, ErrNotController) }) } func TestAsyncProducerEncoderFailures(t *testing.T) { seedBroker := NewMockBroker(t, 1) leader := NewMockBroker(t, 2) metadataResponse := new(MetadataResponse) metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, nil, ErrNoError) seedBroker.Returns(metadataResponse) prodSuccess := new(ProduceResponse) prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) leader.Returns(prodSuccess) leader.Returns(prodSuccess) leader.Returns(prodSuccess) config := NewTestConfig() config.Producer.Flush.Messages = 1 config.Producer.Return.Successes = true config.Producer.Partitioner = NewManualPartitioner producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } for flush := 0; flush < 3; flush++ { producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: flakyEncoder(true), Value: flakyEncoder(false)} producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: flakyEncoder(false), Value: flakyEncoder(true)} producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: flakyEncoder(true), Value: flakyEncoder(true)} expectResults(t, producer, 1, 2) } closeProducer(t, producer) leader.Close() seedBroker.Close() } // If a Kafka broker becomes unavailable and then returns back in service, then // producer reconnects to it and continues sending messages. func TestAsyncProducerBrokerBounce(t *testing.T) { // Given seedBroker := NewMockBroker(t, 1) leader := NewMockBroker(t, 2) leaderAddr := leader.Addr() metadataResponse := new(MetadataResponse) metadataResponse.AddBroker(leaderAddr, leader.BrokerID()) metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, nil, ErrNoError) seedBroker.Returns(metadataResponse) prodSuccess := new(ProduceResponse) prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) config := NewTestConfig() config.Producer.Flush.Messages = 1 config.Producer.Return.Successes = true config.Producer.Retry.Backoff = 0 producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} leader.Returns(prodSuccess) expectResults(t, producer, 1, 0) // When: a broker connection gets reset by a broker (network glitch, restart, you name it). leader.Close() // producer should get EOF leader = NewMockBrokerAddr(t, 2, leaderAddr) // start it up again right away for giggles leader.Returns(metadataResponse) // tell it to go to broker 2 again // Then: a produced message goes through the new broker connection. 
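// With Retry.Backoff at zero the producer retries as soon as the old connection returns EOF,
// re-fetches metadata from the mock broker restarted on the original leader address, and the
// queued message is delivered over the fresh connection.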
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} leader.Returns(prodSuccess) expectResults(t, producer, 1, 0) closeProducer(t, producer) seedBroker.Close() leader.Close() } func TestAsyncProducerBrokerBounceWithStaleMetadata(t *testing.T) { seedBroker := NewMockBroker(t, 1) leader1 := NewMockBroker(t, 2) leader2 := NewMockBroker(t, 3) metadataLeader1 := new(MetadataResponse) metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID()) metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, nil, ErrNoError) seedBroker.Returns(metadataLeader1) config := NewTestConfig() config.Producer.Flush.Messages = 10 config.Producer.Return.Successes = true config.Producer.Retry.Max = 3 config.Producer.Retry.Backoff = 0 producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } for i := 0; i < 10; i++ { producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} } leader1.Close() // producer should get EOF seedBroker.Returns(metadataLeader1) // tell it to go to leader1 again even though it's still down seedBroker.Returns(metadataLeader1) // tell it to go to leader1 again even though it's still down // ok fine, tell it to go to leader2 finally metadataLeader2 := new(MetadataResponse) metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID()) metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, nil, ErrNoError) seedBroker.Returns(metadataLeader2) prodSuccess := new(ProduceResponse) prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) leader2.Returns(prodSuccess) expectResults(t, producer, 10, 0) seedBroker.Close() leader2.Close() closeProducer(t, producer) } func TestAsyncProducerMultipleRetries(t *testing.T) { seedBroker := NewMockBroker(t, 1) leader1 := NewMockBroker(t, 2) leader2 := NewMockBroker(t, 3) metadataLeader1 := new(MetadataResponse) metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID()) metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, nil, ErrNoError) seedBroker.Returns(metadataLeader1) config := NewTestConfig() config.Producer.Flush.Messages = 10 config.Producer.Return.Successes = true config.Producer.Retry.Max = 4 config.Producer.Retry.Backoff = 0 producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } for i := 0; i < 10; i++ { producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} } prodNotLeader := new(ProduceResponse) prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition) leader1.Returns(prodNotLeader) metadataLeader2 := new(MetadataResponse) metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID()) metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, nil, ErrNoError) leader1.Returns(metadataLeader2) leader2.Returns(prodNotLeader) leader2.Returns(metadataLeader1) leader1.Returns(prodNotLeader) leader1.Returns(metadataLeader1) leader1.Returns(prodNotLeader) leader1.Returns(metadataLeader2) prodSuccess := new(ProduceResponse) prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) leader2.Returns(prodSuccess) expectResults(t, producer, 10, 0) for i := 0; i < 10; i++ { producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} } leader2.Returns(prodSuccess) expectResults(t, producer, 10, 0) seedBroker.Close() leader1.Close() leader2.Close() closeProducer(t, producer) } func 
TestAsyncProducerMultipleRetriesWithBackoffFunc(t *testing.T) { seedBroker := NewMockBroker(t, 1) leader1 := NewMockBroker(t, 2) leader2 := NewMockBroker(t, 3) metadataLeader1 := new(MetadataResponse) metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID()) metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, nil, ErrNoError) seedBroker.Returns(metadataLeader1) config := NewTestConfig() config.Producer.Flush.Messages = 1 config.Producer.Return.Successes = true config.Producer.Retry.Max = 4 backoffCalled := make([]int32, config.Producer.Retry.Max+1) config.Producer.Retry.BackoffFunc = func(retries, maxRetries int) time.Duration { atomic.AddInt32(&backoffCalled[retries-1], 1) return 0 } producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} prodNotLeader := new(ProduceResponse) prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition) prodSuccess := new(ProduceResponse) prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) metadataLeader2 := new(MetadataResponse) metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID()) metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, nil, ErrNoError) leader1.Returns(prodNotLeader) leader1.Returns(metadataLeader2) leader2.Returns(prodNotLeader) leader2.Returns(metadataLeader1) leader1.Returns(prodNotLeader) leader1.Returns(metadataLeader1) leader1.Returns(prodNotLeader) leader1.Returns(metadataLeader2) leader2.Returns(prodSuccess) expectResults(t, producer, 1, 0) producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} leader2.Returns(prodSuccess) expectResults(t, producer, 1, 0) seedBroker.Close() leader1.Close() leader2.Close() closeProducer(t, producer) for i := 0; i < config.Producer.Retry.Max; i++ { if atomic.LoadInt32(&backoffCalled[i]) != 1 { t.Errorf("expected one retry attempt #%d", i) } } if atomic.LoadInt32(&backoffCalled[config.Producer.Retry.Max]) != 0 { t.Errorf("expected no retry attempt #%d", config.Producer.Retry.Max) } } // https://github.com/IBM/sarama/issues/2129 func TestAsyncProducerMultipleRetriesWithConcurrentRequests(t *testing.T) { // Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags) seedBroker := NewMockBroker(t, 1) leader := NewMockBroker(t, 2) // The seed broker only handles Metadata request seedBroker.setHandler(func(req *request) (res encoderWithHeader) { metadataLeader := new(MetadataResponse) metadataLeader.AddBroker(leader.Addr(), leader.BrokerID()) metadataLeader.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, nil, ErrNoError) return metadataLeader }) // Simulate a slow broker by taking ~200ms to handle requests // therefore triggering the read timeout and the retry logic leader.setHandler(func(req *request) (res encoderWithHeader) { time.Sleep(200 * time.Millisecond) // Will likely not be read by the producer (read timeout) prodSuccess := new(ProduceResponse) prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) return prodSuccess }) config := NewTestConfig() // Use very short read to simulate read error on unresponsive broker config.Net.ReadTimeout = 50 * time.Millisecond // Flush every record to generate up to 5 in-flight Produce requests // because config.Net.MaxOpenRequests defaults to 5 config.Producer.Flush.MaxMessages = 1 config.Producer.Return.Successes = true // Reduce retries to speed up the test while keeping the 
default backoff config.Producer.Retry.Max = 1 producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } for i := 0; i < 10; i++ { producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} } expectResults(t, producer, 0, 10) seedBroker.Close() leader.Close() closeProducer(t, producer) } func TestAsyncProducerBrokerRestart(t *testing.T) { // Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags) seedBroker := NewMockBroker(t, 1) leader := NewMockBroker(t, 2) var leaderLock sync.Mutex metadataRequestHandlerFunc := func(req *request) (res encoderWithHeader) { leaderLock.Lock() defer leaderLock.Unlock() metadataLeader := new(MetadataResponse) metadataLeader.AddBroker(leader.Addr(), leader.BrokerID()) metadataLeader.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, nil, ErrNoError) return metadataLeader } // The seed broker only handles Metadata request in bootstrap seedBroker.setHandler(metadataRequestHandlerFunc) var emptyValues int32 = 0 countRecordsWithEmptyValue := func(req *request) { preq := req.body.(*ProduceRequest) if batch := preq.records["my_topic"][0].RecordBatch; batch != nil { for _, record := range batch.Records { if len(record.Value) == 0 { atomic.AddInt32(&emptyValues, 1) } } } if batch := preq.records["my_topic"][0].MsgSet; batch != nil { for _, record := range batch.Messages { if len(record.Msg.Value) == 0 { atomic.AddInt32(&emptyValues, 1) } } } } failedProduceRequestHandlerFunc := func(req *request) (res encoderWithHeader) { countRecordsWithEmptyValue(req) time.Sleep(50 * time.Millisecond) prodSuccess := new(ProduceResponse) prodSuccess.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition) return prodSuccess } succeededProduceRequestHandlerFunc := func(req *request) (res encoderWithHeader) { countRecordsWithEmptyValue(req) prodSuccess := new(ProduceResponse) prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) return prodSuccess } leader.SetHandlerFuncByMap(map[string]requestHandlerFunc{ "ProduceRequest": failedProduceRequestHandlerFunc, "MetadataRequest": metadataRequestHandlerFunc, }) config := NewTestConfig() config.Producer.Retry.Backoff = 250 * time.Millisecond config.Producer.Flush.MaxMessages = 1 config.Producer.Return.Errors = true config.Producer.Return.Successes = true config.Producer.Retry.Max = 10 producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } var wg sync.WaitGroup pushMsg := func() { defer wg.Done() for i := 0; i < 10; i++ { producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} time.Sleep(50 * time.Millisecond) } } wg.Add(1) go pushMsg() for i := 0; i < 3; i++ { time.Sleep(100 * time.Millisecond) wg.Add(1) go pushMsg() } leader.Close() leaderLock.Lock() leader = NewMockBroker(t, 2) leaderLock.Unlock() leader.SetHandlerFuncByMap(map[string]requestHandlerFunc{ "ProduceRequest": succeededProduceRequestHandlerFunc, "MetadataRequest": metadataRequestHandlerFunc, }) wg.Wait() expectResultsWithTimeout(t, producer, 40, 0, 10*time.Second) seedBroker.Close() leader.Close() closeProducerWithTimeout(t, producer, 5*time.Second) if emptyValues := atomic.LoadInt32(&emptyValues); emptyValues > 0 { t.Fatalf("%d empty values", emptyValues) } } func TestAsyncProducerOutOfRetries(t *testing.T) { t.Skip("Enable once bug #294 is fixed.") seedBroker := NewMockBroker(t, 1) leader := NewMockBroker(t, 2) metadataResponse := new(MetadataResponse) 
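// TestAsyncProducerOutOfRetries (skipped above until bug #294 is fixed) primes the metadata below,
// then answers the first flush with ErrNotLeaderForPartition while Retry.Max is zero, so every one
// of the first ten messages is expected to surface on Errors() before a later flush succeeds.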
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, nil, ErrNoError) seedBroker.Returns(metadataResponse) config := NewTestConfig() config.Producer.Flush.Messages = 10 config.Producer.Return.Successes = true config.Producer.Retry.Backoff = 0 config.Producer.Retry.Max = 0 producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } for i := 0; i < 10; i++ { producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} } prodNotLeader := new(ProduceResponse) prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition) leader.Returns(prodNotLeader) for i := 0; i < 10; i++ { select { case msg := <-producer.Errors(): if !errors.Is(msg.Err, ErrNotLeaderForPartition) { t.Error(msg.Err) } case <-producer.Successes(): t.Error("Unexpected success") } } seedBroker.Returns(metadataResponse) for i := 0; i < 10; i++ { producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} } prodSuccess := new(ProduceResponse) prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) leader.Returns(prodSuccess) expectResults(t, producer, 10, 0) leader.Close() seedBroker.Close() safeClose(t, producer) } func TestAsyncProducerRetryWithReferenceOpen(t *testing.T) { seedBroker := NewMockBroker(t, 1) leader := NewMockBroker(t, 2) leaderAddr := leader.Addr() metadataResponse := new(MetadataResponse) metadataResponse.AddBroker(leaderAddr, leader.BrokerID()) metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, nil, ErrNoError) metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), nil, nil, nil, ErrNoError) seedBroker.Returns(metadataResponse) config := NewTestConfig() config.Producer.Return.Successes = true config.Producer.Retry.Backoff = 0 config.Producer.Retry.Max = 1 config.Producer.Partitioner = NewRoundRobinPartitioner producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } // prime partition 0 producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} prodSuccess := new(ProduceResponse) prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) leader.Returns(prodSuccess) expectResults(t, producer, 1, 0) // prime partition 1 producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} prodSuccess = new(ProduceResponse) prodSuccess.AddTopicPartition("my_topic", 1, ErrNoError) leader.Returns(prodSuccess) expectResults(t, producer, 1, 0) // reboot the broker (the producer will get EOF on its existing connection) leader.Close() leader = NewMockBrokerAddr(t, 2, leaderAddr) // send another message on partition 0 to trigger the EOF and retry producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} // tell partition 0 to go to that broker again leader.Returns(metadataResponse) // succeed this time prodSuccess = new(ProduceResponse) prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) leader.Returns(prodSuccess) expectResults(t, producer, 1, 0) // shutdown closeProducer(t, producer) seedBroker.Close() leader.Close() } func TestAsyncProducerFlusherRetryCondition(t *testing.T) { seedBroker := NewMockBroker(t, 1) leader := NewMockBroker(t, 2) metadataResponse := new(MetadataResponse) metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) metadataResponse.AddTopicPartition("my_topic", 0, 
leader.BrokerID(), nil, nil, nil, ErrNoError) metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), nil, nil, nil, ErrNoError) seedBroker.Returns(metadataResponse) config := NewTestConfig() config.Producer.Flush.Messages = 5 config.Producer.Return.Successes = true config.Producer.Retry.Backoff = 0 config.Producer.Retry.Max = 1 config.Producer.Partitioner = NewManualPartitioner producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } // prime partitions for p := int32(0); p < 2; p++ { for i := 0; i < 5; i++ { producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: p} } prodSuccess := new(ProduceResponse) prodSuccess.AddTopicPartition("my_topic", p, ErrNoError) leader.Returns(prodSuccess) expectResults(t, producer, 5, 0) } // send more messages on partition 0 for i := 0; i < 5; i++ { producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: 0} } prodNotLeader := new(ProduceResponse) prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition) leader.Returns(prodNotLeader) time.Sleep(50 * time.Millisecond) // tell partition 0 to go to that broker again leader.Returns(metadataResponse) prodSuccess := new(ProduceResponse) prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) leader.Returns(prodSuccess) // succeed this time expectResults(t, producer, 5, 0) // put five more through for i := 0; i < 5; i++ { producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: 0} } prodSuccess = new(ProduceResponse) prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) leader.Returns(prodSuccess) expectResults(t, producer, 5, 0) // shutdown closeProducer(t, producer) seedBroker.Close() leader.Close() } func TestAsyncProducerRetryShutdown(t *testing.T) { seedBroker := NewMockBroker(t, 1) leader := NewMockBroker(t, 2) metadataLeader := new(MetadataResponse) metadataLeader.AddBroker(leader.Addr(), leader.BrokerID()) metadataLeader.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, nil, ErrNoError) seedBroker.Returns(metadataLeader) config := NewTestConfig() config.Producer.Flush.Messages = 10 config.Producer.Return.Successes = true config.Producer.Retry.Backoff = 0 producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } for i := 0; i < 10; i++ { producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} } producer.AsyncClose() time.Sleep(5 * time.Millisecond) // let the shutdown goroutine kick in producer.Input() <- &ProducerMessage{Topic: "FOO"} if err := <-producer.Errors(); !errors.Is(err.Err, ErrShuttingDown) { t.Error(err) } prodNotLeader := new(ProduceResponse) prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition) leader.Returns(prodNotLeader) leader.Returns(metadataLeader) prodSuccess := new(ProduceResponse) prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) leader.Returns(prodSuccess) expectResults(t, producer, 10, 0) seedBroker.Close() leader.Close() // wait for the async-closed producer to shut down fully for err := range producer.Errors() { t.Error(err) } } func TestAsyncProducerNoReturns(t *testing.T) { seedBroker := NewMockBroker(t, 1) leader := NewMockBroker(t, 2) metadataLeader := new(MetadataResponse) metadataLeader.AddBroker(leader.Addr(), leader.BrokerID()) metadataLeader.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, 
nil, ErrNoError) seedBroker.Returns(metadataLeader) config := NewTestConfig() config.Producer.Flush.Messages = 10 config.Producer.Return.Successes = false config.Producer.Return.Errors = false config.Producer.Retry.Backoff = 0 producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } for i := 0; i < 10; i++ { producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} } wait := make(chan bool) go func() { if err := producer.Close(); err != nil { t.Error(err) } close(wait) }() prodSuccess := new(ProduceResponse) prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) leader.Returns(prodSuccess) <-wait seedBroker.Close() leader.Close() } func TestAsyncProducerIdempotentGoldenPath(t *testing.T) { broker := NewMockBroker(t, 1) metadataResponse := &MetadataResponse{ Version: 4, ControllerID: 1, } metadataResponse.AddBroker(broker.Addr(), broker.BrokerID()) metadataResponse.AddTopicPartition("my_topic", 0, broker.BrokerID(), nil, nil, nil, ErrNoError) broker.Returns(metadataResponse) initProducerID := &InitProducerIDResponse{ ThrottleTime: 0, ProducerID: 1000, ProducerEpoch: 1, } broker.Returns(initProducerID) config := NewTestConfig() config.Producer.Flush.Messages = 10 config.Producer.Return.Successes = true config.Producer.Retry.Max = 4 config.Producer.RequiredAcks = WaitForAll config.Producer.Retry.Backoff = 0 config.Producer.Idempotent = true config.Net.MaxOpenRequests = 1 config.Version = V0_11_0_0 producer, err := NewAsyncProducer([]string{broker.Addr()}, config) if err != nil { t.Fatal(err) } for i := 0; i < 10; i++ { producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} } prodSuccess := &ProduceResponse{ Version: 3, ThrottleTime: 0, } prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) broker.Returns(prodSuccess) expectResults(t, producer, 10, 0) broker.Close() closeProducer(t, producer) } func TestAsyncProducerIdempotentRetryCheckBatch(t *testing.T) { // Logger = log.New(os.Stderr, "", log.LstdFlags) tests := []struct { name string failAfterWrite bool }{ {"FailAfterWrite", true}, {"FailBeforeWrite", false}, } for _, test := range tests { broker := NewMockBroker(t, 1) metadataResponse := &MetadataResponse{ Version: 4, ControllerID: 1, } metadataResponse.AddBroker(broker.Addr(), broker.BrokerID()) metadataResponse.AddTopicPartition("my_topic", 0, broker.BrokerID(), nil, nil, nil, ErrNoError) initProducerIDResponse := &InitProducerIDResponse{ ThrottleTime: 0, ProducerID: 1000, ProducerEpoch: 1, } prodNotLeaderResponse := &ProduceResponse{ Version: 3, ThrottleTime: 0, } prodNotLeaderResponse.AddTopicPartition("my_topic", 0, ErrNotEnoughReplicas) prodDuplicate := &ProduceResponse{ Version: 3, ThrottleTime: 0, } prodDuplicate.AddTopicPartition("my_topic", 0, ErrDuplicateSequenceNumber) prodOutOfSeq := &ProduceResponse{ Version: 3, ThrottleTime: 0, } prodOutOfSeq.AddTopicPartition("my_topic", 0, ErrOutOfOrderSequenceNumber) prodSuccessResponse := &ProduceResponse{ Version: 3, ThrottleTime: 0, } prodSuccessResponse.AddTopicPartition("my_topic", 0, ErrNoError) prodCounter := 0 lastBatchFirstSeq := -1 lastBatchSize := -1 lastSequenceWrittenToDisk := -1 handlerFailBeforeWrite := func(req *request) (res encoderWithHeader) { switch req.body.key() { case 3: return metadataResponse case 22: return initProducerIDResponse case 0: prodCounter++ preq := req.body.(*ProduceRequest) batch := preq.records["my_topic"][0].RecordBatch batchFirstSeq := int(batch.FirstSequence) 
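// The branches below emulate a broker's idempotent-producer bookkeeping: an in-sequence batch is
// "written to disk", a retried batch must repeat the same FirstSequence and record count, a retry
// of an already-persisted batch is answered with ErrDuplicateSequenceNumber, and anything else is
// rejected with ErrOutOfOrderSequenceNumber.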
batchSize := len(batch.Records) if lastSequenceWrittenToDisk == batchFirstSeq-1 { // in sequence append if lastBatchFirstSeq == batchFirstSeq { // is a batch retry if lastBatchSize == batchSize { // good retry // mock write to disk lastSequenceWrittenToDisk = batchFirstSeq + batchSize - 1 return prodSuccessResponse } t.Errorf("[%s] Retried Batch firstSeq=%d with different size old=%d new=%d", test.name, batchFirstSeq, lastBatchSize, batchSize) return prodOutOfSeq } // not a retry // save batch just received for future check lastBatchFirstSeq = batchFirstSeq lastBatchSize = batchSize if prodCounter%2 == 1 { if test.failAfterWrite { // mock write to disk lastSequenceWrittenToDisk = batchFirstSeq + batchSize - 1 } return prodNotLeaderResponse } // mock write to disk lastSequenceWrittenToDisk = batchFirstSeq + batchSize - 1 return prodSuccessResponse } if lastBatchFirstSeq == batchFirstSeq && lastBatchSize == batchSize { // is a good batch retry if lastSequenceWrittenToDisk == (batchFirstSeq + batchSize - 1) { // we already have the messages return prodDuplicate } // mock write to disk lastSequenceWrittenToDisk = batchFirstSeq + batchSize - 1 return prodSuccessResponse } // out of sequence / bad retried batch if lastBatchFirstSeq == batchFirstSeq && lastBatchSize != batchSize { t.Errorf("[%s] Retried Batch firstSeq=%d with different size old=%d new=%d", test.name, batchFirstSeq, lastBatchSize, batchSize) } else if lastSequenceWrittenToDisk+1 != batchFirstSeq { t.Errorf("[%s] Out of sequence message lastSequence=%d new batch starts at=%d", test.name, lastSequenceWrittenToDisk, batchFirstSeq) } else { t.Errorf("[%s] Unexpected error", test.name) } return prodOutOfSeq } return nil } config := NewTestConfig() config.Version = V0_11_0_0 config.Producer.Idempotent = true config.Net.MaxOpenRequests = 1 config.Producer.RequiredAcks = WaitForAll config.Producer.Return.Successes = true config.Producer.Flush.Frequency = 50 * time.Millisecond config.Producer.Retry.Backoff = 100 * time.Millisecond broker.setHandler(handlerFailBeforeWrite) producer, err := NewAsyncProducer([]string{broker.Addr()}, config) if err != nil { t.Fatal(err) } for i := 0; i < 3; i++ { producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} } go func() { for i := 0; i < 7; i++ { producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder("goroutine")} time.Sleep(100 * time.Millisecond) } }() expectResults(t, producer, 10, 0) broker.Close() closeProducer(t, producer) } } // test case for https://github.com/IBM/sarama/pull/2378 func TestAsyncProducerIdempotentRetryCheckBatch_2378(t *testing.T) { broker := NewMockBroker(t, 1) metadataResponse := &MetadataResponse{ Version: 4, ControllerID: 1, } metadataResponse.AddBroker(broker.Addr(), broker.BrokerID()) metadataResponse.AddTopicPartition("my_topic", 0, broker.BrokerID(), nil, nil, nil, ErrNoError) initProducerIDResponse := &InitProducerIDResponse{ ThrottleTime: 0, ProducerID: 1000, ProducerEpoch: 1, } prodNotLeaderResponse := &ProduceResponse{ Version: 3, ThrottleTime: 0, } prodNotLeaderResponse.AddTopicPartition("my_topic", 0, ErrNotEnoughReplicas) handlerFailBeforeWrite := func(req *request) (res encoderWithHeader) { switch req.body.key() { case 3: return metadataResponse case 22: return initProducerIDResponse case 0: // for msg, always return error to trigger retryBatch return prodNotLeaderResponse } return nil } config := NewTestConfig() config.Version = V0_11_0_0 config.Producer.Idempotent = true 
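// Sarama validates that an idempotent producer uses Version >= V0_11_0_0, Net.MaxOpenRequests = 1
// and RequiredAcks = WaitForAll; the surrounding settings satisfy those constraints, while
// Retry.Max = 1 makes the retries exhaust quickly so the PR 2378 regression test reports errors
// promptly instead of blocking.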
config.Net.MaxOpenRequests = 1 config.Producer.Retry.Max = 1 // set max retry to 1 config.Producer.RequiredAcks = WaitForAll config.Producer.Return.Successes = true config.Producer.Flush.Frequency = 50 * time.Millisecond config.Producer.Retry.Backoff = 100 * time.Millisecond broker.setHandler(handlerFailBeforeWrite) producer, err := NewAsyncProducer([]string{broker.Addr()}, config) if err != nil { t.Fatal(err) } for i := 0; i < 3; i++ { producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} } go func() { for i := 0; i < 7; i++ { producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder("goroutine")} time.Sleep(100 * time.Millisecond) } }() // this will block until 5 minutes timeout before pr 2378 merge expectResults(t, producer, 0, 10) broker.Close() closeProducer(t, producer) } func TestAsyncProducerIdempotentErrorOnOutOfSeq(t *testing.T) { broker := NewMockBroker(t, 1) metadataResponse := &MetadataResponse{ Version: 4, ControllerID: 1, } metadataResponse.AddBroker(broker.Addr(), broker.BrokerID()) metadataResponse.AddTopicPartition("my_topic", 0, broker.BrokerID(), nil, nil, nil, ErrNoError) broker.Returns(metadataResponse) initProducerID := &InitProducerIDResponse{ ThrottleTime: 0, ProducerID: 1000, ProducerEpoch: 1, } broker.Returns(initProducerID) config := NewTestConfig() config.Producer.Flush.Messages = 10 config.Producer.Return.Successes = true config.Producer.Retry.Max = 400000 config.Producer.RequiredAcks = WaitForAll config.Producer.Retry.Backoff = 0 config.Producer.Idempotent = true config.Net.MaxOpenRequests = 1 config.Version = V0_11_0_0 producer, err := NewAsyncProducer([]string{broker.Addr()}, config) if err != nil { t.Fatal(err) } for i := 0; i < 10; i++ { producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} } prodOutOfSeq := &ProduceResponse{ Version: 3, ThrottleTime: 0, } prodOutOfSeq.AddTopicPartition("my_topic", 0, ErrOutOfOrderSequenceNumber) broker.Returns(prodOutOfSeq) expectResults(t, producer, 0, 10) broker.Close() closeProducer(t, producer) } func TestAsyncProducerIdempotentEpochRollover(t *testing.T) { broker := NewMockBroker(t, 1) defer broker.Close() metadataResponse := &MetadataResponse{ Version: 4, ControllerID: 1, } metadataResponse.AddBroker(broker.Addr(), broker.BrokerID()) metadataResponse.AddTopicPartition("my_topic", 0, broker.BrokerID(), nil, nil, nil, ErrNoError) broker.Returns(metadataResponse) initProducerID := &InitProducerIDResponse{ ThrottleTime: 0, ProducerID: 1000, ProducerEpoch: 1, } broker.Returns(initProducerID) config := NewTestConfig() config.Producer.Flush.Messages = 10 config.Producer.Flush.Frequency = 10 * time.Millisecond config.Producer.Return.Successes = true config.Producer.Retry.Max = 1 // This test needs to exercise what happens when retries exhaust config.Producer.RequiredAcks = WaitForAll config.Producer.Retry.Backoff = 0 config.Producer.Idempotent = true config.Net.MaxOpenRequests = 1 config.Version = V0_11_0_0 producer, err := NewAsyncProducer([]string{broker.Addr()}, config) if err != nil { t.Fatal(err) } defer closeProducer(t, producer) producer.Input() <- &ProducerMessage{Topic: "my_topic", Value: StringEncoder("hello")} prodError := &ProduceResponse{ Version: 3, ThrottleTime: 0, } prodError.AddTopicPartition("my_topic", 0, ErrBrokerNotAvailable) broker.Returns(prodError) <-producer.Errors() lastReqRes := broker.history[len(broker.history)-1] lastProduceBatch := 
lastReqRes.Request.(*ProduceRequest).records["my_topic"][0].RecordBatch if lastProduceBatch.FirstSequence != 0 { t.Error("first sequence not zero") } if lastProduceBatch.ProducerEpoch != 1 { t.Error("first epoch was not one") } // Now if we produce again, the epoch should have rolled over. producer.Input() <- &ProducerMessage{Topic: "my_topic", Value: StringEncoder("hello")} broker.Returns(prodError) <-producer.Errors() lastReqRes = broker.history[len(broker.history)-1] lastProduceBatch = lastReqRes.Request.(*ProduceRequest).records["my_topic"][0].RecordBatch if lastProduceBatch.FirstSequence != 0 { t.Error("second sequence not zero") } if lastProduceBatch.ProducerEpoch <= 1 { t.Error("second epoch was not > 1") } } // TestAsyncProducerIdempotentEpochExhaustion ensures that producer requests // a new producerID when producerEpoch is exhausted func TestAsyncProducerIdempotentEpochExhaustion(t *testing.T) { broker := NewMockBroker(t, 1) defer broker.Close() var ( initialProducerID = int64(1000) newProducerID = initialProducerID + 1 ) metadataResponse := &MetadataResponse{ Version: 4, ControllerID: 1, } metadataResponse.AddBroker(broker.Addr(), broker.BrokerID()) metadataResponse.AddTopicPartition("my_topic", 0, broker.BrokerID(), nil, nil, nil, ErrNoError) broker.Returns(metadataResponse) initProducerID := &InitProducerIDResponse{ ThrottleTime: 0, ProducerID: initialProducerID, ProducerEpoch: math.MaxInt16, // Mock ProducerEpoch at the exhaustion point } broker.Returns(initProducerID) config := NewTestConfig() config.Producer.Flush.Messages = 10 config.Producer.Flush.Frequency = 10 * time.Millisecond config.Producer.Return.Successes = true config.Producer.Retry.Max = 1 // This test needs to exercise what happens when retries exhaust config.Producer.RequiredAcks = WaitForAll config.Producer.Retry.Backoff = 0 config.Producer.Idempotent = true config.Net.MaxOpenRequests = 1 config.Version = V0_11_0_0 producer, err := NewAsyncProducer([]string{broker.Addr()}, config) if err != nil { t.Fatal(err) } defer closeProducer(t, producer) producer.Input() <- &ProducerMessage{Topic: "my_topic", Value: StringEncoder("hello")} prodError := &ProduceResponse{ Version: 3, ThrottleTime: 0, } prodError.AddTopicPartition("my_topic", 0, ErrBrokerNotAvailable) broker.Returns(prodError) broker.Returns(&InitProducerIDResponse{ ProducerID: newProducerID, }) <-producer.Errors() lastProduceReqRes := broker.history[len(broker.history)-2] // last is InitProducerIDRequest lastProduceBatch := lastProduceReqRes.Request.(*ProduceRequest).records["my_topic"][0].RecordBatch if lastProduceBatch.FirstSequence != 0 { t.Error("first sequence not zero") } if lastProduceBatch.ProducerEpoch <= 1 { t.Error("first epoch was not at exhaustion point") } // Now we should produce with a new ProducerID producer.Input() <- &ProducerMessage{Topic: "my_topic", Value: StringEncoder("hello")} broker.Returns(prodError) <-producer.Errors() lastProduceReqRes = broker.history[len(broker.history)-1] lastProduceBatch = lastProduceReqRes.Request.(*ProduceRequest).records["my_topic"][0].RecordBatch if lastProduceBatch.ProducerID != newProducerID || lastProduceBatch.ProducerEpoch != 0 { t.Error("producer did not requested a new producerID") } } // TestBrokerProducerShutdown ensures that a call to shutdown stops the // brokerProducer run() loop and doesn't leak any goroutines // //nolint:paralleltest func TestBrokerProducerShutdown(t *testing.T) { defer leaktest.Check(t)() metrics.UseNilMetrics = true // disable Sarama's go-metrics library defer func() { 
metrics.UseNilMetrics = false }() mockBroker := NewMockBroker(t, 1) metadataResponse := &MetadataResponse{} metadataResponse.AddBroker(mockBroker.Addr(), mockBroker.BrokerID()) metadataResponse.AddTopicPartition( "my_topic", 0, mockBroker.BrokerID(), nil, nil, nil, ErrNoError) mockBroker.Returns(metadataResponse) producer, err := NewAsyncProducer([]string{mockBroker.Addr()}, NewTestConfig()) if err != nil { t.Fatal(err) } broker := &Broker{ addr: mockBroker.Addr(), id: mockBroker.BrokerID(), } // Starts various goroutines in newBrokerProducer bp := producer.(*asyncProducer).getBrokerProducer(broker) // Initiate the shutdown of all of them producer.(*asyncProducer).unrefBrokerProducer(broker, bp) _ = producer.Close() mockBroker.Close() } type appendInterceptor struct { i int } func (b *appendInterceptor) OnSend(msg *ProducerMessage) { if b.i < 0 { panic("hey, the interceptor has failed") } v, _ := msg.Value.Encode() msg.Value = StringEncoder(string(v) + strconv.Itoa(b.i)) b.i++ } func (b *appendInterceptor) OnConsume(msg *ConsumerMessage) { if b.i < 0 { panic("hey, the interceptor has failed") } msg.Value = []byte(string(msg.Value) + strconv.Itoa(b.i)) b.i++ } func testProducerInterceptor( t *testing.T, interceptors []ProducerInterceptor, expectationFn func(*testing.T, int, *ProducerMessage), ) { seedBroker := NewMockBroker(t, 1) leader := NewMockBroker(t, 2) metadataLeader := new(MetadataResponse) metadataLeader.AddBroker(leader.Addr(), leader.BrokerID()) metadataLeader.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, nil, ErrNoError) seedBroker.Returns(metadataLeader) config := NewTestConfig() config.Producer.Flush.Messages = 10 config.Producer.Return.Successes = true config.Producer.Interceptors = interceptors producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } for i := 0; i < 10; i++ { producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} } prodSuccess := new(ProduceResponse) prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) leader.Returns(prodSuccess) for i := 0; i < 10; i++ { select { case msg := <-producer.Errors(): t.Error(msg.Err) case msg := <-producer.Successes(): expectationFn(t, i, msg) } } closeProducer(t, producer) leader.Close() seedBroker.Close() } func TestAsyncProducerInterceptors(t *testing.T) { tests := []struct { name string interceptors []ProducerInterceptor expectationFn func(*testing.T, int, *ProducerMessage) }{ { name: "intercept messages", interceptors: []ProducerInterceptor{&appendInterceptor{i: 0}}, expectationFn: func(t *testing.T, i int, msg *ProducerMessage) { v, _ := msg.Value.Encode() expected := TestMessage + strconv.Itoa(i) if string(v) != expected { t.Errorf("Interceptor should have incremented the value, got %s, expected %s", v, expected) } }, }, { name: "interceptor chain", interceptors: []ProducerInterceptor{&appendInterceptor{i: 0}, &appendInterceptor{i: 1000}}, expectationFn: func(t *testing.T, i int, msg *ProducerMessage) { v, _ := msg.Value.Encode() expected := TestMessage + strconv.Itoa(i) + strconv.Itoa(i+1000) if string(v) != expected { t.Errorf("Interceptor should have incremented the value, got %s, expected %s", v, expected) } }, }, { name: "interceptor chain with one interceptor failing", interceptors: []ProducerInterceptor{&appendInterceptor{i: -1}, &appendInterceptor{i: 1000}}, expectationFn: func(t *testing.T, i int, msg *ProducerMessage) { v, _ := msg.Value.Encode() expected := TestMessage + strconv.Itoa(i+1000) if 
string(v) != expected { t.Errorf("Interceptor should have incremented the value, got %s, expected %s", v, expected) } }, }, { name: "interceptor chain with all interceptors failing", interceptors: []ProducerInterceptor{&appendInterceptor{i: -1}, &appendInterceptor{i: -1}}, expectationFn: func(t *testing.T, i int, msg *ProducerMessage) { v, _ := msg.Value.Encode() expected := TestMessage if string(v) != expected { t.Errorf("Interceptor should have not changed the value, got %s, expected %s", v, expected) } }, }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { testProducerInterceptor(t, tt.interceptors, tt.expectationFn) }) } } func TestProducerError(t *testing.T) { t.Parallel() err := ProducerError{Err: ErrOutOfBrokers} if !errors.Is(err, ErrOutOfBrokers) { t.Error("unexpected errors.Is") } } func TestTxmngInitProducerId(t *testing.T) { broker := NewMockBroker(t, 1) defer broker.Close() metadataLeader := new(MetadataResponse) metadataLeader.Version = 4 metadataLeader.AddBroker(broker.Addr(), broker.BrokerID()) broker.Returns(metadataLeader) config := NewTestConfig() config.Producer.Idempotent = true config.Version = V0_11_0_0 config.Producer.RequiredAcks = WaitForAll config.Net.MaxOpenRequests = 1 client, err := NewClient([]string{broker.Addr()}, config) require.NoError(t, err) defer client.Close() producerIdResponse := &InitProducerIDResponse{ Err: ErrNoError, ProducerID: 1, ProducerEpoch: 0, } broker.Returns(producerIdResponse) txmng, err := newTransactionManager(config, client) require.NoError(t, err) require.Equal(t, int64(1), txmng.producerID) require.Equal(t, int16(0), txmng.producerEpoch) } func TestTxnProduceBumpEpoch(t *testing.T) { broker := NewMockBroker(t, 1) defer broker.Close() config := NewTestConfig() config.Producer.Idempotent = true config.Producer.Transaction.ID = "test" config.Version = V2_6_0_0 config.Producer.RequiredAcks = WaitForAll config.Net.MaxOpenRequests = 1 config.Producer.Return.Errors = false config.ApiVersionsRequest = false metadataLeader := new(MetadataResponse) metadataLeader.Version = 9 metadataLeader.ControllerID = broker.brokerID metadataLeader.AddBroker(broker.Addr(), broker.BrokerID()) metadataLeader.AddTopic("test-topic", ErrNoError) metadataLeader.AddTopicPartition("test-topic", 0, broker.BrokerID(), nil, nil, nil, ErrNoError) broker.Returns(metadataLeader) client, err := NewClient([]string{broker.Addr()}, config) require.NoError(t, err) defer client.Close() findCoordinatorResponse := FindCoordinatorResponse{ Coordinator: client.Brokers()[0], Err: ErrNoError, Version: 1, } broker.Returns(&findCoordinatorResponse) producerIdResponse := &InitProducerIDResponse{ Err: ErrNoError, ProducerID: 1000, ProducerEpoch: 0, Version: 3, } broker.Returns(producerIdResponse) ap, err := NewAsyncProducerFromClient(client) producer := ap.(*asyncProducer) require.NoError(t, err) defer ap.Close() require.Equal(t, int64(1000), producer.txnmgr.producerID) require.Equal(t, int16(0), producer.txnmgr.producerEpoch) addPartitionsToTxnResponse := &AddPartitionsToTxnResponse{ Errors: map[string][]*PartitionError{ "test-topic": { { Partition: 0, }, }, }, } broker.Returns(addPartitionsToTxnResponse) produceResponse := new(ProduceResponse) produceResponse.Version = 7 produceResponse.AddTopicPartition("test-topic", 0, ErrOutOfOrderSequenceNumber) broker.Returns(produceResponse) err = producer.BeginTxn() require.NoError(t, err) producer.Input() <- &ProducerMessage{Topic: "test-topic", Key: nil, Value: StringEncoder(TestMessage)} // Force send 
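// The next three statements force the buffered transactional record out before CommitTxn: the
// in-flight counter is bumped by hand, a message carrying the internal shutdown flag is pushed
// through Input(), and inFlight.Wait() blocks until that marker has drained through the pipeline.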
producer.inFlight.Add(1) producer.Input() <- &ProducerMessage{flags: shutdown} producer.inFlight.Wait() err = producer.CommitTxn() require.Error(t, err) require.Equal(t, ProducerTxnFlagInError|ProducerTxnFlagAbortableError, producer.txnmgr.status) err = producer.CommitTxn() require.Error(t, err) require.Equal(t, ProducerTxnFlagInError|ProducerTxnFlagAbortableError, producer.txnmgr.status) endTxnResponse := &EndTxnResponse{ Err: ErrNoError, } broker.Returns(endTxnResponse) producerBumpIdResponse := &InitProducerIDResponse{ Err: ErrNoError, ProducerID: 1000, ProducerEpoch: 1, Version: 3, } broker.Returns(producerBumpIdResponse) err = producer.AbortTxn() require.NoError(t, err) require.Equal(t, ProducerTxnFlagReady, producer.txnmgr.status) require.Equal(t, int64(1000), producer.txnmgr.producerID) require.Equal(t, int16(1), producer.txnmgr.producerEpoch) } func TestTxnProduceRecordWithCommit(t *testing.T) { broker := NewMockBroker(t, 1) defer broker.Close() config := NewTestConfig() config.Producer.Idempotent = true config.Producer.Transaction.ID = "test" config.Version = V0_11_0_0 config.Producer.RequiredAcks = WaitForAll config.Net.MaxOpenRequests = 1 metadataLeader := new(MetadataResponse) metadataLeader.Version = 4 metadataLeader.ControllerID = broker.brokerID metadataLeader.AddBroker(broker.Addr(), broker.BrokerID()) metadataLeader.AddTopic("test-topic", ErrNoError) metadataLeader.AddTopicPartition("test-topic", 0, broker.BrokerID(), nil, nil, nil, ErrNoError) broker.Returns(metadataLeader) client, err := NewClient([]string{broker.Addr()}, config) require.NoError(t, err) defer client.Close() findCoordinatorResponse := FindCoordinatorResponse{ Coordinator: client.Brokers()[0], Err: ErrNoError, Version: 1, } broker.Returns(&findCoordinatorResponse) producerIdResponse := &InitProducerIDResponse{ Err: ErrNoError, ProducerID: 1, ProducerEpoch: 0, } broker.Returns(producerIdResponse) ap, err := NewAsyncProducerFromClient(client) producer := ap.(*asyncProducer) require.NoError(t, err) defer ap.Close() addPartitionsToTxnResponse := &AddPartitionsToTxnResponse{ Errors: map[string][]*PartitionError{ "test-topic": { { Partition: 0, }, }, }, } broker.Returns(addPartitionsToTxnResponse) produceResponse := new(ProduceResponse) produceResponse.Version = 3 produceResponse.AddTopicPartition("test-topic", 0, ErrNoError) broker.Returns(produceResponse) endTxnResponse := &EndTxnResponse{ Err: ErrNoError, } broker.Returns(endTxnResponse) require.Equal(t, ProducerTxnFlagReady, producer.txnmgr.status) err = producer.BeginTxn() require.NoError(t, err) require.Equal(t, ProducerTxnFlagInTransaction, producer.txnmgr.status) producer.Input() <- &ProducerMessage{Topic: "test-topic", Key: nil, Value: StringEncoder(TestMessage)} err = producer.CommitTxn() require.NoError(t, err) require.Equal(t, ProducerTxnFlagReady, producer.txnmgr.status) } func TestTxnProduceBatchAddPartition(t *testing.T) { broker := NewMockBroker(t, 1) defer broker.Close() config := NewTestConfig() config.Producer.Idempotent = true config.Producer.Transaction.ID = "test" config.Version = V0_11_0_0 config.Producer.RequiredAcks = WaitForAll config.Net.MaxOpenRequests = 1 config.Producer.Retry.Max = 1 config.Producer.Flush.Messages = 3 config.Producer.Flush.Frequency = 30 * time.Second config.Producer.Flush.Bytes = 1 << 12 config.Producer.Partitioner = NewManualPartitioner metadataLeader := new(MetadataResponse) metadataLeader.Version = 4 metadataLeader.ControllerID = broker.brokerID metadataLeader.AddBroker(broker.Addr(), broker.BrokerID()) 
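// Partitions 0, 1 and 2 are registered below on the same mock broker so a single transactional
// flush can carry records for all three; the assertions at the end of the test verify that the
// one AddPartitionsToTxnRequest named every partition.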
metadataLeader.AddTopic("test-topic", ErrNoError) metadataLeader.AddTopicPartition("test-topic", 0, broker.BrokerID(), nil, nil, nil, ErrNoError) metadataLeader.AddTopicPartition("test-topic", 1, broker.BrokerID(), nil, nil, nil, ErrNoError) metadataLeader.AddTopicPartition("test-topic", 2, broker.BrokerID(), nil, nil, nil, ErrNoError) broker.Returns(metadataLeader) client, err := NewClient([]string{broker.Addr()}, config) require.NoError(t, err) defer client.Close() findCoordinatorResponse := FindCoordinatorResponse{ Coordinator: client.Brokers()[0], Err: ErrNoError, Version: 1, } broker.Returns(&findCoordinatorResponse) producerIdResponse := &InitProducerIDResponse{ Err: ErrNoError, ProducerID: 1, ProducerEpoch: 0, } broker.Returns(producerIdResponse) ap, err := NewAsyncProducerFromClient(client) producer := ap.(*asyncProducer) require.NoError(t, err) defer ap.Close() go func() { for err := range producer.Errors() { require.NoError(t, err) } }() broker.Returns(&AddPartitionsToTxnResponse{ Errors: map[string][]*PartitionError{ "test-topic": { { Partition: 0, Err: ErrNoError, }, { Partition: 1, Err: ErrNoError, }, { Partition: 2, Err: ErrNoError, }, }, }, }) produceResponse := new(ProduceResponse) produceResponse.Version = 3 produceResponse.AddTopicPartition("test-topic", 0, ErrNoError) produceResponse.AddTopicPartition("test-topic", 1, ErrNoError) produceResponse.AddTopicPartition("test-topic", 2, ErrNoError) broker.Returns(produceResponse) endTxnResponse := &EndTxnResponse{ Err: ErrNoError, } broker.Returns(endTxnResponse) require.Equal(t, ProducerTxnFlagReady, producer.txnmgr.status) err = producer.BeginTxn() require.NoError(t, err) require.Equal(t, ProducerTxnFlagInTransaction, producer.txnmgr.status) producer.Input() <- &ProducerMessage{Topic: "test-topic", Partition: 0, Key: nil, Value: StringEncoder("partition-0")} producer.Input() <- &ProducerMessage{Topic: "test-topic", Partition: 1, Key: nil, Value: StringEncoder("partition-1")} producer.Input() <- &ProducerMessage{Topic: "test-topic", Partition: 2, Key: nil, Value: StringEncoder("partition-2")} err = producer.CommitTxn() require.NoError(t, err) require.Equal(t, ProducerTxnFlagReady, producer.txnmgr.status) produceExchange := broker.History()[len(broker.History())-2] produceRequest := produceExchange.Request.(*ProduceRequest) require.Equal(t, 3, len(produceRequest.records["test-topic"])) addPartitionExchange := broker.History()[len(broker.History())-3] addpartitionRequest := addPartitionExchange.Request.(*AddPartitionsToTxnRequest) require.Equal(t, 3, len(addpartitionRequest.TopicPartitions["test-topic"])) require.Contains(t, addpartitionRequest.TopicPartitions["test-topic"], int32(0)) require.Contains(t, addpartitionRequest.TopicPartitions["test-topic"], int32(1)) require.Contains(t, addpartitionRequest.TopicPartitions["test-topic"], int32(2)) } func TestTxnProduceRecordWithAbort(t *testing.T) { broker := NewMockBroker(t, 1) defer broker.Close() config := NewTestConfig() config.Producer.Idempotent = true config.Producer.Transaction.ID = "test" config.Version = V0_11_0_0 config.Producer.RequiredAcks = WaitForAll config.Net.MaxOpenRequests = 1 metadataLeader := new(MetadataResponse) metadataLeader.Version = 4 metadataLeader.ControllerID = broker.brokerID metadataLeader.AddBroker(broker.Addr(), broker.BrokerID()) metadataLeader.AddTopic("test-topic", ErrNoError) metadataLeader.AddTopicPartition("test-topic", 0, broker.BrokerID(), nil, nil, nil, ErrNoError) broker.Returns(metadataLeader) client, err := 
NewClient([]string{broker.Addr()}, config) require.NoError(t, err) defer client.Close() findCoordinatorResponse := FindCoordinatorResponse{ Coordinator: client.Brokers()[0], Err: ErrNoError, Version: 1, } broker.Returns(&findCoordinatorResponse) producerIdResponse := &InitProducerIDResponse{ Err: ErrNoError, ProducerID: 1, ProducerEpoch: 0, } broker.Returns(producerIdResponse) ap, err := NewAsyncProducerFromClient(client) producer := ap.(*asyncProducer) require.NoError(t, err) defer ap.Close() broker.Returns(&AddPartitionsToTxnResponse{ Errors: map[string][]*PartitionError{ "test-topic": { { Partition: 0, Err: ErrNoError, }, }, }, }) produceResponse := new(ProduceResponse) produceResponse.Version = 3 produceResponse.AddTopicPartition("test-topic", 0, ErrNoError) broker.Returns(produceResponse) endTxnResponse := &EndTxnResponse{ Err: ErrNoError, } broker.Returns(endTxnResponse) require.Equal(t, ProducerTxnFlagReady, producer.txnmgr.status) err = producer.BeginTxn() require.NoError(t, err) require.Equal(t, ProducerTxnFlagInTransaction, producer.txnmgr.status) producer.Input() <- &ProducerMessage{Topic: "test-topic", Key: nil, Value: StringEncoder(TestMessage)} err = producer.AbortTxn() require.NoError(t, err) require.Equal(t, ProducerTxnFlagReady, producer.txnmgr.status) } func TestTxnCanAbort(t *testing.T) { broker := NewMockBroker(t, 1) defer broker.Close() config := NewTestConfig() config.Producer.Idempotent = true config.Producer.Transaction.ID = "test" config.Version = V0_11_0_0 config.Producer.RequiredAcks = WaitForAll config.Producer.Return.Errors = false config.Producer.Return.Successes = true config.Producer.Retry.Backoff = 0 config.Producer.Flush.Messages = 1 config.Producer.Retry.Max = 1 config.Net.MaxOpenRequests = 1 metadataLeader := new(MetadataResponse) metadataLeader.Version = 4 metadataLeader.ControllerID = broker.brokerID metadataLeader.AddBroker(broker.Addr(), broker.BrokerID()) metadataLeader.AddTopic("test-topic", ErrNoError) metadataLeader.AddTopic("test-topic-2", ErrNoError) metadataLeader.AddTopicPartition("test-topic", 0, broker.BrokerID(), nil, nil, nil, ErrNoError) metadataLeader.AddTopicPartition("test-topic-2", 0, broker.BrokerID(), nil, nil, nil, ErrNoError) broker.Returns(metadataLeader) client, err := NewClient([]string{broker.Addr()}, config) require.NoError(t, err) defer client.Close() findCoordinatorResponse := FindCoordinatorResponse{ Coordinator: client.Brokers()[0], Err: ErrNoError, Version: 1, } broker.Returns(&findCoordinatorResponse) producerIdResponse := &InitProducerIDResponse{ Err: ErrNoError, ProducerID: 1, ProducerEpoch: 0, } broker.Returns(producerIdResponse) ap, err := NewAsyncProducerFromClient(client) producer := ap.(*asyncProducer) require.NoError(t, err) defer ap.Close() broker.Returns(&AddPartitionsToTxnResponse{ Errors: map[string][]*PartitionError{ "test-topic-2": { { Partition: 0, Err: ErrNoError, }, }, }, }) produceResponse := new(ProduceResponse) produceResponse.Version = 3 produceResponse.AddTopicPartition("test-topic-2", 0, ErrNoError) broker.Returns(produceResponse) broker.Returns(&AddPartitionsToTxnResponse{ Errors: map[string][]*PartitionError{ "test-topic": { { Partition: 0, Err: ErrTopicAuthorizationFailed, }, }, }, }) // now broker is closed due to error. 
will now reopen it broker.Returns(metadataLeader) endTxnResponse := &EndTxnResponse{ Err: ErrNoError, } broker.Returns(endTxnResponse) require.Equal(t, ProducerTxnFlagReady, producer.txnmgr.status) err = producer.BeginTxn() require.NoError(t, err) require.Equal(t, ProducerTxnFlagInTransaction, producer.txnmgr.status) producer.Input() <- &ProducerMessage{Topic: "test-topic-2", Partition: 0, Key: nil, Value: StringEncoder(TestMessage)} <-producer.Successes() producer.Input() <- &ProducerMessage{Topic: "test-topic", Partition: 0, Key: nil, Value: StringEncoder(TestMessage)} err = producer.CommitTxn() require.Error(t, err) require.NotEqual(t, producer.txnmgr.status&ProducerTxnFlagAbortableError, 0) err = producer.AbortTxn() require.NoError(t, err) } // This example shows how to use the producer while simultaneously // reading the Errors channel to know about any failures. func ExampleAsyncProducer_select() { producer, err := NewAsyncProducer([]string{"localhost:9092"}, nil) if err != nil { panic(err) } defer func() { if err := producer.Close(); err != nil { log.Fatalln(err) } }() // Trap SIGINT to trigger a shutdown. signals := make(chan os.Signal, 1) signal.Notify(signals, os.Interrupt) var enqueued, producerErrors int ProducerLoop: for { select { case producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder("testing 123")}: enqueued++ case err := <-producer.Errors(): log.Println("Failed to produce message", err) producerErrors++ case <-signals: break ProducerLoop } } log.Printf("Enqueued: %d; errors: %d\n", enqueued, producerErrors) } // This example shows how to use the producer with separate goroutines // reading from the Successes and Errors channels. Note that in order // for the Successes channel to be populated, you have to set // config.Producer.Return.Successes to true. func ExampleAsyncProducer_goroutines() { config := NewTestConfig() config.Producer.Return.Successes = true producer, err := NewAsyncProducer([]string{"localhost:9092"}, config) if err != nil { panic(err) } // Trap SIGINT to trigger a graceful shutdown. signals := make(chan os.Signal, 1) signal.Notify(signals, os.Interrupt) var ( wg sync.WaitGroup enqueued, successes, producerErrors int ) wg.Add(1) go func() { defer wg.Done() for range producer.Successes() { successes++ } }() wg.Add(1) go func() { defer wg.Done() for err := range producer.Errors() { log.Println(err) producerErrors++ } }() ProducerLoop: for { message := &ProducerMessage{Topic: "my_topic", Value: StringEncoder("testing 123")} select { case producer.Input() <- message: enqueued++ case <-signals: producer.AsyncClose() // Trigger a shutdown of the producer. break ProducerLoop } } wg.Wait() log.Printf("Successfully produced: %d; errors: %d\n", successes, producerErrors) } golang-github-ibm-sarama-1.43.2/balance_strategy.go000066400000000000000000001277571461256741300222310ustar00rootroot00000000000000package sarama import ( "container/heap" "errors" "fmt" "math" "sort" "strings" ) const ( // RangeBalanceStrategyName identifies strategies that use the range partition assignment strategy RangeBalanceStrategyName = "range" // RoundRobinBalanceStrategyName identifies strategies that use the round-robin partition assignment strategy RoundRobinBalanceStrategyName = "roundrobin" // StickyBalanceStrategyName identifies strategies that use the sticky-partition assignment strategy StickyBalanceStrategyName = "sticky" defaultGeneration = -1 ) // BalanceStrategyPlan is the results of any BalanceStrategy.Plan attempt. 
// It contains an allocation of topic/partitions by memberID in the form of // a `memberID -> topic -> partitions` map. type BalanceStrategyPlan map[string]map[string][]int32 // Add assigns a topic with a number partitions to a member. func (p BalanceStrategyPlan) Add(memberID, topic string, partitions ...int32) { if len(partitions) == 0 { return } if _, ok := p[memberID]; !ok { p[memberID] = make(map[string][]int32, 1) } p[memberID][topic] = append(p[memberID][topic], partitions...) } // -------------------------------------------------------------------- // BalanceStrategy is used to balance topics and partitions // across members of a consumer group type BalanceStrategy interface { // Name uniquely identifies the strategy. Name() string // Plan accepts a map of `memberID -> metadata` and a map of `topic -> partitions` // and returns a distribution plan. Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) // AssignmentData returns the serialized assignment data for the specified // memberID AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) } // -------------------------------------------------------------------- // NewBalanceStrategyRange returns a range balance strategy, // which is the default and assigns partitions as ranges to consumer group members. // This follows the same logic as // https://kafka.apache.org/31/javadoc/org/apache/kafka/clients/consumer/RangeAssignor.html // // Example with two topics T1 and T2 with six partitions each (0..5) and two members (M1, M2): // // M1: {T1: [0, 1, 2], T2: [0, 1, 2]} // M2: {T1: [3, 4, 5], T2: [3, 4, 5]} func NewBalanceStrategyRange() BalanceStrategy { return &balanceStrategy{ name: RangeBalanceStrategyName, coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) { partitionsPerConsumer := len(partitions) / len(memberIDs) consumersWithExtraPartition := len(partitions) % len(memberIDs) sort.Strings(memberIDs) for i, memberID := range memberIDs { min := i*partitionsPerConsumer + int(math.Min(float64(consumersWithExtraPartition), float64(i))) extra := 0 if i < consumersWithExtraPartition { extra = 1 } max := min + partitionsPerConsumer + extra plan.Add(memberID, topic, partitions[min:max]...) } }, } } // Deprecated: use NewBalanceStrategyRange to avoid data race issue var BalanceStrategyRange = NewBalanceStrategyRange() // NewBalanceStrategySticky returns a sticky balance strategy, // which assigns partitions to members with an attempt to preserve earlier assignments // while maintain a balanced partition distribution. // Example with topic T with six partitions (0..5) and two members (M1, M2): // // M1: {T: [0, 2, 4]} // M2: {T: [1, 3, 5]} // // On reassignment with an additional consumer, you might get an assignment plan like: // // M1: {T: [0, 2]} // M2: {T: [1, 3]} // M3: {T: [4, 5]} func NewBalanceStrategySticky() BalanceStrategy { return &stickyBalanceStrategy{} } // Deprecated: use NewBalanceStrategySticky to avoid data race issue var BalanceStrategySticky = NewBalanceStrategySticky() // -------------------------------------------------------------------- type balanceStrategy struct { coreFn func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) name string } // Name implements BalanceStrategy. func (s *balanceStrategy) Name() string { return s.name } // Plan implements BalanceStrategy. 
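// For illustration only: the member IDs "m1"/"m2" and the topic name "events" below
// are made up, but the call shape matches this package's public API. A caller could
// exercise the range strategy's Plan roughly like this:
//
//	strategy := NewBalanceStrategyRange()
//	members := map[string]ConsumerGroupMemberMetadata{
//		"m1": {Topics: []string{"events"}},
//		"m2": {Topics: []string{"events"}},
//	}
//	plan, err := strategy.Plan(members, map[string][]int32{"events": {0, 1, 2, 3}})
//	// err == nil; plan["m1"]["events"] == []int32{0, 1}, plan["m2"]["events"] == []int32{2, 3}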
func (s *balanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) { // Build members by topic map mbt := make(map[string][]string) for memberID, meta := range members { for _, topic := range meta.Topics { mbt[topic] = append(mbt[topic], memberID) } } // func to sort and de-duplicate a StringSlice uniq := func(ss sort.StringSlice) []string { if ss.Len() < 2 { return ss } sort.Sort(ss) var i, j int for i = 1; i < ss.Len(); i++ { if ss[i] == ss[j] { continue } j++ ss.Swap(i, j) } return ss[:j+1] } // Assemble plan plan := make(BalanceStrategyPlan, len(members)) for topic, memberIDs := range mbt { s.coreFn(plan, uniq(memberIDs), topic, topics[topic]) } return plan, nil } // AssignmentData simple strategies do not require any shared assignment data func (s *balanceStrategy) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) { return nil, nil } type stickyBalanceStrategy struct { movements partitionMovements } // Name implements BalanceStrategy. func (s *stickyBalanceStrategy) Name() string { return StickyBalanceStrategyName } // Plan implements BalanceStrategy. func (s *stickyBalanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) { // track partition movements during generation of the partition assignment plan s.movements = partitionMovements{ Movements: make(map[topicPartitionAssignment]consumerPair), PartitionMovementsByTopic: make(map[string]map[consumerPair]map[topicPartitionAssignment]bool), } // prepopulate the current assignment state from userdata on the consumer group members currentAssignment, prevAssignment, err := prepopulateCurrentAssignments(members) if err != nil { return nil, err } // determine if we're dealing with a completely fresh assignment, or if there's existing assignment state isFreshAssignment := len(currentAssignment) == 0 // create a mapping of all current topic partitions and the consumers that can be assigned to them partition2AllPotentialConsumers := make(map[topicPartitionAssignment][]string) for topic, partitions := range topics { for _, partition := range partitions { partition2AllPotentialConsumers[topicPartitionAssignment{Topic: topic, Partition: partition}] = []string{} } } // create a mapping of all consumers to all potential topic partitions that can be assigned to them // also, populate the mapping of partitions to potential consumers consumer2AllPotentialPartitions := make(map[string][]topicPartitionAssignment, len(members)) for memberID, meta := range members { consumer2AllPotentialPartitions[memberID] = make([]topicPartitionAssignment, 0) for _, topicSubscription := range meta.Topics { // only evaluate topic subscriptions that are present in the supplied topics map if _, found := topics[topicSubscription]; found { for _, partition := range topics[topicSubscription] { topicPartition := topicPartitionAssignment{Topic: topicSubscription, Partition: partition} consumer2AllPotentialPartitions[memberID] = append(consumer2AllPotentialPartitions[memberID], topicPartition) partition2AllPotentialConsumers[topicPartition] = append(partition2AllPotentialConsumers[topicPartition], memberID) } } } // add this consumer to currentAssignment (with an empty topic partition assignment) if it does not already exist if _, exists := currentAssignment[memberID]; !exists { currentAssignment[memberID] = make([]topicPartitionAssignment, 0) } } // create a mapping of each partition to its current 
consumer, where possible currentPartitionConsumers := make(map[topicPartitionAssignment]string, len(currentAssignment)) unvisitedPartitions := make(map[topicPartitionAssignment]bool, len(partition2AllPotentialConsumers)) for partition := range partition2AllPotentialConsumers { unvisitedPartitions[partition] = true } var unassignedPartitions []topicPartitionAssignment for memberID, partitions := range currentAssignment { var keepPartitions []topicPartitionAssignment for _, partition := range partitions { // If this partition no longer exists at all, likely due to the // topic being deleted, we remove the partition from the member. if _, exists := partition2AllPotentialConsumers[partition]; !exists { continue } delete(unvisitedPartitions, partition) currentPartitionConsumers[partition] = memberID if !strsContains(members[memberID].Topics, partition.Topic) { unassignedPartitions = append(unassignedPartitions, partition) continue } keepPartitions = append(keepPartitions, partition) } currentAssignment[memberID] = keepPartitions } for unvisited := range unvisitedPartitions { unassignedPartitions = append(unassignedPartitions, unvisited) } // sort the topic partitions in order of priority for reassignment sortedPartitions := sortPartitions(currentAssignment, prevAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions) // at this point we have preserved all valid topic partition to consumer assignments and removed // all invalid topic partitions and invalid consumers. Now we need to assign unassignedPartitions // to consumers so that the topic partition assignments are as balanced as possible. // an ascending sorted set of consumers based on how many topic partitions are already assigned to them sortedCurrentSubscriptions := sortMemberIDsByPartitionAssignments(currentAssignment) s.balance(currentAssignment, prevAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumers) // Assemble plan plan := make(BalanceStrategyPlan, len(currentAssignment)) for memberID, assignments := range currentAssignment { if len(assignments) == 0 { plan[memberID] = make(map[string][]int32) } else { for _, assignment := range assignments { plan.Add(memberID, assignment.Topic, assignment.Partition) } } } return plan, nil } // AssignmentData serializes the set of topics currently assigned to the // specified member as part of the supplied balance plan func (s *stickyBalanceStrategy) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) { return encode(&StickyAssignorUserDataV1{ Topics: topics, Generation: generationID, }, nil) } func strsContains(s []string, value string) bool { for _, entry := range s { if entry == value { return true } } return false } // Balance assignments across consumers for maximum fairness and stickiness. 
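// As a rough illustration (the exact spread of orphaned partitions is an
// implementation detail, so treat the numbers as a sketch): if three members each
// hold two of topic T's six partitions and M3 leaves the group, a rebalance might
// yield
//
//	before: M1: {T: [0, 3]}  M2: {T: [1, 4]}  M3: {T: [2, 5]}
//	after:  M1: {T: [0, 3, 2]}  M2: {T: [1, 4, 5]}
//
// i.e. only the partitions that lost their owner move, while M1 and M2 keep what
// they already had.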
func (s *stickyBalanceStrategy) balance(currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedPartitions []topicPartitionAssignment, unassignedPartitions []topicPartitionAssignment, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) { initializing := len(sortedCurrentSubscriptions) == 0 || len(currentAssignment[sortedCurrentSubscriptions[0]]) == 0 // assign all unassigned partitions for _, partition := range unassignedPartitions { // skip if there is no potential consumer for the partition if len(partition2AllPotentialConsumers[partition]) == 0 { continue } sortedCurrentSubscriptions = assignPartition(partition, sortedCurrentSubscriptions, currentAssignment, consumer2AllPotentialPartitions, currentPartitionConsumer) } // narrow down the reassignment scope to only those partitions that can actually be reassigned for partition := range partition2AllPotentialConsumers { if !canTopicPartitionParticipateInReassignment(partition, partition2AllPotentialConsumers) { sortedPartitions = removeTopicPartitionFromMemberAssignments(sortedPartitions, partition) } } // narrow down the reassignment scope to only those consumers that are subject to reassignment fixedAssignments := make(map[string][]topicPartitionAssignment) for memberID := range consumer2AllPotentialPartitions { if !canConsumerParticipateInReassignment(memberID, currentAssignment, consumer2AllPotentialPartitions, partition2AllPotentialConsumers) { fixedAssignments[memberID] = currentAssignment[memberID] delete(currentAssignment, memberID) sortedCurrentSubscriptions = sortMemberIDsByPartitionAssignments(currentAssignment) } } // create a deep copy of the current assignment so we can revert to it if we do not get a more balanced assignment later preBalanceAssignment := deepCopyAssignment(currentAssignment) preBalancePartitionConsumers := make(map[topicPartitionAssignment]string, len(currentPartitionConsumer)) for k, v := range currentPartitionConsumer { preBalancePartitionConsumers[k] = v } reassignmentPerformed := s.performReassignments(sortedPartitions, currentAssignment, prevAssignment, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer) // if we are not preserving existing assignments and we have made changes to the current assignment // make sure we are getting a more balanced assignment; otherwise, revert to previous assignment if !initializing && reassignmentPerformed && getBalanceScore(currentAssignment) >= getBalanceScore(preBalanceAssignment) { currentAssignment = deepCopyAssignment(preBalanceAssignment) currentPartitionConsumer = make(map[topicPartitionAssignment]string, len(preBalancePartitionConsumers)) for k, v := range preBalancePartitionConsumers { currentPartitionConsumer[k] = v } } // add the fixed assignments (those that could not change) back for consumer, assignments := range fixedAssignments { currentAssignment[consumer] = assignments } } // NewBalanceStrategyRoundRobin returns a round-robin balance strategy, // which assigns partitions to members in alternating order. 
// For example, there are two topics (t0, t1) and two consumer (m0, m1), and each topic has three partitions (p0, p1, p2): // M0: [t0p0, t0p2, t1p1] // M1: [t0p1, t1p0, t1p2] func NewBalanceStrategyRoundRobin() BalanceStrategy { return new(roundRobinBalancer) } // Deprecated: use NewBalanceStrategyRoundRobin to avoid data race issue var BalanceStrategyRoundRobin = NewBalanceStrategyRoundRobin() type roundRobinBalancer struct{} func (b *roundRobinBalancer) Name() string { return RoundRobinBalanceStrategyName } func (b *roundRobinBalancer) Plan(memberAndMetadata map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) { if len(memberAndMetadata) == 0 || len(topics) == 0 { return nil, errors.New("members and topics are not provided") } // sort partitions var topicPartitions []topicAndPartition for topic, partitions := range topics { for _, partition := range partitions { topicPartitions = append(topicPartitions, topicAndPartition{topic: topic, partition: partition}) } } sort.SliceStable(topicPartitions, func(i, j int) bool { pi := topicPartitions[i] pj := topicPartitions[j] return pi.comparedValue() < pj.comparedValue() }) // sort members var members []memberAndTopic for memberID, meta := range memberAndMetadata { m := memberAndTopic{ memberID: memberID, topics: make(map[string]struct{}), } for _, t := range meta.Topics { m.topics[t] = struct{}{} } members = append(members, m) } sort.SliceStable(members, func(i, j int) bool { mi := members[i] mj := members[j] return mi.memberID < mj.memberID }) // assign partitions plan := make(BalanceStrategyPlan, len(members)) i := 0 n := len(members) for _, tp := range topicPartitions { m := members[i%n] for !m.hasTopic(tp.topic) { i++ m = members[i%n] } plan.Add(m.memberID, tp.topic, tp.partition) i++ } return plan, nil } func (b *roundRobinBalancer) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) { return nil, nil // do nothing for now } type topicAndPartition struct { topic string partition int32 } func (tp *topicAndPartition) comparedValue() string { return fmt.Sprintf("%s-%d", tp.topic, tp.partition) } type memberAndTopic struct { topics map[string]struct{} memberID string } func (m *memberAndTopic) hasTopic(topic string) bool { _, isExist := m.topics[topic] return isExist } // Calculate the balance score of the given assignment, as the sum of assigned partitions size difference of all consumer pairs. // A perfectly balanced assignment (with all consumers getting the same number of partitions) has a balance score of 0. // Lower balance score indicates a more balanced assignment. func getBalanceScore(assignment map[string][]topicPartitionAssignment) int { consumer2AssignmentSize := make(map[string]int, len(assignment)) for memberID, partitions := range assignment { consumer2AssignmentSize[memberID] = len(partitions) } var score float64 for memberID, consumerAssignmentSize := range consumer2AssignmentSize { delete(consumer2AssignmentSize, memberID) for _, otherConsumerAssignmentSize := range consumer2AssignmentSize { score += math.Abs(float64(consumerAssignmentSize - otherConsumerAssignmentSize)) } } return int(score) } // Determine whether the current assignment plan is balanced. 
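// For example (numbers invented for illustration), when every consumer could take
// every partition: {c1: 3 partitions, c2: 1 partition} is not balanced, because the
// spread exceeds one and c2 could still relieve c1; {c1: 2, c2: 2} and {c1: 2, c2: 1}
// both count as balanced, since min >= max-1 holds.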
func isBalanced(currentAssignment map[string][]topicPartitionAssignment, allSubscriptions map[string][]topicPartitionAssignment) bool { sortedCurrentSubscriptions := sortMemberIDsByPartitionAssignments(currentAssignment) min := len(currentAssignment[sortedCurrentSubscriptions[0]]) max := len(currentAssignment[sortedCurrentSubscriptions[len(sortedCurrentSubscriptions)-1]]) if min >= max-1 { // if minimum and maximum numbers of partitions assigned to consumers differ by at most one return true return true } // create a mapping from partitions to the consumer assigned to them allPartitions := make(map[topicPartitionAssignment]string) for memberID, partitions := range currentAssignment { for _, partition := range partitions { if _, exists := allPartitions[partition]; exists { Logger.Printf("Topic %s Partition %d is assigned more than one consumer", partition.Topic, partition.Partition) } allPartitions[partition] = memberID } } // for each consumer that does not have all the topic partitions it can get make sure none of the topic partitions it // could but did not get cannot be moved to it (because that would break the balance) for _, memberID := range sortedCurrentSubscriptions { consumerPartitions := currentAssignment[memberID] consumerPartitionCount := len(consumerPartitions) // skip if this consumer already has all the topic partitions it can get if consumerPartitionCount == len(allSubscriptions[memberID]) { continue } // otherwise make sure it cannot get any more potentialTopicPartitions := allSubscriptions[memberID] for _, partition := range potentialTopicPartitions { if !memberAssignmentsIncludeTopicPartition(currentAssignment[memberID], partition) { otherConsumer := allPartitions[partition] otherConsumerPartitionCount := len(currentAssignment[otherConsumer]) if consumerPartitionCount < otherConsumerPartitionCount { return false } } } } return true } // Reassign all topic partitions that need reassignment until balanced. 
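// A partition is only moved when doing so strictly improves balance: its current
// owner must hold at least two more partitions than the candidate consumer. For
// illustration, 4 versus 2 triggers a move (4 > 2+1), while 3 versus 2 does not,
// since swapping would merely trade places rather than reduce the spread.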
func (s *stickyBalanceStrategy) performReassignments(reassignablePartitions []topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) bool { reassignmentPerformed := false modified := false // repeat reassignment until no partition can be moved to improve the balance for { modified = false // reassign all reassignable partitions (starting from the partition with least potential consumers and if needed) // until the full list is processed or a balance is achieved for _, partition := range reassignablePartitions { if isBalanced(currentAssignment, consumer2AllPotentialPartitions) { break } // the partition must have at least two consumers if len(partition2AllPotentialConsumers[partition]) <= 1 { Logger.Printf("Expected more than one potential consumer for partition %s topic %d", partition.Topic, partition.Partition) } // the partition must have a consumer consumer := currentPartitionConsumer[partition] if consumer == "" { Logger.Printf("Expected topic %s partition %d to be assigned to a consumer", partition.Topic, partition.Partition) } if _, exists := prevAssignment[partition]; exists { if len(currentAssignment[consumer]) > (len(currentAssignment[prevAssignment[partition].MemberID]) + 1) { sortedCurrentSubscriptions = s.reassignPartition(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, prevAssignment[partition].MemberID) reassignmentPerformed = true modified = true continue } } // check if a better-suited consumer exists for the partition; if so, reassign it for _, otherConsumer := range partition2AllPotentialConsumers[partition] { if len(currentAssignment[consumer]) > (len(currentAssignment[otherConsumer]) + 1) { sortedCurrentSubscriptions = s.reassignPartitionToNewConsumer(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, consumer2AllPotentialPartitions) reassignmentPerformed = true modified = true break } } } if !modified { return reassignmentPerformed } } } // Identify a new consumer for a topic partition and reassign it. 
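// Because sortedCurrentSubscriptions is kept in ascending order of assignment count,
// the loop below hands the partition to the least-loaded consumer that is actually
// eligible for it, i.e. the first member whose potential partitions include it.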
func (s *stickyBalanceStrategy) reassignPartitionToNewConsumer(partition topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) []string { for _, anotherConsumer := range sortedCurrentSubscriptions { if memberAssignmentsIncludeTopicPartition(consumer2AllPotentialPartitions[anotherConsumer], partition) { return s.reassignPartition(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, anotherConsumer) } } return sortedCurrentSubscriptions } // Reassign a specific partition to a new consumer func (s *stickyBalanceStrategy) reassignPartition(partition topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string, newConsumer string) []string { consumer := currentPartitionConsumer[partition] // find the correct partition movement considering the stickiness requirement partitionToBeMoved := s.movements.getTheActualPartitionToBeMoved(partition, consumer, newConsumer) return s.processPartitionMovement(partitionToBeMoved, newConsumer, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer) } // Track the movement of a topic partition after assignment func (s *stickyBalanceStrategy) processPartitionMovement(partition topicPartitionAssignment, newConsumer string, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string) []string { oldConsumer := currentPartitionConsumer[partition] s.movements.movePartition(partition, oldConsumer, newConsumer) currentAssignment[oldConsumer] = removeTopicPartitionFromMemberAssignments(currentAssignment[oldConsumer], partition) currentAssignment[newConsumer] = append(currentAssignment[newConsumer], partition) currentPartitionConsumer[partition] = newConsumer return sortMemberIDsByPartitionAssignments(currentAssignment) } // Determine whether a specific consumer should be considered for topic partition assignment. func canConsumerParticipateInReassignment(memberID string, currentAssignment map[string][]topicPartitionAssignment, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) bool { currentPartitions := currentAssignment[memberID] currentAssignmentSize := len(currentPartitions) maxAssignmentSize := len(consumer2AllPotentialPartitions[memberID]) if currentAssignmentSize > maxAssignmentSize { Logger.Printf("The consumer %s is assigned more partitions than the maximum possible", memberID) } if currentAssignmentSize < maxAssignmentSize { // if a consumer is not assigned all its potential partitions it is subject to reassignment return true } for _, partition := range currentPartitions { if canTopicPartitionParticipateInReassignment(partition, partition2AllPotentialConsumers) { return true } } return false } // Only consider reassigning those topic partitions that have two or more potential consumers. func canTopicPartitionParticipateInReassignment(partition topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) bool { return len(partition2AllPotentialConsumers[partition]) >= 2 } // The assignment should improve the overall balance of the partition assignments to consumers. 
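// Like reassignPartitionToNewConsumer above, this walks the members in ascending
// order of current load, gives the partition to the first subscriber that can take
// it, and then re-sorts the member list so later assignments see the updated counts.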
func assignPartition(partition topicPartitionAssignment, sortedCurrentSubscriptions []string, currentAssignment map[string][]topicPartitionAssignment, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, currentPartitionConsumer map[topicPartitionAssignment]string) []string { for _, memberID := range sortedCurrentSubscriptions { if memberAssignmentsIncludeTopicPartition(consumer2AllPotentialPartitions[memberID], partition) { currentAssignment[memberID] = append(currentAssignment[memberID], partition) currentPartitionConsumer[partition] = memberID break } } return sortMemberIDsByPartitionAssignments(currentAssignment) } // Deserialize topic partition assignment data to aid with creation of a sticky assignment. func deserializeTopicPartitionAssignment(userDataBytes []byte) (StickyAssignorUserData, error) { userDataV1 := &StickyAssignorUserDataV1{} if err := decode(userDataBytes, userDataV1, nil); err != nil { userDataV0 := &StickyAssignorUserDataV0{} if err := decode(userDataBytes, userDataV0, nil); err != nil { return nil, err } return userDataV0, nil } return userDataV1, nil } // filterAssignedPartitions returns a map of consumer group members to their list of previously-assigned topic partitions, limited // to those topic partitions currently reported by the Kafka cluster. func filterAssignedPartitions(currentAssignment map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) map[string][]topicPartitionAssignment { assignments := deepCopyAssignment(currentAssignment) for memberID, partitions := range assignments { // perform in-place filtering i := 0 for _, partition := range partitions { if _, exists := partition2AllPotentialConsumers[partition]; exists { partitions[i] = partition i++ } } assignments[memberID] = partitions[:i] } return assignments } func removeTopicPartitionFromMemberAssignments(assignments []topicPartitionAssignment, topic topicPartitionAssignment) []topicPartitionAssignment { for i, assignment := range assignments { if assignment == topic { return append(assignments[:i], assignments[i+1:]...) 
} } return assignments } func memberAssignmentsIncludeTopicPartition(assignments []topicPartitionAssignment, topic topicPartitionAssignment) bool { for _, assignment := range assignments { if assignment == topic { return true } } return false } func sortPartitions(currentAssignment map[string][]topicPartitionAssignment, partitionsWithADifferentPreviousAssignment map[topicPartitionAssignment]consumerGenerationPair, isFreshAssignment bool, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) []topicPartitionAssignment { unassignedPartitions := make(map[topicPartitionAssignment]bool, len(partition2AllPotentialConsumers)) for partition := range partition2AllPotentialConsumers { unassignedPartitions[partition] = true } sortedPartitions := make([]topicPartitionAssignment, 0) if !isFreshAssignment && areSubscriptionsIdentical(partition2AllPotentialConsumers, consumer2AllPotentialPartitions) { // if this is a reassignment and the subscriptions are identical (all consumers can consumer from all topics) // then we just need to simply list partitions in a round robin fashion (from consumers with // most assigned partitions to those with least) assignments := filterAssignedPartitions(currentAssignment, partition2AllPotentialConsumers) // use priority-queue to evaluate consumer group members in descending-order based on // the number of topic partition assignments (i.e. consumers with most assignments first) pq := make(assignmentPriorityQueue, len(assignments)) i := 0 for consumerID, consumerAssignments := range assignments { pq[i] = &consumerGroupMember{ id: consumerID, assignments: consumerAssignments, } i++ } heap.Init(&pq) // loop until no consumer-group members remain for pq.Len() != 0 { member := pq[0] // partitions that were assigned to a different consumer last time var prevPartitionIndex int for i, partition := range member.assignments { if _, exists := partitionsWithADifferentPreviousAssignment[partition]; exists { prevPartitionIndex = i break } } if len(member.assignments) > 0 { partition := member.assignments[prevPartitionIndex] sortedPartitions = append(sortedPartitions, partition) delete(unassignedPartitions, partition) if prevPartitionIndex == 0 { member.assignments = member.assignments[1:] } else { member.assignments = append(member.assignments[:prevPartitionIndex], member.assignments[prevPartitionIndex+1:]...) 
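// the selected partition has now been spliced out of member.assignments; the
// remaining entries keep their relative order before the heap is re-fixed below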
} heap.Fix(&pq, 0) } else { heap.Pop(&pq) } } for partition := range unassignedPartitions { sortedPartitions = append(sortedPartitions, partition) } } else { // an ascending sorted set of topic partitions based on how many consumers can potentially use them sortedPartitions = sortPartitionsByPotentialConsumerAssignments(partition2AllPotentialConsumers) } return sortedPartitions } func sortMemberIDsByPartitionAssignments(assignments map[string][]topicPartitionAssignment) []string { // sort the members by the number of partition assignments in ascending order sortedMemberIDs := make([]string, 0, len(assignments)) for memberID := range assignments { sortedMemberIDs = append(sortedMemberIDs, memberID) } sort.SliceStable(sortedMemberIDs, func(i, j int) bool { ret := len(assignments[sortedMemberIDs[i]]) - len(assignments[sortedMemberIDs[j]]) if ret == 0 { return sortedMemberIDs[i] < sortedMemberIDs[j] } return len(assignments[sortedMemberIDs[i]]) < len(assignments[sortedMemberIDs[j]]) }) return sortedMemberIDs } func sortPartitionsByPotentialConsumerAssignments(partition2AllPotentialConsumers map[topicPartitionAssignment][]string) []topicPartitionAssignment { // sort the members by the number of partition assignments in descending order sortedPartionIDs := make([]topicPartitionAssignment, len(partition2AllPotentialConsumers)) i := 0 for partition := range partition2AllPotentialConsumers { sortedPartionIDs[i] = partition i++ } sort.Slice(sortedPartionIDs, func(i, j int) bool { if len(partition2AllPotentialConsumers[sortedPartionIDs[i]]) == len(partition2AllPotentialConsumers[sortedPartionIDs[j]]) { ret := strings.Compare(sortedPartionIDs[i].Topic, sortedPartionIDs[j].Topic) if ret == 0 { return sortedPartionIDs[i].Partition < sortedPartionIDs[j].Partition } return ret < 0 } return len(partition2AllPotentialConsumers[sortedPartionIDs[i]]) < len(partition2AllPotentialConsumers[sortedPartionIDs[j]]) }) return sortedPartionIDs } func deepCopyAssignment(assignment map[string][]topicPartitionAssignment) map[string][]topicPartitionAssignment { m := make(map[string][]topicPartitionAssignment, len(assignment)) for memberID, subscriptions := range assignment { m[memberID] = append(subscriptions[:0:0], subscriptions...) 
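// the three-index expression subscriptions[:0:0] yields an empty slice with zero
// capacity, so append is forced to allocate a new backing array and the copy shares
// no memory with the original assignment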
} return m } func areSubscriptionsIdentical(partition2AllPotentialConsumers map[topicPartitionAssignment][]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) bool { curMembers := make(map[string]int) for _, cur := range partition2AllPotentialConsumers { if len(curMembers) == 0 { for _, curMembersElem := range cur { curMembers[curMembersElem]++ } continue } if len(curMembers) != len(cur) { return false } yMap := make(map[string]int) for _, yElem := range cur { yMap[yElem]++ } for curMembersMapKey, curMembersMapVal := range curMembers { if yMap[curMembersMapKey] != curMembersMapVal { return false } } } curPartitions := make(map[topicPartitionAssignment]int) for _, cur := range consumer2AllPotentialPartitions { if len(curPartitions) == 0 { for _, curPartitionElem := range cur { curPartitions[curPartitionElem]++ } continue } if len(curPartitions) != len(cur) { return false } yMap := make(map[topicPartitionAssignment]int) for _, yElem := range cur { yMap[yElem]++ } for curMembersMapKey, curMembersMapVal := range curPartitions { if yMap[curMembersMapKey] != curMembersMapVal { return false } } } return true } // We need to process subscriptions' user data with each consumer's reported generation in mind // higher generations overwrite lower generations in case of a conflict // note that a conflict could exist only if user data is for different generations func prepopulateCurrentAssignments(members map[string]ConsumerGroupMemberMetadata) (map[string][]topicPartitionAssignment, map[topicPartitionAssignment]consumerGenerationPair, error) { currentAssignment := make(map[string][]topicPartitionAssignment) prevAssignment := make(map[topicPartitionAssignment]consumerGenerationPair) // for each partition we create a sorted map of its consumers by generation sortedPartitionConsumersByGeneration := make(map[topicPartitionAssignment]map[int]string) for memberID, meta := range members { consumerUserData, err := deserializeTopicPartitionAssignment(meta.UserData) if err != nil { return nil, nil, err } for _, partition := range consumerUserData.partitions() { if consumers, exists := sortedPartitionConsumersByGeneration[partition]; exists { if consumerUserData.hasGeneration() { if _, generationExists := consumers[consumerUserData.generation()]; generationExists { // same partition is assigned to two consumers during the same rebalance. 
// log a warning and skip this record Logger.Printf("Topic %s Partition %d is assigned to multiple consumers following sticky assignment generation %d", partition.Topic, partition.Partition, consumerUserData.generation()) continue } else { consumers[consumerUserData.generation()] = memberID } } else { consumers[defaultGeneration] = memberID } } else { generation := defaultGeneration if consumerUserData.hasGeneration() { generation = consumerUserData.generation() } sortedPartitionConsumersByGeneration[partition] = map[int]string{generation: memberID} } } } // prevAssignment holds the prior ConsumerGenerationPair (before current) of each partition // current and previous consumers are the last two consumers of each partition in the above sorted map for partition, consumers := range sortedPartitionConsumersByGeneration { // sort consumers by generation in decreasing order var generations []int for generation := range consumers { generations = append(generations, generation) } sort.Sort(sort.Reverse(sort.IntSlice(generations))) consumer := consumers[generations[0]] if _, exists := currentAssignment[consumer]; !exists { currentAssignment[consumer] = []topicPartitionAssignment{partition} } else { currentAssignment[consumer] = append(currentAssignment[consumer], partition) } // check for previous assignment, if any if len(generations) > 1 { prevAssignment[partition] = consumerGenerationPair{ MemberID: consumers[generations[1]], Generation: generations[1], } } } return currentAssignment, prevAssignment, nil } type consumerGenerationPair struct { MemberID string Generation int } // consumerPair represents a pair of Kafka consumer ids involved in a partition reassignment. type consumerPair struct { SrcMemberID string DstMemberID string } // partitionMovements maintains some data structures to simplify lookup of partition movements among consumers. 
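// Roughly speaking (the precise bookkeeping lives in movePartition and
// getTheActualPartitionToBeMoved below): if partition T/0 first moves c1 -> c2 and a
// later step moves it on from c2 to c3, the two hops are collapsed into a single
// c1 -> c3 record, and when a proposed move would reverse an existing movement for
// the same topic, the previously moved partition is sent back instead, which keeps
// the final plan closer to sticky.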
type partitionMovements struct { PartitionMovementsByTopic map[string]map[consumerPair]map[topicPartitionAssignment]bool Movements map[topicPartitionAssignment]consumerPair } func (p *partitionMovements) removeMovementRecordOfPartition(partition topicPartitionAssignment) consumerPair { pair := p.Movements[partition] delete(p.Movements, partition) partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic] delete(partitionMovementsForThisTopic[pair], partition) if len(partitionMovementsForThisTopic[pair]) == 0 { delete(partitionMovementsForThisTopic, pair) } if len(p.PartitionMovementsByTopic[partition.Topic]) == 0 { delete(p.PartitionMovementsByTopic, partition.Topic) } return pair } func (p *partitionMovements) addPartitionMovementRecord(partition topicPartitionAssignment, pair consumerPair) { p.Movements[partition] = pair if _, exists := p.PartitionMovementsByTopic[partition.Topic]; !exists { p.PartitionMovementsByTopic[partition.Topic] = make(map[consumerPair]map[topicPartitionAssignment]bool) } partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic] if _, exists := partitionMovementsForThisTopic[pair]; !exists { partitionMovementsForThisTopic[pair] = make(map[topicPartitionAssignment]bool) } partitionMovementsForThisTopic[pair][partition] = true } func (p *partitionMovements) movePartition(partition topicPartitionAssignment, oldConsumer, newConsumer string) { pair := consumerPair{ SrcMemberID: oldConsumer, DstMemberID: newConsumer, } if _, exists := p.Movements[partition]; exists { // this partition has previously moved existingPair := p.removeMovementRecordOfPartition(partition) if existingPair.DstMemberID != oldConsumer { Logger.Printf("Existing pair DstMemberID %s was not equal to the oldConsumer ID %s", existingPair.DstMemberID, oldConsumer) } if existingPair.SrcMemberID != newConsumer { // the partition is not moving back to its previous consumer p.addPartitionMovementRecord(partition, consumerPair{ SrcMemberID: existingPair.SrcMemberID, DstMemberID: newConsumer, }) } } else { p.addPartitionMovementRecord(partition, pair) } } func (p *partitionMovements) getTheActualPartitionToBeMoved(partition topicPartitionAssignment, oldConsumer, newConsumer string) topicPartitionAssignment { if _, exists := p.PartitionMovementsByTopic[partition.Topic]; !exists { return partition } if _, exists := p.Movements[partition]; exists { // this partition has previously moved if oldConsumer != p.Movements[partition].DstMemberID { Logger.Printf("Partition movement DstMemberID %s was not equal to the oldConsumer ID %s", p.Movements[partition].DstMemberID, oldConsumer) } oldConsumer = p.Movements[partition].SrcMemberID } partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic] reversePair := consumerPair{ SrcMemberID: newConsumer, DstMemberID: oldConsumer, } if _, exists := partitionMovementsForThisTopic[reversePair]; !exists { return partition } var reversePairPartition topicPartitionAssignment for otherPartition := range partitionMovementsForThisTopic[reversePair] { reversePairPartition = otherPartition } return reversePairPartition } func (p *partitionMovements) isLinked(src, dst string, pairs []consumerPair, currentPath []string) ([]string, bool) { if src == dst { return currentPath, false } if len(pairs) == 0 { return currentPath, false } for _, pair := range pairs { if src == pair.SrcMemberID && dst == pair.DstMemberID { currentPath = append(currentPath, src, dst) return currentPath, true } } for _, pair := range pairs { if 
pair.SrcMemberID != src { continue } // create a deep copy of the pairs, excluding the current pair reducedSet := make([]consumerPair, len(pairs)-1) i := 0 for _, p := range pairs { if p != pair { reducedSet[i] = pair i++ } } currentPath = append(currentPath, pair.SrcMemberID) return p.isLinked(pair.DstMemberID, dst, reducedSet, currentPath) } return currentPath, false } func (p *partitionMovements) in(cycle []string, cycles [][]string) bool { superCycle := make([]string, len(cycle)-1) for i := 0; i < len(cycle)-1; i++ { superCycle[i] = cycle[i] } superCycle = append(superCycle, cycle...) for _, foundCycle := range cycles { if len(foundCycle) == len(cycle) && indexOfSubList(superCycle, foundCycle) != -1 { return true } } return false } func (p *partitionMovements) hasCycles(pairs []consumerPair) bool { cycles := make([][]string, 0) for _, pair := range pairs { // create a deep copy of the pairs, excluding the current pair reducedPairs := make([]consumerPair, len(pairs)-1) i := 0 for _, p := range pairs { if p != pair { reducedPairs[i] = pair i++ } } if path, linked := p.isLinked(pair.DstMemberID, pair.SrcMemberID, reducedPairs, []string{pair.SrcMemberID}); linked { if !p.in(path, cycles) { cycles = append(cycles, path) Logger.Printf("A cycle of length %d was found: %v", len(path)-1, path) } } } // for now we want to make sure there is no partition movements of the same topic between a pair of consumers. // the odds of finding a cycle among more than two consumers seem to be very low (according to various randomized // tests with the given sticky algorithm) that it should not worth the added complexity of handling those cases. for _, cycle := range cycles { if len(cycle) == 3 { return true } } return false } func (p *partitionMovements) isSticky() bool { for topic, movements := range p.PartitionMovementsByTopic { movementPairs := make([]consumerPair, len(movements)) i := 0 for pair := range movements { movementPairs[i] = pair i++ } if p.hasCycles(movementPairs) { Logger.Printf("Stickiness is violated for topic %s", topic) Logger.Printf("Partition movements for this topic occurred among the following consumer pairs: %v", movements) return false } } return true } func indexOfSubList(source []string, target []string) int { targetSize := len(target) maxCandidate := len(source) - targetSize nextCand: for candidate := 0; candidate <= maxCandidate; candidate++ { j := candidate for i := 0; i < targetSize; i++ { if target[i] != source[j] { // Element mismatch, try next cand continue nextCand } j++ } // All elements of candidate matched target return candidate } return -1 } type consumerGroupMember struct { id string assignments []topicPartitionAssignment } // assignmentPriorityQueue is a priority-queue of consumer group members that is sorted // in descending order (most assignments to least assignments). 
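// A minimal usage sketch with container/heap (the consumer ids below are made up):
//
//	pq := assignmentPriorityQueue{
//		{id: "c1", assignments: make([]topicPartitionAssignment, 2)},
//		{id: "c2", assignments: make([]topicPartitionAssignment, 5)},
//	}
//	heap.Init(&pq)
//	// pq[0].id == "c2": the member with the most assignments is always at the root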
type assignmentPriorityQueue []*consumerGroupMember func (pq assignmentPriorityQueue) Len() int { return len(pq) } func (pq assignmentPriorityQueue) Less(i, j int) bool { // order assignment priority queue in descending order using assignment-count/member-id if len(pq[i].assignments) == len(pq[j].assignments) { return pq[i].id > pq[j].id } return len(pq[i].assignments) > len(pq[j].assignments) } func (pq assignmentPriorityQueue) Swap(i, j int) { pq[i], pq[j] = pq[j], pq[i] } func (pq *assignmentPriorityQueue) Push(x interface{}) { member := x.(*consumerGroupMember) *pq = append(*pq, member) } func (pq *assignmentPriorityQueue) Pop() interface{} { old := *pq n := len(old) member := old[n-1] *pq = old[0 : n-1] return member } golang-github-ibm-sarama-1.43.2/balance_strategy_test.go000066400000000000000000002223331461256741300232520ustar00rootroot00000000000000package sarama import ( "bytes" "fmt" "math" "math/rand" "reflect" "sort" "testing" "time" ) func TestBalanceStrategyRange(t *testing.T) { tests := []struct { name string members map[string][]string topics map[string][]int32 expected BalanceStrategyPlan }{ { name: "2 members, 2 topics, 4 partitions each", members: map[string][]string{"M1": {"T1", "T2"}, "M2": {"T1", "T2"}}, topics: map[string][]int32{"T1": {0, 1, 2, 3}, "T2": {0, 1, 2, 3}}, expected: BalanceStrategyPlan{ "M1": map[string][]int32{"T1": {0, 1}, "T2": {0, 1}}, "M2": map[string][]int32{"T1": {2, 3}, "T2": {2, 3}}, }, }, { name: "2 members, 2 topics, 4 partitions each (different member ids)", members: map[string][]string{"M3": {"T1", "T2"}, "M4": {"T1", "T2"}}, topics: map[string][]int32{"T1": {0, 1, 2, 3}, "T2": {0, 1, 2, 3}}, expected: BalanceStrategyPlan{ "M3": map[string][]int32{"T1": {0, 1}, "T2": {0, 1}}, "M4": map[string][]int32{"T1": {2, 3}, "T2": {2, 3}}, }, }, { name: "3 members, 1 topic, 1 partition each", members: map[string][]string{"M1": {"T1"}, "M2": {"T1"}, "M3": {"T1"}}, topics: map[string][]int32{"T1": {0}}, expected: BalanceStrategyPlan{ "M1": map[string][]int32{"T1": {0}}, }, }, { name: "2 members, 2 topics, 3 partitions each", members: map[string][]string{"M1": {"T1", "T2"}, "M2": {"T1", "T2"}}, topics: map[string][]int32{"T1": {0, 1, 2}, "T2": {0, 1, 2}}, expected: BalanceStrategyPlan{ "M1": map[string][]int32{"T1": {0, 1}, "T2": {0, 1}}, "M2": map[string][]int32{"T1": {2}, "T2": {2}}, }, }, { name: "2 members, 2 topics, different subscriptions", members: map[string][]string{"M1": {"T1"}, "M2": {"T1", "T2"}}, topics: map[string][]int32{"T1": {0, 1}, "T2": {0, 1}}, expected: BalanceStrategyPlan{ "M1": map[string][]int32{"T1": {0}}, "M2": map[string][]int32{"T1": {1}, "T2": {0, 1}}, }, }, { name: "2 members, 1 topic with duplicate assignments, 8 partitions each", members: map[string][]string{"M1": {"T1", "T1", "T1", "T1", "T1", "T1", "T1", "T1"}, "M2": {"T1", "T1", "T1", "T1", "T1", "T1", "T1", "T1"}}, topics: map[string][]int32{"T1": {0, 1, 2, 3, 4, 5, 6, 7}}, expected: BalanceStrategyPlan{ "M1": map[string][]int32{"T1": {0, 1, 2, 3}}, "M2": map[string][]int32{"T1": {4, 5, 6, 7}}, }, }, } strategy := NewBalanceStrategyRange() if strategy.Name() != "range" { t.Errorf("Unexpected stategy name\nexpected: range\nactual: %v", strategy.Name()) } for _, test := range tests { t.Run(test.name, func(t *testing.T) { members := make(map[string]ConsumerGroupMemberMetadata) for memberID, topics := range test.members { members[memberID] = ConsumerGroupMemberMetadata{Topics: topics} } actual, err := strategy.Plan(members, test.topics) if err != nil { 
t.Errorf("Unexpected error %v", err) } else if !reflect.DeepEqual(actual, test.expected) { t.Errorf("Plan does not match expectation\nexpected: %#v\nactual: %#v", test.expected, actual) } }) } } func TestBalanceStrategyRangeAssignmentData(t *testing.T) { strategy := NewBalanceStrategyRange() members := make(map[string]ConsumerGroupMemberMetadata, 2) members["consumer1"] = ConsumerGroupMemberMetadata{ Topics: []string{"topic1"}, } members["consumer2"] = ConsumerGroupMemberMetadata{ Topics: []string{"topic1"}, } actual, err := strategy.AssignmentData("consumer1", map[string][]int32{"topic1": {0, 1}}, 1) if err != nil { t.Errorf("Error building assignment data: %v", err) } if actual != nil { t.Error("Invalid assignment data returned from AssignmentData") } } func TestBalanceStrategyRoundRobin(t *testing.T) { tests := []struct { members map[string][]string topics map[string][]int32 expected BalanceStrategyPlan }{ { members: map[string][]string{"M1": {"T1", "T2", "T3"}, "M2": {"T1", "T2", "T3"}}, topics: map[string][]int32{"T1": {0}, "T2": {0}, "T3": {0}}, expected: BalanceStrategyPlan{ "M1": map[string][]int32{"T1": {0}, "T3": {0}}, "M2": map[string][]int32{"T2": {0}}, }, }, { members: map[string][]string{"M1": {"T1", "T2", "T3"}, "M2": {"T1", "T2", "T3"}}, topics: map[string][]int32{"T1": {0}, "T2": {0, 1}, "T3": {0, 1, 2, 3}}, expected: BalanceStrategyPlan{ "M1": map[string][]int32{"T1": {0}, "T2": {1}, "T3": {1, 3}}, "M2": map[string][]int32{"T2": {0}, "T3": {0, 2}}, }, }, { members: map[string][]string{"M1": {"T1"}, "M2": {"T1"}}, topics: map[string][]int32{"T1": {0}}, expected: BalanceStrategyPlan{ "M1": map[string][]int32{"T1": {0}}, }, }, { members: map[string][]string{"M1": {"T1", "T2", "T3"}}, topics: map[string][]int32{"T1": {0}, "T2": {0}, "T3": {0, 1, 2}}, expected: BalanceStrategyPlan{ "M1": map[string][]int32{"T1": {0}, "T2": {0}, "T3": {0, 1, 2}}, }, }, { members: map[string][]string{"M1": {"T1", "T2", "T3"}, "M2": {"T1"}}, topics: map[string][]int32{"T1": {0}, "T2": {0}, "T3": {0}}, expected: BalanceStrategyPlan{ "M1": map[string][]int32{"T1": {0}, "T2": {0}, "T3": {0}}, }, }, { members: map[string][]string{"M1": {"T1", "T2", "T3"}, "M2": {"T1", "T3"}}, topics: map[string][]int32{"T1": {0}, "T2": {0}, "T3": {0}}, expected: BalanceStrategyPlan{ "M1": map[string][]int32{"T1": {0}, "T2": {0}}, "M2": map[string][]int32{"T3": {0}}, }, }, { members: map[string][]string{"M": {"T1", "T2", "TT2"}, "M2": {"T1", "T2", "TT2"}, "M3": {"T1", "T2", "TT2"}}, topics: map[string][]int32{"T1": {0}, "T2": {0}, "TT2": {0}}, expected: BalanceStrategyPlan{ "M": map[string][]int32{"T1": {0}}, "M2": map[string][]int32{"T2": {0}}, "M3": map[string][]int32{"TT2": {0}}, }, }, } strategy := NewBalanceStrategyRoundRobin() if strategy.Name() != "roundrobin" { t.Errorf("Unexpected strategy name\nexpected: roundrobin\nactual: %v", strategy.Name()) } for _, test := range tests { members := make(map[string]ConsumerGroupMemberMetadata) for memberID, topics := range test.members { members[memberID] = ConsumerGroupMemberMetadata{Topics: topics} } actual, err := strategy.Plan(members, test.topics) if err != nil { t.Errorf("Unexpected error %v", err) } else if !reflect.DeepEqual(actual, test.expected) { t.Errorf("Plan does not match expectation\nexpected: %#v\nactual: %#v", test.expected, actual) } } } func Test_deserializeTopicPartitionAssignment(t *testing.T) { type args struct { userDataBytes []byte } tests := []struct { name string args args want StickyAssignorUserData wantErr bool }{ { name: "Nil userdata 
bytes", args: args{}, want: &StickyAssignorUserDataV1{}, }, { name: "Non-empty invalid userdata bytes", args: args{ userDataBytes: []byte{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x03, 'f', 'o', 'o', }, }, wantErr: true, }, { name: "Valid v0 userdata bytes", args: args{ userDataBytes: []byte{ 0x00, 0x00, 0x00, 0x01, 0x00, 0x03, 0x74, 0x30, 0x33, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x05, }, }, want: &StickyAssignorUserDataV0{ Topics: map[string][]int32{"t03": {5}}, topicPartitions: []topicPartitionAssignment{ { Topic: "t03", Partition: 5, }, }, }, }, { name: "Valid v1 userdata bytes", args: args{ userDataBytes: []byte{ 0x00, 0x00, 0x00, 0x01, 0x00, 0x03, 0x74, 0x30, 0x36, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0xff, 0xff, 0xff, 0xff, }, }, want: &StickyAssignorUserDataV1{ Topics: map[string][]int32{"t06": {0, 4}}, Generation: -1, topicPartitions: []topicPartitionAssignment{ { Topic: "t06", Partition: 0, }, { Topic: "t06", Partition: 4, }, }, }, }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { got, err := deserializeTopicPartitionAssignment(tt.args.userDataBytes) if (err != nil) != tt.wantErr { t.Errorf("deserializeTopicPartitionAssignment() error = %v, wantErr %v", err, tt.wantErr) return } if !reflect.DeepEqual(got, tt.want) { t.Errorf("deserializeTopicPartitionAssignment() = %v, want %v", got, tt.want) } }) } } func TestBalanceStrategyRoundRobinAssignmentData(t *testing.T) { strategy := NewBalanceStrategyRoundRobin() members := make(map[string]ConsumerGroupMemberMetadata, 2) members["consumer1"] = ConsumerGroupMemberMetadata{ Topics: []string{"topic1"}, } members["consumer2"] = ConsumerGroupMemberMetadata{ Topics: []string{"topic1"}, } actual, err := strategy.AssignmentData("consumer1", map[string][]int32{"topic1": {0, 1}}, 1) if err != nil { t.Errorf("Error building assignment data: %v", err) } if actual != nil { t.Error("Invalid assignment data returned from AssignmentData") } } func Test_prepopulateCurrentAssignments(t *testing.T) { type args struct { members map[string]ConsumerGroupMemberMetadata } tests := []struct { name string args args wantCurrentAssignments map[string][]topicPartitionAssignment wantPrevAssignments map[topicPartitionAssignment]consumerGenerationPair wantErr bool }{ { name: "Empty map", wantCurrentAssignments: map[string][]topicPartitionAssignment{}, wantPrevAssignments: map[topicPartitionAssignment]consumerGenerationPair{}, }, { name: "Single consumer", args: args{ members: map[string]ConsumerGroupMemberMetadata{ "c01": { Version: 2, UserData: []byte{ 0x00, 0x00, 0x00, 0x01, 0x00, 0x03, 0x74, 0x30, 0x36, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0xff, 0xff, 0xff, 0xff, }, }, }, }, wantCurrentAssignments: map[string][]topicPartitionAssignment{ "c01": { { Topic: "t06", Partition: 0, }, { Topic: "t06", Partition: 4, }, }, }, wantPrevAssignments: map[topicPartitionAssignment]consumerGenerationPair{}, }, { name: "Duplicate consumer assignments in metadata", args: args{ members: map[string]ConsumerGroupMemberMetadata{ "c01": { Version: 2, UserData: []byte{ 0x00, 0x00, 0x00, 0x01, 0x00, 0x03, 0x74, 0x30, 0x36, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0xff, 0xff, 0xff, 0xff, }, }, "c02": { Version: 2, UserData: []byte{ 0x00, 0x00, 0x00, 0x01, 0x00, 0x03, 0x74, 0x30, 0x36, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0xff, 0xff, 0xff, 0xff, }, }, }, }, wantCurrentAssignments: map[string][]topicPartitionAssignment{ 
"c01": { { Topic: "t06", Partition: 0, }, { Topic: "t06", Partition: 4, }, }, }, wantPrevAssignments: map[topicPartitionAssignment]consumerGenerationPair{}, }, { name: "Different generations (5, 6) of consumer assignments in metadata", args: args{ members: map[string]ConsumerGroupMemberMetadata{ "c01": { Version: 2, UserData: []byte{ 0x00, 0x00, 0x00, 0x01, 0x00, 0x03, 0x74, 0x30, 0x36, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x05, }, }, "c02": { Version: 2, UserData: []byte{ 0x00, 0x00, 0x00, 0x01, 0x00, 0x03, 0x74, 0x30, 0x36, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x06, }, }, }, }, wantCurrentAssignments: map[string][]topicPartitionAssignment{ "c01": { { Topic: "t06", Partition: 0, }, { Topic: "t06", Partition: 4, }, }, }, wantPrevAssignments: map[topicPartitionAssignment]consumerGenerationPair{ { Topic: "t06", Partition: 0, }: { Generation: 5, MemberID: "c01", }, { Topic: "t06", Partition: 4, }: { Generation: 5, MemberID: "c01", }, }, }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { _, gotPrevAssignments, err := prepopulateCurrentAssignments(tt.args.members) if (err != nil) != tt.wantErr { t.Errorf("prepopulateCurrentAssignments() error = %v, wantErr %v", err, tt.wantErr) } if !reflect.DeepEqual(gotPrevAssignments, tt.wantPrevAssignments) { t.Errorf("deserializeTopicPartitionAssignment() prevAssignments = %v, want %v", gotPrevAssignments, tt.wantPrevAssignments) } }) } } func Test_areSubscriptionsIdentical(t *testing.T) { type args struct { partition2AllPotentialConsumers map[topicPartitionAssignment][]string consumer2AllPotentialPartitions map[string][]topicPartitionAssignment } tests := []struct { name string args args want bool }{ { name: "Empty consumers and partitions", args: args{ partition2AllPotentialConsumers: make(map[topicPartitionAssignment][]string), consumer2AllPotentialPartitions: make(map[string][]topicPartitionAssignment), }, want: true, }, { name: "Topic partitions with identical consumer entries", args: args{ partition2AllPotentialConsumers: map[topicPartitionAssignment][]string{ {Topic: "t1", Partition: 0}: {"c1", "c2", "c3"}, {Topic: "t1", Partition: 1}: {"c1", "c2", "c3"}, {Topic: "t1", Partition: 2}: {"c1", "c2", "c3"}, }, consumer2AllPotentialPartitions: make(map[string][]topicPartitionAssignment), }, want: true, }, { name: "Topic partitions with mixed up consumer entries", args: args{ partition2AllPotentialConsumers: map[topicPartitionAssignment][]string{ {Topic: "t1", Partition: 0}: {"c1", "c2", "c3"}, {Topic: "t1", Partition: 1}: {"c2", "c3", "c1"}, {Topic: "t1", Partition: 2}: {"c3", "c1", "c2"}, }, consumer2AllPotentialPartitions: make(map[string][]topicPartitionAssignment), }, want: true, }, { name: "Topic partitions with different consumer entries", args: args{ partition2AllPotentialConsumers: map[topicPartitionAssignment][]string{ {Topic: "t1", Partition: 0}: {"c1", "c2", "c3"}, {Topic: "t1", Partition: 1}: {"c2", "c3", "c1"}, {Topic: "t1", Partition: 2}: {"cX", "c1", "c2"}, }, consumer2AllPotentialPartitions: make(map[string][]topicPartitionAssignment), }, want: false, }, { name: "Topic partitions with different number of consumer entries", args: args{ partition2AllPotentialConsumers: map[topicPartitionAssignment][]string{ {Topic: "t1", Partition: 0}: {"c1", "c2", "c3"}, {Topic: "t1", Partition: 1}: {"c2", "c3", "c1"}, {Topic: "t1", Partition: 2}: {"c1", "c2"}, }, consumer2AllPotentialPartitions: 
make(map[string][]topicPartitionAssignment), }, want: false, }, { name: "Consumers with identical topic partitions", args: args{ partition2AllPotentialConsumers: make(map[topicPartitionAssignment][]string), consumer2AllPotentialPartitions: map[string][]topicPartitionAssignment{ "c1": {{Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 1}, {Topic: "t1", Partition: 2}}, "c2": {{Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 1}, {Topic: "t1", Partition: 2}}, "c3": {{Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 1}, {Topic: "t1", Partition: 2}}, }, }, want: true, }, { name: "Consumer2 with mixed up consumer entries", args: args{ partition2AllPotentialConsumers: make(map[topicPartitionAssignment][]string), consumer2AllPotentialPartitions: map[string][]topicPartitionAssignment{ "c1": {{Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 1}, {Topic: "t1", Partition: 2}}, "c2": {{Topic: "t1", Partition: 1}, {Topic: "t1", Partition: 2}, {Topic: "t1", Partition: 0}}, "c3": {{Topic: "t1", Partition: 2}, {Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 1}}, }, }, want: true, }, { name: "Consumer2 with different consumer entries", args: args{ partition2AllPotentialConsumers: make(map[topicPartitionAssignment][]string), consumer2AllPotentialPartitions: map[string][]topicPartitionAssignment{ "c1": {{Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 1}, {Topic: "t1", Partition: 2}}, "c2": {{Topic: "t1", Partition: 1}, {Topic: "t1", Partition: 2}, {Topic: "t1", Partition: 0}}, "c3": {{Topic: "tX", Partition: 2}, {Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 1}}, }, }, want: false, }, { name: "Consumer2 with different number of consumer entries", args: args{ partition2AllPotentialConsumers: make(map[topicPartitionAssignment][]string), consumer2AllPotentialPartitions: map[string][]topicPartitionAssignment{ "c1": {{Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 1}, {Topic: "t1", Partition: 2}}, "c2": {{Topic: "t1", Partition: 1}, {Topic: "t1", Partition: 2}, {Topic: "t1", Partition: 0}}, "c3": {{Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 1}}, }, }, want: false, }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { if got := areSubscriptionsIdentical(tt.args.partition2AllPotentialConsumers, tt.args.consumer2AllPotentialPartitions); got != tt.want { t.Errorf("areSubscriptionsIdentical() = %v, want %v", got, tt.want) } }) } } func Test_sortMemberIDsByPartitionAssignments(t *testing.T) { type args struct { assignments map[string][]topicPartitionAssignment } tests := []struct { name string args args want []string }{ { name: "Null assignments", want: make([]string, 0), }, { name: "Single assignment", args: args{ assignments: map[string][]topicPartitionAssignment{ "c1": { {Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 1}, {Topic: "t1", Partition: 2}, }, }, }, want: []string{"c1"}, }, { name: "Multiple assignments with different partition counts", args: args{ assignments: map[string][]topicPartitionAssignment{ "c1": { {Topic: "t1", Partition: 0}, }, "c2": { {Topic: "t1", Partition: 1}, {Topic: "t1", Partition: 2}, }, "c3": { {Topic: "t1", Partition: 3}, {Topic: "t1", Partition: 4}, {Topic: "t1", Partition: 5}, }, }, }, want: []string{"c1", "c2", "c3"}, }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { if got := sortMemberIDsByPartitionAssignments(tt.args.assignments); !reflect.DeepEqual(got, tt.want) { t.Errorf("sortMemberIDsByPartitionAssignments() = %v, want %v", got, tt.want) } }) } } func 
Test_sortPartitions(t *testing.T) { type args struct { currentAssignment map[string][]topicPartitionAssignment partitionsWithADifferentPreviousAssignment map[topicPartitionAssignment]consumerGenerationPair isFreshAssignment bool partition2AllPotentialConsumers map[topicPartitionAssignment][]string consumer2AllPotentialPartitions map[string][]topicPartitionAssignment } tests := []struct { name string args args want []topicPartitionAssignment }{ { name: "Empty everything", want: make([]topicPartitionAssignment, 0), }, { name: "Base case", args: args{ currentAssignment: map[string][]topicPartitionAssignment{ "c1": {{Topic: "t1", Partition: 0}}, "c2": {{Topic: "t1", Partition: 1}}, "c3": {{Topic: "t1", Partition: 2}}, }, consumer2AllPotentialPartitions: map[string][]topicPartitionAssignment{ "c1": {{Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 1}, {Topic: "t1", Partition: 2}}, "c2": {{Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 1}, {Topic: "t1", Partition: 2}}, "c3": {{Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 1}, {Topic: "t1", Partition: 2}}, }, partition2AllPotentialConsumers: map[topicPartitionAssignment][]string{ {Topic: "t1", Partition: 0}: {"c1", "c2", "c3"}, {Topic: "t1", Partition: 1}: {"c2", "c3", "c1"}, {Topic: "t1", Partition: 2}: {"c3", "c1", "c2"}, }, }, }, { name: "Partitions assigned to a different consumer last time", args: args{ currentAssignment: map[string][]topicPartitionAssignment{ "c1": {{Topic: "t1", Partition: 0}}, }, consumer2AllPotentialPartitions: map[string][]topicPartitionAssignment{ "c1": {{Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 1}, {Topic: "t1", Partition: 2}}, "c2": {{Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 1}, {Topic: "t1", Partition: 2}}, "c3": {{Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 1}, {Topic: "t1", Partition: 2}}, }, partition2AllPotentialConsumers: map[topicPartitionAssignment][]string{ {Topic: "t1", Partition: 0}: {"c1", "c2", "c3"}, {Topic: "t1", Partition: 1}: {"c2", "c3", "c1"}, {Topic: "t1", Partition: 2}: {"c3", "c1", "c2"}, }, partitionsWithADifferentPreviousAssignment: map[topicPartitionAssignment]consumerGenerationPair{ {Topic: "t1", Partition: 0}: {Generation: 1, MemberID: "c2"}, }, }, }, { name: "Partitions assigned to a different consumer last time", args: args{ currentAssignment: map[string][]topicPartitionAssignment{ "c1": {{Topic: "t1", Partition: 0}}, "c2": {{Topic: "t1", Partition: 1}}, }, consumer2AllPotentialPartitions: map[string][]topicPartitionAssignment{ "c1": {{Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 1}, {Topic: "t1", Partition: 2}}, "c2": {{Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 1}, {Topic: "t1", Partition: 2}}, "c3": {{Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 1}, {Topic: "t1", Partition: 2}}, }, partition2AllPotentialConsumers: map[topicPartitionAssignment][]string{ {Topic: "t1", Partition: 0}: {"c1", "c2", "c3"}, {Topic: "t1", Partition: 1}: {"c2", "c3", "c1"}, {Topic: "t1", Partition: 2}: {"c3", "c1", "c2"}, }, partitionsWithADifferentPreviousAssignment: map[topicPartitionAssignment]consumerGenerationPair{ {Topic: "t1", Partition: 0}: {Generation: 1, MemberID: "c2"}, }, }, }, { name: "Fresh assignment", args: args{ isFreshAssignment: true, currentAssignment: map[string][]topicPartitionAssignment{}, consumer2AllPotentialPartitions: map[string][]topicPartitionAssignment{ "c1": {{Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 1}, {Topic: "t1", Partition: 2}}, "c2": {{Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 
1}, {Topic: "t1", Partition: 2}}, "c3": {{Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 1}, {Topic: "t1", Partition: 2}}, }, partition2AllPotentialConsumers: map[topicPartitionAssignment][]string{ {Topic: "t1", Partition: 0}: {"c1", "c2", "c3"}, {Topic: "t1", Partition: 1}: {"c2", "c3", "c1"}, {Topic: "t1", Partition: 2}: {"c3", "c1", "c2"}, }, partitionsWithADifferentPreviousAssignment: map[topicPartitionAssignment]consumerGenerationPair{ {Topic: "t1", Partition: 0}: {Generation: 1, MemberID: "c2"}, }, }, }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { got := sortPartitions(tt.args.currentAssignment, tt.args.partitionsWithADifferentPreviousAssignment, tt.args.isFreshAssignment, tt.args.partition2AllPotentialConsumers, tt.args.consumer2AllPotentialPartitions) if tt.want != nil && !reflect.DeepEqual(got, tt.want) { t.Errorf("sortPartitions() = %v, want %v", got, tt.want) } }) } } func Test_filterAssignedPartitions(t *testing.T) { type args struct { currentAssignment map[string][]topicPartitionAssignment partition2AllPotentialConsumers map[topicPartitionAssignment][]string } tests := []struct { name string args args want map[string][]topicPartitionAssignment }{ { name: "All partitions accounted for", args: args{ currentAssignment: map[string][]topicPartitionAssignment{ "c1": {{Topic: "t1", Partition: 0}}, "c2": {{Topic: "t1", Partition: 1}}, }, partition2AllPotentialConsumers: map[topicPartitionAssignment][]string{ {Topic: "t1", Partition: 0}: {"c1"}, {Topic: "t1", Partition: 1}: {"c2"}, }, }, want: map[string][]topicPartitionAssignment{ "c1": {{Topic: "t1", Partition: 0}}, "c2": {{Topic: "t1", Partition: 1}}, }, }, { name: "One consumer using an unrecognized partition", args: args{ currentAssignment: map[string][]topicPartitionAssignment{ "c1": {{Topic: "t1", Partition: 0}}, "c2": {{Topic: "t1", Partition: 1}}, }, partition2AllPotentialConsumers: map[topicPartitionAssignment][]string{ {Topic: "t1", Partition: 0}: {"c1"}, }, }, want: map[string][]topicPartitionAssignment{ "c1": {{Topic: "t1", Partition: 0}}, "c2": {}, }, }, { name: "Interleaved consumer removal", args: args{ currentAssignment: map[string][]topicPartitionAssignment{ "c1": {{Topic: "t1", Partition: 0}}, "c2": {{Topic: "t1", Partition: 1}}, "c3": {{Topic: "t1", Partition: 2}}, }, partition2AllPotentialConsumers: map[topicPartitionAssignment][]string{ {Topic: "t1", Partition: 0}: {"c1"}, {Topic: "t1", Partition: 2}: {"c3"}, }, }, want: map[string][]topicPartitionAssignment{ "c1": {{Topic: "t1", Partition: 0}}, "c2": {}, "c3": {{Topic: "t1", Partition: 2}}, }, }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { if got := filterAssignedPartitions(tt.args.currentAssignment, tt.args.partition2AllPotentialConsumers); !reflect.DeepEqual(got, tt.want) { t.Errorf("filterAssignedPartitions() = %v, want %v", got, tt.want) } }) } } func Test_canConsumerParticipateInReassignment(t *testing.T) { type args struct { memberID string currentAssignment map[string][]topicPartitionAssignment consumer2AllPotentialPartitions map[string][]topicPartitionAssignment partition2AllPotentialConsumers map[topicPartitionAssignment][]string } tests := []struct { name string args args want bool }{ { name: "Consumer has been assigned partitions not available to it", args: args{ memberID: "c1", currentAssignment: map[string][]topicPartitionAssignment{ "c1": { {Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 1}, {Topic: "t1", Partition: 2}, }, "c2": {}, }, consumer2AllPotentialPartitions: 
map[string][]topicPartitionAssignment{ "c1": { {Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 1}, }, "c2": { {Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 1}, {Topic: "t1", Partition: 2}, }, }, partition2AllPotentialConsumers: map[topicPartitionAssignment][]string{ {Topic: "t1", Partition: 0}: {"c1", "c2"}, {Topic: "t1", Partition: 1}: {"c1", "c2"}, {Topic: "t1", Partition: 2}: {"c2"}, }, }, want: true, }, { name: "Consumer has been assigned all available partitions", args: args{ memberID: "c1", currentAssignment: map[string][]topicPartitionAssignment{ "c1": { {Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 1}, }, }, consumer2AllPotentialPartitions: map[string][]topicPartitionAssignment{ "c1": { {Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 1}, }, }, partition2AllPotentialConsumers: map[topicPartitionAssignment][]string{ {Topic: "t1", Partition: 0}: {"c1"}, {Topic: "t1", Partition: 1}: {"c1"}, }, }, want: false, }, { name: "Consumer has not been assigned all available partitions", args: args{ memberID: "c1", currentAssignment: map[string][]topicPartitionAssignment{ "c1": { {Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 1}, }, }, consumer2AllPotentialPartitions: map[string][]topicPartitionAssignment{ "c1": { {Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 1}, {Topic: "t1", Partition: 2}, }, }, partition2AllPotentialConsumers: map[topicPartitionAssignment][]string{ {Topic: "t1", Partition: 0}: {"c1"}, {Topic: "t1", Partition: 1}: {"c1"}, {Topic: "t1", Partition: 2}: {"c1"}, }, }, want: true, }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { if got := canConsumerParticipateInReassignment(tt.args.memberID, tt.args.currentAssignment, tt.args.consumer2AllPotentialPartitions, tt.args.partition2AllPotentialConsumers); got != tt.want { t.Errorf("canConsumerParticipateInReassignment() = %v, want %v", got, tt.want) } }) } } func Test_removeTopicPartitionFromMemberAssignments(t *testing.T) { type args struct { assignments []topicPartitionAssignment topic topicPartitionAssignment } tests := []struct { name string args args want []topicPartitionAssignment }{ { name: "Empty", args: args{ assignments: make([]topicPartitionAssignment, 0), topic: topicPartitionAssignment{Topic: "t1", Partition: 0}, }, want: make([]topicPartitionAssignment, 0), }, { name: "Remove first entry", args: args{ assignments: []topicPartitionAssignment{ {Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 1}, {Topic: "t1", Partition: 2}, }, topic: topicPartitionAssignment{Topic: "t1", Partition: 0}, }, want: []topicPartitionAssignment{ {Topic: "t1", Partition: 1}, {Topic: "t1", Partition: 2}, }, }, { name: "Remove middle entry", args: args{ assignments: []topicPartitionAssignment{ {Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 1}, {Topic: "t1", Partition: 2}, }, topic: topicPartitionAssignment{Topic: "t1", Partition: 1}, }, want: []topicPartitionAssignment{ {Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 2}, }, }, { name: "Remove last entry", args: args{ assignments: []topicPartitionAssignment{ {Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 1}, {Topic: "t1", Partition: 2}, }, topic: topicPartitionAssignment{Topic: "t1", Partition: 2}, }, want: []topicPartitionAssignment{ {Topic: "t1", Partition: 0}, {Topic: "t1", Partition: 1}, }, }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { if got := removeTopicPartitionFromMemberAssignments(tt.args.assignments, tt.args.topic); !reflect.DeepEqual(got, tt.want) { 
t.Errorf("removeTopicPartitionFromMemberAssignments() = %v, want %v", got, tt.want) } }) } } func Test_assignPartition(t *testing.T) { type args struct { partition topicPartitionAssignment sortedCurrentSubscriptions []string currentAssignment map[string][]topicPartitionAssignment consumer2AllPotentialPartitions map[string][]topicPartitionAssignment currentPartitionConsumer map[topicPartitionAssignment]string } tests := []struct { name string args args want []string wantCurrentAssignment map[string][]topicPartitionAssignment wantCurrentPartitionConsumer map[topicPartitionAssignment]string }{ { name: "Base", args: args{ partition: topicPartitionAssignment{Topic: "t1", Partition: 2}, sortedCurrentSubscriptions: []string{"c3", "c1", "c2"}, currentAssignment: map[string][]topicPartitionAssignment{ "c1": { {Topic: "t1", Partition: 0}, }, "c2": { {Topic: "t1", Partition: 1}, }, "c3": {}, }, consumer2AllPotentialPartitions: map[string][]topicPartitionAssignment{ "c1": { {Topic: "t1", Partition: 0}, }, "c2": { {Topic: "t1", Partition: 1}, }, "c3": { {Topic: "t1", Partition: 2}, }, }, currentPartitionConsumer: map[topicPartitionAssignment]string{ {Topic: "t1", Partition: 0}: "c1", {Topic: "t1", Partition: 1}: "c2", }, }, want: []string{"c1", "c2", "c3"}, wantCurrentAssignment: map[string][]topicPartitionAssignment{ "c1": { {Topic: "t1", Partition: 0}, }, "c2": { {Topic: "t1", Partition: 1}, }, "c3": { {Topic: "t1", Partition: 2}, }, }, wantCurrentPartitionConsumer: map[topicPartitionAssignment]string{ {Topic: "t1", Partition: 0}: "c1", {Topic: "t1", Partition: 1}: "c2", {Topic: "t1", Partition: 2}: "c3", }, }, { name: "Unassignable Partition", args: args{ partition: topicPartitionAssignment{Topic: "t1", Partition: 3}, sortedCurrentSubscriptions: []string{"c3", "c1", "c2"}, currentAssignment: map[string][]topicPartitionAssignment{ "c1": { {Topic: "t1", Partition: 0}, }, "c2": { {Topic: "t1", Partition: 1}, }, "c3": {}, }, consumer2AllPotentialPartitions: map[string][]topicPartitionAssignment{ "c1": { {Topic: "t1", Partition: 0}, }, "c2": { {Topic: "t1", Partition: 1}, }, "c3": { {Topic: "t1", Partition: 2}, }, }, currentPartitionConsumer: map[topicPartitionAssignment]string{ {Topic: "t1", Partition: 0}: "c1", {Topic: "t1", Partition: 1}: "c2", }, }, want: []string{"c3", "c1", "c2"}, wantCurrentAssignment: map[string][]topicPartitionAssignment{ "c1": { {Topic: "t1", Partition: 0}, }, "c2": { {Topic: "t1", Partition: 1}, }, "c3": {}, }, wantCurrentPartitionConsumer: map[topicPartitionAssignment]string{ {Topic: "t1", Partition: 0}: "c1", {Topic: "t1", Partition: 1}: "c2", }, }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { if got := assignPartition(tt.args.partition, tt.args.sortedCurrentSubscriptions, tt.args.currentAssignment, tt.args.consumer2AllPotentialPartitions, tt.args.currentPartitionConsumer); !reflect.DeepEqual(got, tt.want) { t.Errorf("assignPartition() = %v, want %v", got, tt.want) } if !reflect.DeepEqual(tt.args.currentAssignment, tt.wantCurrentAssignment) { t.Errorf("assignPartition() currentAssignment = %v, want %v", tt.args.currentAssignment, tt.wantCurrentAssignment) } if !reflect.DeepEqual(tt.args.currentPartitionConsumer, tt.wantCurrentPartitionConsumer) { t.Errorf("assignPartition() currentPartitionConsumer = %v, want %v", tt.args.currentPartitionConsumer, tt.wantCurrentPartitionConsumer) } }) } } func Test_stickyBalanceStrategy_Plan(t *testing.T) { type args struct { members map[string]ConsumerGroupMemberMetadata topics map[string][]int32 } tests := 
[]struct { name string s *stickyBalanceStrategy args args }{ { name: "One consumer with no topics", args: args{ members: map[string]ConsumerGroupMemberMetadata{ "consumer": {}, }, topics: make(map[string][]int32), }, }, { name: "One consumer with non-existent topic", args: args{ members: map[string]ConsumerGroupMemberMetadata{ "consumer": { Topics: []string{"topic"}, }, }, topics: map[string][]int32{ "topic": make([]int32, 0), }, }, }, { name: "One consumer with one topic", args: args{ members: map[string]ConsumerGroupMemberMetadata{ "consumer": { Topics: []string{"topic"}, }, }, topics: map[string][]int32{ "topic": {0, 1, 2}, }, }, }, { name: "Only assigns partitions from subscribed topics", args: args{ members: map[string]ConsumerGroupMemberMetadata{ "consumer": { Topics: []string{"topic"}, }, }, topics: map[string][]int32{ "topic": {0, 1, 2}, "other": {0, 1, 2}, }, }, }, { name: "One consumer with multiple topics", args: args{ members: map[string]ConsumerGroupMemberMetadata{ "consumer": { Topics: []string{"topic1", "topic2"}, }, }, topics: map[string][]int32{ "topic1": {0}, "topic2": {0, 1}, }, }, }, { name: "Two consumers with one topic and one partition", args: args{ members: map[string]ConsumerGroupMemberMetadata{ "consumer1": { Topics: []string{"topic"}, }, "consumer2": { Topics: []string{"topic"}, }, }, topics: map[string][]int32{ "topic": {0}, }, }, }, { name: "Two consumers with one topic and two partitions", args: args{ members: map[string]ConsumerGroupMemberMetadata{ "consumer1": { Topics: []string{"topic"}, }, "consumer2": { Topics: []string{"topic"}, }, }, topics: map[string][]int32{ "topic": {0, 1}, }, }, }, { name: "Multiple consumers with mixed topic subscriptions", args: args{ members: map[string]ConsumerGroupMemberMetadata{ "consumer1": { Topics: []string{"topic1"}, }, "consumer2": { Topics: []string{"topic1", "topic2"}, }, "consumer3": { Topics: []string{"topic1"}, }, }, topics: map[string][]int32{ "topic1": {0, 1, 2}, "topic2": {0, 1}, }, }, }, { name: "Two consumers with two topics and six partitions", args: args{ members: map[string]ConsumerGroupMemberMetadata{ "consumer1": { Topics: []string{"topic1", "topic2"}, }, "consumer2": { Topics: []string{"topic1", "topic2"}, }, }, topics: map[string][]int32{ "topic1": {0, 1, 2}, "topic2": {0, 1, 2}, }, }, }, { name: "Three consumers (two old, one new) with one topic and twelve partitions", args: args{ members: map[string]ConsumerGroupMemberMetadata{ "consumer1": { Topics: []string{"topic1"}, UserData: encodeSubscriberPlanWithGeneration(t, map[string][]int32{"topic1": {4, 11, 8, 5, 9, 2}}, 1), }, "consumer2": { Topics: []string{"topic1"}, UserData: encodeSubscriberPlanWithGeneration(t, map[string][]int32{"topic1": {1, 3, 0, 7, 10, 6}}, 1), }, "consumer3": { Topics: []string{"topic1"}, }, }, topics: map[string][]int32{ "topic1": {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, }, }, }, { name: "Three consumers (two old, one new) with one topic and 13 partitions", args: args{ members: map[string]ConsumerGroupMemberMetadata{ "consumer1": { Topics: []string{"topic1"}, UserData: encodeSubscriberPlanWithGeneration(t, map[string][]int32{"topic1": {4, 11, 8, 5, 9, 2, 6}}, 1), }, "consumer2": { Topics: []string{"topic1"}, UserData: encodeSubscriberPlanWithGeneration(t, map[string][]int32{"topic1": {1, 3, 0, 7, 10, 12}}, 1), }, "consumer3": { Topics: []string{"topic1"}, }, }, topics: map[string][]int32{ "topic1": {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}, }, }, }, { name: "One consumer that is no longer subscribed to a topic that it had 
previously been consuming from", args: args{ members: map[string]ConsumerGroupMemberMetadata{ "consumer1": { Topics: []string{"topic2"}, UserData: encodeSubscriberPlanWithGeneration(t, map[string][]int32{"topic1": {0}}, 1), }, }, topics: map[string][]int32{ "topic1": {0}, "topic2": {0}, }, }, }, { name: "Two consumers where one is no longer interested in consuming from a topic that it had been consuming from", args: args{ members: map[string]ConsumerGroupMemberMetadata{ "consumer1": { Topics: []string{"topic2"}, UserData: encodeSubscriberPlanWithGeneration(t, map[string][]int32{"topic1": {0}}, 1), }, "consumer2": { Topics: []string{"topic1", "topic2"}, UserData: encodeSubscriberPlanWithGeneration(t, map[string][]int32{"topic1": {1}}, 1), }, }, topics: map[string][]int32{ "topic1": {0, 1}, "topic2": {0, 1}, }, }, }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { s := &stickyBalanceStrategy{} plan, err := s.Plan(tt.args.members, tt.args.topics) verifyPlanIsBalancedAndSticky(t, s, tt.args.members, plan, err) verifyFullyBalanced(t, plan) }) } } func Test_stickyBalanceStrategy_Plan_KIP54_ExampleOne(t *testing.T) { s := &stickyBalanceStrategy{} // PLAN 1 members := map[string]ConsumerGroupMemberMetadata{ "consumer1": { Topics: []string{"topic1", "topic2", "topic3", "topic4"}, }, "consumer2": { Topics: []string{"topic1", "topic2", "topic3", "topic4"}, }, "consumer3": { Topics: []string{"topic1", "topic2", "topic3", "topic4"}, }, } topics := map[string][]int32{ "topic1": {0, 1}, "topic2": {0, 1}, "topic3": {0, 1}, "topic4": {0, 1}, } plan1, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan1, err) verifyFullyBalanced(t, plan1) // PLAN 2 delete(members, "consumer1") members["consumer2"] = ConsumerGroupMemberMetadata{ Topics: []string{"topic1", "topic2", "topic3", "topic4"}, UserData: encodeSubscriberPlan(t, plan1["consumer2"]), } members["consumer3"] = ConsumerGroupMemberMetadata{ Topics: []string{"topic1", "topic2", "topic3", "topic4"}, UserData: encodeSubscriberPlan(t, plan1["consumer3"]), } plan2, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan2, err) verifyFullyBalanced(t, plan2) } func Test_stickyBalanceStrategy_Plan_KIP54_ExampleTwo(t *testing.T) { s := &stickyBalanceStrategy{} // PLAN 1 members := map[string]ConsumerGroupMemberMetadata{ "consumer1": { Topics: []string{"topic1"}, }, "consumer2": { Topics: []string{"topic1", "topic2"}, }, "consumer3": { Topics: []string{"topic1", "topic2", "topic3"}, }, } topics := map[string][]int32{ "topic1": {0}, "topic2": {0, 1}, "topic3": {0, 1, 2}, } plan1, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan1, err) if len(plan1["consumer1"]["topic1"]) != 1 || len(plan1["consumer2"]["topic2"]) != 2 || len(plan1["consumer3"]["topic3"]) != 3 { t.Error("Incorrect distribution of topic partition assignments") } // PLAN 2 delete(members, "consumer1") members["consumer2"] = ConsumerGroupMemberMetadata{ Topics: members["consumer2"].Topics, UserData: encodeSubscriberPlan(t, plan1["consumer2"]), } members["consumer3"] = ConsumerGroupMemberMetadata{ Topics: members["consumer3"].Topics, UserData: encodeSubscriberPlan(t, plan1["consumer3"]), } plan2, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan2, err) verifyFullyBalanced(t, plan2) if len(plan2["consumer2"]["topic1"]) != 1 || len(plan2["consumer2"]["topic2"]) != 2 || len(plan2["consumer3"]["topic3"]) != 3 { t.Error("Incorrect distribution of topic partition 
assignments") } } func Test_stickyBalanceStrategy_Plan_KIP54_ExampleThree(t *testing.T) { s := &stickyBalanceStrategy{} topicNames := []string{"topic1", "topic2"} // PLAN 1 members := map[string]ConsumerGroupMemberMetadata{ "consumer1": { Topics: topicNames, }, "consumer2": { Topics: topicNames, }, } topics := map[string][]int32{ "topic1": {0, 1}, "topic2": {0, 1}, } plan1, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan1, err) // PLAN 2 members["consumer1"] = ConsumerGroupMemberMetadata{ Topics: topicNames, } members["consumer2"] = ConsumerGroupMemberMetadata{ Topics: topicNames, UserData: encodeSubscriberPlan(t, plan1["consumer2"]), } members["consumer3"] = ConsumerGroupMemberMetadata{ Topics: topicNames, UserData: encodeSubscriberPlan(t, plan1["consumer3"]), } plan2, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan2, err) verifyFullyBalanced(t, plan2) } func Test_stickyBalanceStrategy_Plan_AddRemoveConsumerOneTopic(t *testing.T) { s := &stickyBalanceStrategy{} // PLAN 1 members := map[string]ConsumerGroupMemberMetadata{ "consumer1": { Topics: []string{"topic"}, }, } topics := map[string][]int32{ "topic": {0, 1, 2}, } plan1, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan1, err) // PLAN 2 members["consumer1"] = ConsumerGroupMemberMetadata{ Topics: []string{"topic"}, UserData: encodeSubscriberPlan(t, plan1["consumer1"]), } members["consumer2"] = ConsumerGroupMemberMetadata{ Topics: []string{"topic"}, } plan2, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan2, err) // PLAN 3 delete(members, "consumer1") members["consumer2"] = ConsumerGroupMemberMetadata{ Topics: []string{"topic"}, UserData: encodeSubscriberPlan(t, plan2["consumer2"]), } plan3, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan3, err) } func Test_stickyBalanceStrategy_Plan_PoorRoundRobinAssignmentScenario(t *testing.T) { s := &stickyBalanceStrategy{} // PLAN 1 members := map[string]ConsumerGroupMemberMetadata{ "consumer1": { Topics: []string{"topic1", "topic2", "topic3", "topic4", "topic5"}, }, "consumer2": { Topics: []string{"topic1", "topic3", "topic5"}, }, "consumer3": { Topics: []string{"topic1", "topic3", "topic5"}, }, "consumer4": { Topics: []string{"topic1", "topic2", "topic3", "topic4", "topic5"}, }, } topics := make(map[string][]int32, 5) for i := 1; i <= 5; i++ { partitions := make([]int32, i%2+1) for j := 0; j < i%2+1; j++ { partitions[j] = int32(j) } topics[fmt.Sprintf("topic%d", i)] = partitions } plan, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan, err) } func Test_stickyBalanceStrategy_Plan_AddRemoveTopicTwoConsumers(t *testing.T) { s := &stickyBalanceStrategy{} // PLAN 1 members := map[string]ConsumerGroupMemberMetadata{ "consumer1": { Topics: []string{"topic1"}, }, "consumer2": { Topics: []string{"topic1"}, }, } topics := map[string][]int32{ "topic1": {0, 1, 2}, } plan1, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan1, err) verifyFullyBalanced(t, plan1) // PLAN 2 members["consumer1"] = ConsumerGroupMemberMetadata{ Topics: []string{"topic1", "topic2"}, UserData: encodeSubscriberPlan(t, plan1["consumer1"]), } members["consumer2"] = ConsumerGroupMemberMetadata{ Topics: []string{"topic1", "topic2"}, UserData: encodeSubscriberPlan(t, plan1["consumer2"]), } topics["topic2"] = []int32{0, 1, 2} plan2, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan2, err) 
verifyFullyBalanced(t, plan2) // PLAN 3 members["consumer1"] = ConsumerGroupMemberMetadata{ Topics: []string{"topic1", "topic2"}, UserData: encodeSubscriberPlan(t, plan2["consumer1"]), } members["consumer2"] = ConsumerGroupMemberMetadata{ Topics: []string{"topic1", "topic2"}, UserData: encodeSubscriberPlan(t, plan2["consumer2"]), } delete(topics, "topic1") plan3, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan3, err) verifyFullyBalanced(t, plan3) } func Test_stickyBalanceStrategy_Plan_ReassignmentAfterOneConsumerLeaves(t *testing.T) { s := &stickyBalanceStrategy{} // PLAN 1 members := make(map[string]ConsumerGroupMemberMetadata, 20) for i := 0; i < 20; i++ { topics := make([]string, 20) for j := 0; j < 20; j++ { topics[j] = fmt.Sprintf("topic%d", j) } members[fmt.Sprintf("consumer%d", i)] = ConsumerGroupMemberMetadata{Topics: topics} } topics := make(map[string][]int32, 20) for i := 0; i < 20; i++ { partitions := make([]int32, 20) for j := 0; j < 20; j++ { partitions[j] = int32(j) } topics[fmt.Sprintf("topic%d", i)] = partitions } plan1, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan1, err) for i := 0; i < 20; i++ { topics := make([]string, 20) for j := 0; j < 20; j++ { topics[j] = fmt.Sprintf("topic%d", j) } members[fmt.Sprintf("consumer%d", i)] = ConsumerGroupMemberMetadata{ Topics: members[fmt.Sprintf("consumer%d", i)].Topics, UserData: encodeSubscriberPlan(t, plan1[fmt.Sprintf("consumer%d", i)]), } } delete(members, "consumer10") plan2, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan2, err) } func Test_stickyBalanceStrategy_Plan_ReassignmentAfterOneConsumerAdded(t *testing.T) { s := &stickyBalanceStrategy{} // PLAN 1 members := make(map[string]ConsumerGroupMemberMetadata) for i := 0; i < 10; i++ { members[fmt.Sprintf("consumer%d", i)] = ConsumerGroupMemberMetadata{Topics: []string{"topic1"}} } partitions := make([]int32, 20) for j := 0; j < 20; j++ { partitions[j] = int32(j) } topics := map[string][]int32{"topic1": partitions} plan1, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan1, err) // add a new consumer members["consumer10"] = ConsumerGroupMemberMetadata{Topics: []string{"topic1"}} plan2, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan2, err) } func Test_stickyBalanceStrategy_Plan_SameSubscriptions(t *testing.T) { s := &stickyBalanceStrategy{} // PLAN 1 members := make(map[string]ConsumerGroupMemberMetadata, 20) for i := 0; i < 9; i++ { topics := make([]string, 15) for j := 0; j < 15; j++ { topics[j] = fmt.Sprintf("topic%d", j) } members[fmt.Sprintf("consumer%d", i)] = ConsumerGroupMemberMetadata{Topics: topics} } topics := make(map[string][]int32, 15) for i := 0; i < 15; i++ { partitions := make([]int32, i) for j := 0; j < i; j++ { partitions[j] = int32(j) } topics[fmt.Sprintf("topic%d", i)] = partitions } plan1, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan1, err) // PLAN 2 for i := 0; i < 9; i++ { members[fmt.Sprintf("consumer%d", i)] = ConsumerGroupMemberMetadata{ Topics: members[fmt.Sprintf("consumer%d", i)].Topics, UserData: encodeSubscriberPlan(t, plan1[fmt.Sprintf("consumer%d", i)]), } } delete(members, "consumer5") plan2, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan2, err) } func Test_stickyBalanceStrategy_Plan_LargeAssignmentWithMultipleConsumersLeaving(t *testing.T) { s := &stickyBalanceStrategy{} r := 
rand.New(rand.NewSource(time.Now().UnixNano())) // PLAN 1 members := make(map[string]ConsumerGroupMemberMetadata, 20) for i := 0; i < 200; i++ { topics := make([]string, 200) for j := 0; j < 200; j++ { topics[j] = fmt.Sprintf("topic%d", j) } members[fmt.Sprintf("consumer%d", i)] = ConsumerGroupMemberMetadata{Topics: topics} } topics := make(map[string][]int32, 40) for i := 0; i < 40; i++ { partitionCount := r.Intn(20) partitions := make([]int32, partitionCount) for j := 0; j < partitionCount; j++ { partitions[j] = int32(j) } topics[fmt.Sprintf("topic%d", i)] = partitions } plan1, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan1, err) for i := 0; i < 200; i++ { members[fmt.Sprintf("consumer%d", i)] = ConsumerGroupMemberMetadata{ Topics: members[fmt.Sprintf("consumer%d", i)].Topics, UserData: encodeSubscriberPlan(t, plan1[fmt.Sprintf("consumer%d", i)]), } } for i := 0; i < 50; i++ { delete(members, fmt.Sprintf("consumer%d", i)) } plan2, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan2, err) } func Test_stickyBalanceStrategy_Plan_NewSubscription(t *testing.T) { s := &stickyBalanceStrategy{} members := make(map[string]ConsumerGroupMemberMetadata, 20) for i := 0; i < 3; i++ { topics := make([]string, 0) for j := i; j <= 3*i-2; j++ { topics = append(topics, fmt.Sprintf("topic%d", j)) } members[fmt.Sprintf("consumer%d", i)] = ConsumerGroupMemberMetadata{Topics: topics} } topics := make(map[string][]int32, 5) for i := 1; i < 5; i++ { topics[fmt.Sprintf("topic%d", i)] = []int32{0} } plan1, err := s.Plan(members, topics) if err != nil { t.Errorf("stickyBalanceStrategy.Plan() error = %v", err) return } verifyValidityAndBalance(t, members, plan1) members["consumer0"] = ConsumerGroupMemberMetadata{Topics: []string{"topic1"}} plan2, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan2, err) } func Test_stickyBalanceStrategy_Plan_ReassignmentWithRandomSubscriptionsAndChanges(t *testing.T) { r := rand.New(rand.NewSource(time.Now().UnixNano())) minNumConsumers := 20 maxNumConsumers := 40 minNumTopics := 10 maxNumTopics := 20 for round := 0; round < 100; round++ { numTopics := minNumTopics + r.Intn(maxNumTopics-minNumTopics) topics := make([]string, numTopics) partitionsPerTopic := make(map[string][]int32, numTopics) for i := 0; i < numTopics; i++ { topicName := fmt.Sprintf("topic%d", i) topics[i] = topicName partitions := make([]int32, maxNumTopics) for j := 0; j < maxNumTopics; j++ { partitions[j] = int32(j) } partitionsPerTopic[topicName] = partitions } numConsumers := minNumConsumers + r.Intn(maxNumConsumers-minNumConsumers) members := make(map[string]ConsumerGroupMemberMetadata, numConsumers) for i := 0; i < numConsumers; i++ { sub := getRandomSublist(r, topics) sort.Strings(sub) members[fmt.Sprintf("consumer%d", i)] = ConsumerGroupMemberMetadata{Topics: sub} } s := &stickyBalanceStrategy{} plan, err := s.Plan(members, partitionsPerTopic) verifyPlanIsBalancedAndSticky(t, s, members, plan, err) // PLAN 2 membersPlan2 := make(map[string]ConsumerGroupMemberMetadata, numConsumers) for i := 0; i < numConsumers; i++ { sub := getRandomSublist(r, topics) sort.Strings(sub) membersPlan2[fmt.Sprintf("consumer%d", i)] = ConsumerGroupMemberMetadata{ Topics: sub, UserData: encodeSubscriberPlan(t, plan[fmt.Sprintf("consumer%d", i)]), } } plan2, err := s.Plan(membersPlan2, partitionsPerTopic) verifyPlanIsBalancedAndSticky(t, s, membersPlan2, plan2, err) } } func Test_stickyBalanceStrategy_Plan_MoveExistingAssignments(t 
*testing.T) { s := &stickyBalanceStrategy{} topics := make(map[string][]int32, 6) for i := 1; i <= 6; i++ { topics[fmt.Sprintf("topic%d", i)] = []int32{0} } members := make(map[string]ConsumerGroupMemberMetadata, 3) members["consumer1"] = ConsumerGroupMemberMetadata{ Topics: []string{"topic1", "topic2"}, UserData: encodeSubscriberPlan(t, map[string][]int32{"topic1": {0}}), } members["consumer2"] = ConsumerGroupMemberMetadata{ Topics: []string{"topic1", "topic2", "topic3", "topic4"}, UserData: encodeSubscriberPlan(t, map[string][]int32{"topic2": {0}, "topic3": {0}}), } members["consumer3"] = ConsumerGroupMemberMetadata{ Topics: []string{"topic2", "topic3", "topic4", "topic5", "topic6"}, UserData: encodeSubscriberPlan(t, map[string][]int32{"topic4": {0}, "topic5": {0}, "topic6": {0}}), } plan, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan, err) } func Test_stickyBalanceStrategy_Plan_Stickiness(t *testing.T) { s := &stickyBalanceStrategy{} topics := map[string][]int32{"topic1": {0, 1, 2}} members := map[string]ConsumerGroupMemberMetadata{ "consumer1": {Topics: []string{"topic1"}}, "consumer2": {Topics: []string{"topic1"}}, "consumer3": {Topics: []string{"topic1"}}, "consumer4": {Topics: []string{"topic1"}}, } plan1, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan1, err) // PLAN 2 // remove the potential group leader delete(members, "consumer1") for i := 2; i <= 4; i++ { members[fmt.Sprintf("consumer%d", i)] = ConsumerGroupMemberMetadata{ Topics: []string{"topic1"}, UserData: encodeSubscriberPlan(t, plan1[fmt.Sprintf("consumer%d", i)]), } } plan2, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan2, err) } func Test_stickyBalanceStrategy_Plan_AssignmentUpdatedForDeletedTopic(t *testing.T) { s := &stickyBalanceStrategy{} topics := make(map[string][]int32, 2) topics["topic1"] = []int32{0} topics["topic3"] = make([]int32, 100) for i := 0; i < 100; i++ { topics["topic3"][i] = int32(i) } members := map[string]ConsumerGroupMemberMetadata{ "consumer1": {Topics: []string{"topic1", "topic2", "topic3"}}, } plan, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan, err) verifyFullyBalanced(t, plan) if (len(plan["consumer1"]["topic1"]) + len(plan["consumer1"]["topic3"])) != 101 { t.Error("Incorrect number of partitions assigned") return } } func Test_stickyBalanceStrategy_Plan_NoExceptionRaisedWhenOnlySubscribedTopicDeleted(t *testing.T) { s := &stickyBalanceStrategy{} topics := map[string][]int32{"topic1": {0, 1, 2}} members := map[string]ConsumerGroupMemberMetadata{ "consumer1": {Topics: []string{"topic1"}}, } plan1, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan1, err) // PLAN 2 members["consumer1"] = ConsumerGroupMemberMetadata{ Topics: members["consumer1"].Topics, UserData: encodeSubscriberPlan(t, plan1["consumer1"]), } plan2, err := s.Plan(members, map[string][]int32{}) if len(plan2) != 1 { t.Error("Incorrect number of consumers") return } if len(plan2["consumer1"]) != 0 { t.Error("Incorrect number of consumer topic assignments") return } verifyPlanIsBalancedAndSticky(t, s, members, plan2, err) } func Test_stickyBalanceStrategy_Plan_AssignmentWithMultipleGenerations1(t *testing.T) { s := &stickyBalanceStrategy{} topics := map[string][]int32{"topic1": {0, 1, 2, 3, 4, 5}} members := map[string]ConsumerGroupMemberMetadata{ "consumer1": {Topics: []string{"topic1"}}, "consumer2": {Topics: []string{"topic1"}}, "consumer3": {Topics: 
[]string{"topic1"}}, } plan1, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan1, err) verifyFullyBalanced(t, plan1) // PLAN 2 members["consumer1"] = ConsumerGroupMemberMetadata{ Topics: []string{"topic1"}, UserData: encodeSubscriberPlanWithGeneration(t, plan1["consumer1"], 1), } members["consumer2"] = ConsumerGroupMemberMetadata{ Topics: []string{"topic1"}, UserData: encodeSubscriberPlanWithGeneration(t, plan1["consumer2"], 1), } delete(members, "consumer3") plan2, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan2, err) verifyFullyBalanced(t, plan2) if len(intersection(plan1["consumer1"]["topic1"], plan2["consumer1"]["topic1"])) != 2 { t.Error("stickyBalanceStrategy.Plan() consumer1 didn't maintain partitions across reassignment") } if len(intersection(plan1["consumer2"]["topic1"], plan2["consumer2"]["topic1"])) != 2 { t.Error("stickyBalanceStrategy.Plan() consumer1 didn't maintain partitions across reassignment") } // PLAN 3 delete(members, "consumer1") members["consumer2"] = ConsumerGroupMemberMetadata{ Topics: []string{"topic1"}, UserData: encodeSubscriberPlanWithGeneration(t, plan2["consumer2"], 2), } members["consumer3"] = ConsumerGroupMemberMetadata{ Topics: []string{"topic1"}, UserData: encodeSubscriberPlanWithGeneration(t, plan1["consumer3"], 1), } plan3, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan3, err) verifyFullyBalanced(t, plan3) } func Test_stickyBalanceStrategy_Plan_AssignmentWithMultipleGenerations2(t *testing.T) { s := &stickyBalanceStrategy{} topics := map[string][]int32{"topic1": {0, 1, 2, 3, 4, 5}} members := map[string]ConsumerGroupMemberMetadata{ "consumer1": {Topics: []string{"topic1"}}, "consumer2": {Topics: []string{"topic1"}}, "consumer3": {Topics: []string{"topic1"}}, } plan1, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan1, err) verifyFullyBalanced(t, plan1) // PLAN 2 delete(members, "consumer1") members["consumer2"] = ConsumerGroupMemberMetadata{ Topics: []string{"topic1"}, UserData: encodeSubscriberPlanWithGeneration(t, plan1["consumer2"], 1), } delete(members, "consumer3") plan2, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan2, err) verifyFullyBalanced(t, plan2) if len(intersection(plan1["consumer2"]["topic1"], plan2["consumer2"]["topic1"])) != 2 { t.Error("stickyBalanceStrategy.Plan() consumer1 didn't maintain partitions across reassignment") } // PLAN 3 members["consumer1"] = ConsumerGroupMemberMetadata{ Topics: []string{"topic1"}, UserData: encodeSubscriberPlanWithGeneration(t, plan1["consumer1"], 1), } members["consumer2"] = ConsumerGroupMemberMetadata{ Topics: []string{"topic1"}, UserData: encodeSubscriberPlanWithGeneration(t, plan2["consumer2"], 2), } members["consumer3"] = ConsumerGroupMemberMetadata{ Topics: []string{"topic1"}, UserData: encodeSubscriberPlanWithGeneration(t, plan1["consumer3"], 1), } plan3, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan3, err) verifyFullyBalanced(t, plan3) } func Test_stickyBalanceStrategy_Plan_AssignmentWithConflictingPreviousGenerations(t *testing.T) { s := &stickyBalanceStrategy{} topics := map[string][]int32{"topic1": {0, 1, 2, 3, 4, 5}} members := make(map[string]ConsumerGroupMemberMetadata, 3) members["consumer1"] = ConsumerGroupMemberMetadata{ Topics: []string{"topic1"}, UserData: encodeSubscriberPlanWithGeneration(t, map[string][]int32{"topic1": {0, 1, 4}}, 1), } members["consumer2"] = 
ConsumerGroupMemberMetadata{ Topics: []string{"topic1"}, UserData: encodeSubscriberPlanWithGeneration(t, map[string][]int32{"topic1": {0, 2, 3}}, 1), } members["consumer3"] = ConsumerGroupMemberMetadata{ Topics: []string{"topic1"}, UserData: encodeSubscriberPlanWithGeneration(t, map[string][]int32{"topic1": {3, 4, 5}}, 2), } plan, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan, err) verifyFullyBalanced(t, plan) } func Test_stickyBalanceStrategy_Plan_SchemaBackwardCompatibility(t *testing.T) { s := &stickyBalanceStrategy{} topics := map[string][]int32{"topic1": {0, 1, 2}} members := make(map[string]ConsumerGroupMemberMetadata, 3) members["consumer1"] = ConsumerGroupMemberMetadata{ Topics: []string{"topic1"}, UserData: encodeSubscriberPlanWithGeneration(t, map[string][]int32{"topic1": {0, 2}}, 1), } members["consumer2"] = ConsumerGroupMemberMetadata{ Topics: []string{"topic1"}, UserData: encodeSubscriberPlanWithOldSchema(t, map[string][]int32{"topic1": {1}}), } members["consumer3"] = ConsumerGroupMemberMetadata{Topics: []string{"topic1"}} plan, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan, err) verifyFullyBalanced(t, plan) } func Test_stickyBalanceStrategy_Plan_ConflictingPreviousAssignments(t *testing.T) { s := &stickyBalanceStrategy{} topics := map[string][]int32{"topic1": {0, 1}} members := make(map[string]ConsumerGroupMemberMetadata, 2) members["consumer1"] = ConsumerGroupMemberMetadata{ Topics: []string{"topic1"}, UserData: encodeSubscriberPlanWithGeneration(t, map[string][]int32{"topic1": {0, 1}}, 1), } members["consumer2"] = ConsumerGroupMemberMetadata{ Topics: []string{"topic1"}, UserData: encodeSubscriberPlanWithGeneration(t, map[string][]int32{"topic1": {0, 1}}, 1), } plan, err := s.Plan(members, topics) verifyPlanIsBalancedAndSticky(t, s, members, plan, err) verifyFullyBalanced(t, plan) } func Test_stickyBalanceStrategy_Plan_AssignmentData(t *testing.T) { s := &stickyBalanceStrategy{} members := make(map[string]ConsumerGroupMemberMetadata, 2) members["consumer1"] = ConsumerGroupMemberMetadata{ Topics: []string{"topic1"}, } members["consumer2"] = ConsumerGroupMemberMetadata{ Topics: []string{"topic1"}, } expected := encodeSubscriberPlanWithGeneration(t, map[string][]int32{"topic1": {0, 1}}, 1) actual, err := s.AssignmentData("consumer1", map[string][]int32{"topic1": {0, 1}}, 1) if err != nil { t.Errorf("Error building assignment data: %v", err) } if !bytes.Equal(expected, actual) { t.Error("Invalid assignment data returned from AssignmentData") } } func Test_stickyBalanceStrategy_Plan_data_race(t *testing.T) { for i := 0; i < 1000; i++ { go func(bs BalanceStrategy) { members := map[string]ConsumerGroupMemberMetadata{ "m1": { Version: 3, Topics: []string{"topic"}, }, } topics := map[string][]int32{ "topic": {0, 1, 2}, } _, _ = bs.Plan(members, topics) }(NewBalanceStrategySticky()) } } func BenchmarkStickAssignmentWithLargeNumberOfConsumersAndTopics(b *testing.B) { s := &stickyBalanceStrategy{} r := rand.New(rand.NewSource(time.Now().UnixNano())) members := make(map[string]ConsumerGroupMemberMetadata, 20) for i := 0; i < 200; i++ { topics := make([]string, 200) for j := 0; j < 200; j++ { topics[j] = fmt.Sprintf("topic%d", j) } members[fmt.Sprintf("consumer%d", i)] = ConsumerGroupMemberMetadata{Topics: topics} } topics := make(map[string][]int32, 40) for i := 0; i < 40; i++ { partitionCount := r.Intn(20) partitions := make([]int32, partitionCount) for j := 0; j < partitionCount; j++ { partitions[j] = int32(j) } 
topics[fmt.Sprintf("topic%d", i)] = partitions } b.ResetTimer() for n := 0; n < b.N; n++ { if _, err := s.Plan(members, topics); err != nil { b.Errorf("Error building plan in benchmark: %v", err) } } } func BenchmarkStickAssignmentWithLargeNumberOfConsumersAndTopicsAndExistingAssignments(b *testing.B) { s := &stickyBalanceStrategy{} r := rand.New(rand.NewSource(time.Now().UnixNano())) members := make(map[string]ConsumerGroupMemberMetadata, 20) for i := 0; i < 200; i++ { topics := make([]string, 200) for j := 0; j < 200; j++ { topics[j] = fmt.Sprintf("topic%d", j) } members[fmt.Sprintf("consumer%d", i)] = ConsumerGroupMemberMetadata{Topics: topics} } topics := make(map[string][]int32, 40) for i := 0; i < 40; i++ { partitionCount := r.Intn(20) partitions := make([]int32, partitionCount) for j := 0; j < partitionCount; j++ { partitions[j] = int32(j) } topics[fmt.Sprintf("topic%d", i)] = partitions } plan, _ := s.Plan(members, topics) for i := 0; i < 200; i++ { members[fmt.Sprintf("consumer%d", i)] = ConsumerGroupMemberMetadata{ Topics: members[fmt.Sprintf("consumer%d", i)].Topics, UserData: encodeSubscriberPlanWithGenerationForBenchmark(b, plan[fmt.Sprintf("consumer%d", i)], 1), } } for i := 0; i < 1; i++ { delete(members, fmt.Sprintf("consumer%d", i)) } b.ResetTimer() for n := 0; n < b.N; n++ { if _, err := s.Plan(members, topics); err != nil { b.Errorf("Error building plan in benchmark: %v", err) } } } func verifyPlanIsBalancedAndSticky(t *testing.T, s *stickyBalanceStrategy, members map[string]ConsumerGroupMemberMetadata, plan BalanceStrategyPlan, err error) { t.Helper() if err != nil { t.Errorf("stickyBalanceStrategy.Plan() error = %v", err) return } if !s.movements.isSticky() { t.Error("stickyBalanceStrategy.Plan() not sticky") return } verifyValidityAndBalance(t, members, plan) } func verifyValidityAndBalance(t *testing.T, consumers map[string]ConsumerGroupMemberMetadata, plan BalanceStrategyPlan) { t.Helper() size := len(consumers) if size != len(plan) { t.Errorf("Subscription size (%d) not equal to plan size (%d)", size, len(plan)) t.FailNow() } members := make([]string, size) i := 0 for memberID := range consumers { members[i] = memberID i++ } sort.Strings(members) for i, memberID := range members { for assignedTopic := range plan[memberID] { found := false for _, assignableTopic := range consumers[memberID].Topics { if assignableTopic == assignedTopic { found = true break } } if !found { t.Errorf("Consumer %s had assigned topic %q that wasn't in the list of assignable topics %v", memberID, assignedTopic, consumers[memberID].Topics) t.FailNow() } } // skip last consumer if i == len(members)-1 { continue } consumerAssignments := make([]topicPartitionAssignment, 0) for topic, partitions := range plan[memberID] { for _, partition := range partitions { consumerAssignments = append(consumerAssignments, topicPartitionAssignment{Topic: topic, Partition: partition}) } } for j := i + 1; j < size; j++ { otherConsumer := members[j] otherConsumerAssignments := make([]topicPartitionAssignment, 0) for topic, partitions := range plan[otherConsumer] { for _, partition := range partitions { otherConsumerAssignments = append(otherConsumerAssignments, topicPartitionAssignment{Topic: topic, Partition: partition}) } } assignmentsIntersection := intersection(consumerAssignments, otherConsumerAssignments) if len(assignmentsIntersection) > 0 { t.Errorf("Consumers %s and %s have common partitions assigned to them: %v", memberID, otherConsumer, assignmentsIntersection) t.FailNow() } if 
math.Abs(float64(len(consumerAssignments)-len(otherConsumerAssignments))) <= 1 { continue } if len(consumerAssignments) > len(otherConsumerAssignments) { for _, topic := range consumerAssignments { if _, exists := plan[otherConsumer][topic.Topic]; exists { t.Errorf("Some partitions can be moved from %s to %s to achieve a better balance, %s has %d assignments, and %s has %d assignments", otherConsumer, memberID, memberID, len(consumerAssignments), otherConsumer, len(otherConsumerAssignments)) t.FailNow() } } } if len(otherConsumerAssignments) > len(consumerAssignments) { for _, topic := range otherConsumerAssignments { if _, exists := plan[memberID][topic.Topic]; exists { t.Errorf("Some partitions can be moved from %s to %s to achieve a better balance, %s has %d assignments, and %s has %d assignments", memberID, otherConsumer, otherConsumer, len(otherConsumerAssignments), memberID, len(consumerAssignments)) t.FailNow() } } } } } } // Produces the intersection of two slices // From https://github.com/juliangruber/go-intersect func intersection(a interface{}, b interface{}) []interface{} { set := make([]interface{}, 0) hash := make(map[interface{}]bool) av := reflect.ValueOf(a) bv := reflect.ValueOf(b) for i := 0; i < av.Len(); i++ { el := av.Index(i).Interface() hash[el] = true } for i := 0; i < bv.Len(); i++ { el := bv.Index(i).Interface() if _, found := hash[el]; found { set = append(set, el) } } return set } func encodeSubscriberPlan(t *testing.T, assignments map[string][]int32) []byte { return encodeSubscriberPlanWithGeneration(t, assignments, defaultGeneration) } func encodeSubscriberPlanWithGeneration(t *testing.T, assignments map[string][]int32, generation int32) []byte { userDataBytes, err := encode(&StickyAssignorUserDataV1{ Topics: assignments, Generation: generation, }, nil) if err != nil { t.Errorf("encodeSubscriberPlan error = %v", err) t.FailNow() } return userDataBytes } func encodeSubscriberPlanWithGenerationForBenchmark(b *testing.B, assignments map[string][]int32, generation int32) []byte { userDataBytes, err := encode(&StickyAssignorUserDataV1{ Topics: assignments, Generation: generation, }, nil) if err != nil { b.Errorf("encodeSubscriberPlan error = %v", err) b.FailNow() } return userDataBytes } func encodeSubscriberPlanWithOldSchema(t *testing.T, assignments map[string][]int32) []byte { userDataBytes, err := encode(&StickyAssignorUserDataV0{ Topics: assignments, }, nil) if err != nil { t.Errorf("encodeSubscriberPlan error = %v", err) t.FailNow() } return userDataBytes } // verify that the plan is fully balanced, assumes that all consumers can // consume from the same set of topics func verifyFullyBalanced(t *testing.T, plan BalanceStrategyPlan) { min := math.MaxInt32 max := math.MinInt32 for _, topics := range plan { assignedPartitionsCount := 0 for _, partitions := range topics { assignedPartitionsCount += len(partitions) } if assignedPartitionsCount < min { min = assignedPartitionsCount } if assignedPartitionsCount > max { max = assignedPartitionsCount } } if (max - min) > 1 { t.Errorf("Plan partition assignment is not fully balanced: min=%d, max=%d", min, max) } } func getRandomSublist(r *rand.Rand, s []string) []string { howManyToRemove := r.Intn(len(s)) allEntriesMap := make(map[int]string) for i, s := range s { allEntriesMap[i] = s } for i := 0; i < howManyToRemove; i++ { delete(allEntriesMap, r.Intn(len(allEntriesMap))) } subList := make([]string, len(allEntriesMap)) i := 0 for _, s := range allEntriesMap { subList[i] = s i++ } return subList } func 
Test_sortPartitionsByPotentialConsumerAssignments(t *testing.T) { type args struct { partition2AllPotentialConsumers map[topicPartitionAssignment][]string } tests := []struct { name string args args want []topicPartitionAssignment }{ { name: "Single topic partition", args: args{ partition2AllPotentialConsumers: map[topicPartitionAssignment][]string{ { Topic: "t1", Partition: 0, }: {"c1", "c2"}, }, }, want: []topicPartitionAssignment{ { Topic: "t1", Partition: 0, }, }, }, { name: "Multiple topic partitions with the same number of consumers but different topic names", args: args{ partition2AllPotentialConsumers: map[topicPartitionAssignment][]string{ { Topic: "t1", Partition: 0, }: {"c1", "c2"}, { Topic: "t2", Partition: 0, }: {"c1", "c2"}, }, }, want: []topicPartitionAssignment{ { Topic: "t1", Partition: 0, }, { Topic: "t2", Partition: 0, }, }, }, { name: "Multiple topic partitions with the same number of consumers and topic names", args: args{ partition2AllPotentialConsumers: map[topicPartitionAssignment][]string{ { Topic: "t1", Partition: 0, }: {"c1", "c2"}, { Topic: "t1", Partition: 1, }: {"c1", "c2"}, }, }, want: []topicPartitionAssignment{ { Topic: "t1", Partition: 0, }, { Topic: "t1", Partition: 1, }, }, }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { if got := sortPartitionsByPotentialConsumerAssignments(tt.args.partition2AllPotentialConsumers); !reflect.DeepEqual(got, tt.want) { t.Errorf("sortPartitionsByPotentialConsumerAssignments() = %v, want %v", got, tt.want) } }) } } golang-github-ibm-sarama-1.43.2/broker.go000066400000000000000000001463671461256741300202040ustar00rootroot00000000000000package sarama import ( "crypto/tls" "encoding/binary" "errors" "fmt" "io" "math/rand" "net" "sort" "strconv" "strings" "sync" "sync/atomic" "time" "github.com/rcrowley/go-metrics" ) // Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe. type Broker struct { conf *Config rack *string id int32 addr string correlationID int32 conn net.Conn connErr error lock sync.Mutex opened int32 responses chan *responsePromise done chan bool metricRegistry metrics.Registry incomingByteRate metrics.Meter requestRate metrics.Meter fetchRate metrics.Meter requestSize metrics.Histogram requestLatency metrics.Histogram outgoingByteRate metrics.Meter responseRate metrics.Meter responseSize metrics.Histogram requestsInFlight metrics.Counter protocolRequestsRate map[int16]metrics.Meter brokerIncomingByteRate metrics.Meter brokerRequestRate metrics.Meter brokerFetchRate metrics.Meter brokerRequestSize metrics.Histogram brokerRequestLatency metrics.Histogram brokerOutgoingByteRate metrics.Meter brokerResponseRate metrics.Meter brokerResponseSize metrics.Histogram brokerRequestsInFlight metrics.Counter brokerThrottleTime metrics.Histogram brokerProtocolRequestsRate map[int16]metrics.Meter kerberosAuthenticator GSSAPIKerberosAuth clientSessionReauthenticationTimeMs int64 throttleTimer *time.Timer throttleTimerLock sync.Mutex } // SASLMechanism specifies the SASL mechanism the client uses to authenticate with the broker type SASLMechanism string const ( // SASLTypeOAuth represents the SASL/OAUTHBEARER mechanism (Kafka 2.0.0+) SASLTypeOAuth = "OAUTHBEARER" // SASLTypePlaintext represents the SASL/PLAIN mechanism SASLTypePlaintext = "PLAIN" // SASLTypeSCRAMSHA256 represents the SCRAM-SHA-256 mechanism. SASLTypeSCRAMSHA256 = "SCRAM-SHA-256" // SASLTypeSCRAMSHA512 represents the SCRAM-SHA-512 mechanism. 
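// A minimal configuration sketch for selecting this mechanism (a sketch only,
// using the SASL fields exposed under Config.Net.SASL; newXDGSCRAMClient is an
// illustrative, application-supplied constructor, not part of this package):
//
//	conf := NewConfig()
//	conf.Net.SASL.Enable = true
//	conf.Net.SASL.Mechanism = SASLTypeSCRAMSHA512
//	conf.Net.SASL.User = "user"
//	conf.Net.SASL.Password = "password"
//	// newXDGSCRAMClient must return a type implementing SCRAMClient.
//	conf.Net.SASL.SCRAMClientGeneratorFunc = func() SCRAMClient { return newXDGSCRAMClient() }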
SASLTypeSCRAMSHA512 = "SCRAM-SHA-512" SASLTypeGSSAPI = "GSSAPI" // SASLHandshakeV0 is v0 of the Kafka SASL handshake protocol. Client and // server negotiate SASL auth using opaque packets. SASLHandshakeV0 = int16(0) // SASLHandshakeV1 is v1 of the Kafka SASL handshake protocol. Client and // server negotiate SASL by wrapping tokens with Kafka protocol headers. SASLHandshakeV1 = int16(1) // SASLExtKeyAuth is the reserved extension key name sent as part of the // SASL/OAUTHBEARER initial client response SASLExtKeyAuth = "auth" ) // AccessToken contains an access token used to authenticate a // SASL/OAUTHBEARER client along with associated metadata. type AccessToken struct { // Token is the access token payload. Token string // Extensions is a optional map of arbitrary key-value pairs that can be // sent with the SASL/OAUTHBEARER initial client response. These values are // ignored by the SASL server if they are unexpected. This feature is only // supported by Kafka >= 2.1.0. Extensions map[string]string } // AccessTokenProvider is the interface that encapsulates how implementors // can generate access tokens for Kafka broker authentication. type AccessTokenProvider interface { // Token returns an access token. The implementation should ensure token // reuse so that multiple calls at connect time do not create multiple // tokens. The implementation should also periodically refresh the token in // order to guarantee that each call returns an unexpired token. This // method should not block indefinitely--a timeout error should be returned // after a short period of inactivity so that the broker connection logic // can log debugging information and retry. Token() (*AccessToken, error) } // SCRAMClient is a an interface to a SCRAM // client implementation. type SCRAMClient interface { // Begin prepares the client for the SCRAM exchange // with the server with a user name and a password Begin(userName, password, authzID string) error // Step steps client through the SCRAM exchange. It is // called repeatedly until it errors or `Done` returns true. Step(challenge string) (response string, err error) // Done should return true when the SCRAM conversation // is over. Done() bool } type responsePromise struct { requestTime time.Time correlationID int32 headerVersion int16 handler func([]byte, error) packets chan []byte errors chan error } func (p *responsePromise) handle(packets []byte, err error) { // Use callback when provided if p.handler != nil { p.handler(packets, err) return } // Otherwise fallback to using channels if err != nil { p.errors <- err return } p.packets <- packets } // NewBroker creates and returns a Broker targeting the given host:port address. // This does not attempt to actually connect, you have to call Open() for that. func NewBroker(addr string) *Broker { return &Broker{id: -1, addr: addr} } // Open tries to connect to the Broker if it is not already connected or connecting, but does not block // waiting for the connection to complete. This means that any subsequent operations on the broker will // block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call, // follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or // AlreadyConnected. If conf is nil, the result of NewConfig() is used. 
func (b *Broker) Open(conf *Config) error { if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) { return ErrAlreadyConnected } if conf == nil { conf = NewConfig() } err := conf.Validate() if err != nil { return err } usingApiVersionsRequests := conf.Version.IsAtLeast(V2_4_0_0) && conf.ApiVersionsRequest b.lock.Lock() if b.metricRegistry == nil { b.metricRegistry = newCleanupRegistry(conf.MetricRegistry) } go withRecover(func() { defer func() { b.lock.Unlock() // Send an ApiVersionsRequest to identify the client (KIP-511). // Ideally Sarama would use the response to control protocol versions, // but for now just fire-and-forget just to send if usingApiVersionsRequests { _, err = b.ApiVersions(&ApiVersionsRequest{ Version: 3, ClientSoftwareName: defaultClientSoftwareName, ClientSoftwareVersion: version(), }) if err != nil { Logger.Printf("Error while sending ApiVersionsRequest to broker %s: %s\n", b.addr, err) } } }() dialer := conf.getDialer() b.conn, b.connErr = dialer.Dial("tcp", b.addr) if b.connErr != nil { Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr) b.conn = nil atomic.StoreInt32(&b.opened, 0) return } if conf.Net.TLS.Enable { b.conn = tls.Client(b.conn, validServerNameTLS(b.addr, conf.Net.TLS.Config)) } b.conn = newBufConn(b.conn) b.conf = conf // Create or reuse the global metrics shared between brokers b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", b.metricRegistry) b.requestRate = metrics.GetOrRegisterMeter("request-rate", b.metricRegistry) b.fetchRate = metrics.GetOrRegisterMeter("consumer-fetch-rate", b.metricRegistry) b.requestSize = getOrRegisterHistogram("request-size", b.metricRegistry) b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", b.metricRegistry) b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", b.metricRegistry) b.responseRate = metrics.GetOrRegisterMeter("response-rate", b.metricRegistry) b.responseSize = getOrRegisterHistogram("response-size", b.metricRegistry) b.requestsInFlight = metrics.GetOrRegisterCounter("requests-in-flight", b.metricRegistry) b.protocolRequestsRate = map[int16]metrics.Meter{} // Do not gather metrics for seeded broker (only used during bootstrap) because they share // the same id (-1) and are already exposed through the global metrics above if b.id >= 0 && !metrics.UseNilMetrics { b.registerMetrics() } if conf.Net.SASL.Mechanism == SASLTypeOAuth && conf.Net.SASL.Version == SASLHandshakeV0 { conf.Net.SASL.Version = SASLHandshakeV1 } useSaslV0 := conf.Net.SASL.Version == SASLHandshakeV0 || conf.Net.SASL.Mechanism == SASLTypeGSSAPI if conf.Net.SASL.Enable && useSaslV0 { b.connErr = b.authenticateViaSASLv0() if b.connErr != nil { err = b.conn.Close() if err == nil { DebugLogger.Printf("Closed connection to broker %s\n", b.addr) } else { Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err) } b.conn = nil atomic.StoreInt32(&b.opened, 0) return } } b.done = make(chan bool) b.responses = make(chan *responsePromise, b.conf.Net.MaxOpenRequests-1) go withRecover(b.responseReceiver) if conf.Net.SASL.Enable && !useSaslV0 { b.connErr = b.authenticateViaSASLv1() if b.connErr != nil { close(b.responses) <-b.done err = b.conn.Close() if err == nil { DebugLogger.Printf("Closed connection to broker %s\n", b.addr) } else { Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err) } b.conn = nil atomic.StoreInt32(&b.opened, 0) return } } if b.id >= 0 { DebugLogger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, 
b.id) } else { DebugLogger.Printf("Connected to broker at %s (unregistered)\n", b.addr) } }) return nil } func (b *Broker) ResponseSize() int { b.lock.Lock() defer b.lock.Unlock() return len(b.responses) } // Connected returns true if the broker is connected and false otherwise. If the broker is not // connected but it had tried to connect, the error from that connection attempt is also returned. func (b *Broker) Connected() (bool, error) { b.lock.Lock() defer b.lock.Unlock() return b.conn != nil, b.connErr } // TLSConnectionState returns the client's TLS connection state. The second return value is false if this is not a tls connection or the connection has not yet been established. func (b *Broker) TLSConnectionState() (state tls.ConnectionState, ok bool) { b.lock.Lock() defer b.lock.Unlock() if b.conn == nil { return state, false } conn := b.conn if bconn, ok := b.conn.(*bufConn); ok { conn = bconn.Conn } if tc, ok := conn.(*tls.Conn); ok { return tc.ConnectionState(), true } return state, false } // Close closes the broker resources func (b *Broker) Close() error { b.lock.Lock() defer b.lock.Unlock() if b.conn == nil { return ErrNotConnected } close(b.responses) <-b.done err := b.conn.Close() b.conn = nil b.connErr = nil b.done = nil b.responses = nil b.metricRegistry.UnregisterAll() if err == nil { DebugLogger.Printf("Closed connection to broker %s\n", b.addr) } else { Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err) } atomic.StoreInt32(&b.opened, 0) return err } // ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known. func (b *Broker) ID() int32 { return b.id } // Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker. func (b *Broker) Addr() string { return b.addr } // Rack returns the broker's rack as retrieved from Kafka's metadata or the // empty string if it is not known. The returned value corresponds to the // broker's broker.rack configuration setting. Requires protocol version to be // at least v0.10.0.0. 
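// A minimal sketch of how a caller might combine Connected and
// TLSConnectionState to log the negotiated TLS version of an established
// broker connection. The logNegotiatedTLSVersion helper is hypothetical.
func logNegotiatedTLSVersion(b *Broker) {
	if ok, err := b.Connected(); !ok || err != nil {
		return // not connected, or the last connection attempt failed
	}
	if state, ok := b.TLSConnectionState(); ok {
		DebugLogger.Printf("broker %s negotiated TLS version 0x%x\n", b.Addr(), state.Version)
	}
}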
func (b *Broker) Rack() string {
	if b.rack == nil {
		return ""
	}
	return *b.rack
}

// GetMetadata sends a metadata request and returns a metadata response or error
func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) {
	response := new(MetadataResponse)
	response.Version = request.Version // Required to ensure use of the correct response header version

	err := b.sendAndReceive(request, response)
	if err != nil {
		return nil, err
	}

	return response, nil
}

// GetConsumerMetadata sends a consumer metadata request and returns a consumer metadata response or error
func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) {
	response := new(ConsumerMetadataResponse)

	err := b.sendAndReceive(request, response)
	if err != nil {
		return nil, err
	}

	return response, nil
}

// FindCoordinator sends a find coordinator request and returns a response or error
func (b *Broker) FindCoordinator(request *FindCoordinatorRequest) (*FindCoordinatorResponse, error) {
	response := new(FindCoordinatorResponse)

	err := b.sendAndReceive(request, response)
	if err != nil {
		return nil, err
	}

	return response, nil
}

// GetAvailableOffsets returns an offset response or error
func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) {
	response := new(OffsetResponse)

	err := b.sendAndReceive(request, response)
	if err != nil {
		return nil, err
	}

	return response, nil
}

// ProduceCallback function is called once the produce response has been parsed
// or could not be read.
type ProduceCallback func(*ProduceResponse, error)

// AsyncProduce sends a produce request and eventually calls the provided callback
// with a produce response or an error.
//
// Unlike Produce, this call does not generally block while waiting for the response.
// If the configured maximum number of in-flight requests has been reached, however,
// the call blocks until a previous response has been received.
//
// When configured with RequiredAcks == NoResponse, the callback will not be invoked.
// If an error is returned because the request could not be sent then the callback
// will not be invoked either.
//
// Make sure not to Close the broker in the callback as it will lead to a deadlock.
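// A minimal sketch of one way a caller could drive AsyncProduce and block
// until the callback has fired, assuming RequiredAcks != NoResponse
// (otherwise the callback is never invoked, as documented above). The
// produceAsyncAndWait helper is hypothetical; note that the callback must not
// call b.Close().
func produceAsyncAndWait(b *Broker, req *ProduceRequest) (*ProduceResponse, error) {
	done := make(chan struct{})
	var (
		res   *ProduceResponse
		cbErr error
	)
	err := b.AsyncProduce(req, func(r *ProduceResponse, e error) {
		res, cbErr = r, e
		close(done)
	})
	if err != nil {
		// The request was never sent, so the callback will not fire.
		return nil, err
	}
	<-done
	return res, cbErr
}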
func (b *Broker) AsyncProduce(request *ProduceRequest, cb ProduceCallback) error { b.lock.Lock() defer b.lock.Unlock() needAcks := request.RequiredAcks != NoResponse // Use a nil promise when no acks is required var promise *responsePromise if needAcks { metricRegistry := b.metricRegistry // Create ProduceResponse early to provide the header version res := new(ProduceResponse) promise = &responsePromise{ headerVersion: res.headerVersion(), // Packets will be converted to a ProduceResponse in the responseReceiver goroutine handler: func(packets []byte, err error) { if err != nil { // Failed request cb(nil, err) return } if err := versionedDecode(packets, res, request.version(), metricRegistry); err != nil { // Malformed response cb(nil, err) return } // Well-formed response b.handleThrottledResponse(res) cb(res, nil) }, } } return b.sendWithPromise(request, promise) } // Produce returns a produce response or error func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) { var ( response *ProduceResponse err error ) if request.RequiredAcks == NoResponse { err = b.sendAndReceive(request, nil) } else { response = new(ProduceResponse) err = b.sendAndReceive(request, response) } if err != nil { return nil, err } return response, nil } // Fetch returns a FetchResponse or error func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) { defer func() { if b.fetchRate != nil { b.fetchRate.Mark(1) } if b.brokerFetchRate != nil { b.brokerFetchRate.Mark(1) } }() response := new(FetchResponse) err := b.sendAndReceive(request, response) if err != nil { return nil, err } return response, nil } // CommitOffset return an Offset commit response or error func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) { response := new(OffsetCommitResponse) err := b.sendAndReceive(request, response) if err != nil { return nil, err } return response, nil } // FetchOffset returns an offset fetch response or error func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) { response := new(OffsetFetchResponse) response.Version = request.Version // needed to handle the two header versions err := b.sendAndReceive(request, response) if err != nil { return nil, err } return response, nil } // JoinGroup returns a join group response or error func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) { response := new(JoinGroupResponse) err := b.sendAndReceive(request, response) if err != nil { return nil, err } return response, nil } // SyncGroup returns a sync group response or error func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) { response := new(SyncGroupResponse) err := b.sendAndReceive(request, response) if err != nil { return nil, err } return response, nil } // LeaveGroup return a leave group response or error func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) { response := new(LeaveGroupResponse) err := b.sendAndReceive(request, response) if err != nil { return nil, err } return response, nil } // Heartbeat returns a heartbeat response or error func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) { response := new(HeartbeatResponse) err := b.sendAndReceive(request, response) if err != nil { return nil, err } return response, nil } // ListGroups return a list group response or error func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) { response := new(ListGroupsResponse) 
response.Version = request.Version // Required to ensure use of the correct response header version err := b.sendAndReceive(request, response) if err != nil { return nil, err } return response, nil } // DescribeGroups return describe group response or error func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) { response := new(DescribeGroupsResponse) err := b.sendAndReceive(request, response) if err != nil { return nil, err } return response, nil } // ApiVersions return api version response or error func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) { response := new(ApiVersionsResponse) err := b.sendAndReceive(request, response) if err != nil { return nil, err } return response, nil } // CreateTopics send a create topic request and returns create topic response func (b *Broker) CreateTopics(request *CreateTopicsRequest) (*CreateTopicsResponse, error) { response := new(CreateTopicsResponse) err := b.sendAndReceive(request, response) if err != nil { return nil, err } return response, nil } // DeleteTopics sends a delete topic request and returns delete topic response func (b *Broker) DeleteTopics(request *DeleteTopicsRequest) (*DeleteTopicsResponse, error) { response := new(DeleteTopicsResponse) err := b.sendAndReceive(request, response) if err != nil { return nil, err } return response, nil } // CreatePartitions sends a create partition request and returns create // partitions response or error func (b *Broker) CreatePartitions(request *CreatePartitionsRequest) (*CreatePartitionsResponse, error) { response := new(CreatePartitionsResponse) err := b.sendAndReceive(request, response) if err != nil { return nil, err } return response, nil } // AlterPartitionReassignments sends a alter partition reassignments request and // returns alter partition reassignments response func (b *Broker) AlterPartitionReassignments(request *AlterPartitionReassignmentsRequest) (*AlterPartitionReassignmentsResponse, error) { response := new(AlterPartitionReassignmentsResponse) err := b.sendAndReceive(request, response) if err != nil { return nil, err } return response, nil } // ListPartitionReassignments sends a list partition reassignments request and // returns list partition reassignments response func (b *Broker) ListPartitionReassignments(request *ListPartitionReassignmentsRequest) (*ListPartitionReassignmentsResponse, error) { response := new(ListPartitionReassignmentsResponse) err := b.sendAndReceive(request, response) if err != nil { return nil, err } return response, nil } // DeleteRecords send a request to delete records and return delete record // response or error func (b *Broker) DeleteRecords(request *DeleteRecordsRequest) (*DeleteRecordsResponse, error) { response := new(DeleteRecordsResponse) err := b.sendAndReceive(request, response) if err != nil { return nil, err } return response, nil } // DescribeAcls sends a describe acl request and returns a response or error func (b *Broker) DescribeAcls(request *DescribeAclsRequest) (*DescribeAclsResponse, error) { response := new(DescribeAclsResponse) err := b.sendAndReceive(request, response) if err != nil { return nil, err } return response, nil } // CreateAcls sends a create acl request and returns a response or error func (b *Broker) CreateAcls(request *CreateAclsRequest) (*CreateAclsResponse, error) { response := new(CreateAclsResponse) err := b.sendAndReceive(request, response) if err != nil { return nil, err } errs := make([]error, 0) for _, res := range 
response.AclCreationResponses { if !errors.Is(res.Err, ErrNoError) { errs = append(errs, res.Err) } } if len(errs) > 0 { return response, Wrap(ErrCreateACLs, errs...) } return response, nil } // DeleteAcls sends a delete acl request and returns a response or error func (b *Broker) DeleteAcls(request *DeleteAclsRequest) (*DeleteAclsResponse, error) { response := new(DeleteAclsResponse) err := b.sendAndReceive(request, response) if err != nil { return nil, err } return response, nil } // InitProducerID sends an init producer request and returns a response or error func (b *Broker) InitProducerID(request *InitProducerIDRequest) (*InitProducerIDResponse, error) { response := new(InitProducerIDResponse) response.Version = request.version() err := b.sendAndReceive(request, response) if err != nil { return nil, err } return response, nil } // AddPartitionsToTxn send a request to add partition to txn and returns // a response or error func (b *Broker) AddPartitionsToTxn(request *AddPartitionsToTxnRequest) (*AddPartitionsToTxnResponse, error) { response := new(AddPartitionsToTxnResponse) err := b.sendAndReceive(request, response) if err != nil { return nil, err } return response, nil } // AddOffsetsToTxn sends a request to add offsets to txn and returns a response // or error func (b *Broker) AddOffsetsToTxn(request *AddOffsetsToTxnRequest) (*AddOffsetsToTxnResponse, error) { response := new(AddOffsetsToTxnResponse) err := b.sendAndReceive(request, response) if err != nil { return nil, err } return response, nil } // EndTxn sends a request to end txn and returns a response or error func (b *Broker) EndTxn(request *EndTxnRequest) (*EndTxnResponse, error) { response := new(EndTxnResponse) err := b.sendAndReceive(request, response) if err != nil { return nil, err } return response, nil } // TxnOffsetCommit sends a request to commit transaction offsets and returns // a response or error func (b *Broker) TxnOffsetCommit(request *TxnOffsetCommitRequest) (*TxnOffsetCommitResponse, error) { response := new(TxnOffsetCommitResponse) err := b.sendAndReceive(request, response) if err != nil { return nil, err } return response, nil } // DescribeConfigs sends a request to describe config and returns a response or // error func (b *Broker) DescribeConfigs(request *DescribeConfigsRequest) (*DescribeConfigsResponse, error) { response := new(DescribeConfigsResponse) err := b.sendAndReceive(request, response) if err != nil { return nil, err } return response, nil } // AlterConfigs sends a request to alter config and return a response or error func (b *Broker) AlterConfigs(request *AlterConfigsRequest) (*AlterConfigsResponse, error) { response := new(AlterConfigsResponse) err := b.sendAndReceive(request, response) if err != nil { return nil, err } return response, nil } // IncrementalAlterConfigs sends a request to incremental alter config and return a response or error func (b *Broker) IncrementalAlterConfigs(request *IncrementalAlterConfigsRequest) (*IncrementalAlterConfigsResponse, error) { response := new(IncrementalAlterConfigsResponse) err := b.sendAndReceive(request, response) if err != nil { return nil, err } return response, nil } // DeleteGroups sends a request to delete groups and returns a response or error func (b *Broker) DeleteGroups(request *DeleteGroupsRequest) (*DeleteGroupsResponse, error) { response := new(DeleteGroupsResponse) if err := b.sendAndReceive(request, response); err != nil { return nil, err } return response, nil } // DeleteOffsets sends a request to delete group offsets and returns a 
response or error func (b *Broker) DeleteOffsets(request *DeleteOffsetsRequest) (*DeleteOffsetsResponse, error) { response := new(DeleteOffsetsResponse) if err := b.sendAndReceive(request, response); err != nil { return nil, err } return response, nil } // DescribeLogDirs sends a request to get the broker's log dir paths and sizes func (b *Broker) DescribeLogDirs(request *DescribeLogDirsRequest) (*DescribeLogDirsResponse, error) { response := new(DescribeLogDirsResponse) err := b.sendAndReceive(request, response) if err != nil { return nil, err } return response, nil } // DescribeUserScramCredentials sends a request to get SCRAM users func (b *Broker) DescribeUserScramCredentials(req *DescribeUserScramCredentialsRequest) (*DescribeUserScramCredentialsResponse, error) { res := new(DescribeUserScramCredentialsResponse) err := b.sendAndReceive(req, res) if err != nil { return nil, err } return res, err } func (b *Broker) AlterUserScramCredentials(req *AlterUserScramCredentialsRequest) (*AlterUserScramCredentialsResponse, error) { res := new(AlterUserScramCredentialsResponse) err := b.sendAndReceive(req, res) if err != nil { return nil, err } return res, nil } // DescribeClientQuotas sends a request to get the broker's quotas func (b *Broker) DescribeClientQuotas(request *DescribeClientQuotasRequest) (*DescribeClientQuotasResponse, error) { response := new(DescribeClientQuotasResponse) err := b.sendAndReceive(request, response) if err != nil { return nil, err } return response, nil } // AlterClientQuotas sends a request to alter the broker's quotas func (b *Broker) AlterClientQuotas(request *AlterClientQuotasRequest) (*AlterClientQuotasResponse, error) { response := new(AlterClientQuotasResponse) err := b.sendAndReceive(request, response) if err != nil { return nil, err } return response, nil } // readFull ensures the conn ReadDeadline has been setup before making a // call to io.ReadFull func (b *Broker) readFull(buf []byte) (n int, err error) { if err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout)); err != nil { return 0, err } return io.ReadFull(b.conn, buf) } // write ensures the conn WriteDeadline has been setup before making a // call to conn.Write func (b *Broker) write(buf []byte) (n int, err error) { if err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)); err != nil { return 0, err } return b.conn.Write(buf) } // b.lock must be held by caller func (b *Broker) send(rb protocolBody, promiseResponse bool, responseHeaderVersion int16) (*responsePromise, error) { var promise *responsePromise if promiseResponse { // Packets or error will be sent to the following channels // once the response is received promise = makeResponsePromise(responseHeaderVersion) } if err := b.sendWithPromise(rb, promise); err != nil { return nil, err } return promise, nil } func makeResponsePromise(responseHeaderVersion int16) *responsePromise { promise := &responsePromise{ headerVersion: responseHeaderVersion, packets: make(chan []byte), errors: make(chan error), } return promise } // b.lock must be held by caller func (b *Broker) sendWithPromise(rb protocolBody, promise *responsePromise) error { if b.conn == nil { if b.connErr != nil { return b.connErr } return ErrNotConnected } if b.clientSessionReauthenticationTimeMs > 0 && currentUnixMilli() > b.clientSessionReauthenticationTimeMs { err := b.authenticateViaSASLv1() if err != nil { return err } } return b.sendInternal(rb, promise) } // b.lock must be held by caller func (b *Broker) sendInternal(rb protocolBody, promise 
*responsePromise) error { if !b.conf.Version.IsAtLeast(rb.requiredVersion()) { return ErrUnsupportedVersion } req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb} buf, err := encode(req, b.metricRegistry) if err != nil { return err } // check and wait if throttled b.waitIfThrottled() requestTime := time.Now() // Will be decremented in responseReceiver (except error or request with NoResponse) b.addRequestInFlightMetrics(1) bytes, err := b.write(buf) b.updateOutgoingCommunicationMetrics(bytes) b.updateProtocolMetrics(rb) if err != nil { b.addRequestInFlightMetrics(-1) return err } b.correlationID++ if promise == nil { // Record request latency without the response b.updateRequestLatencyAndInFlightMetrics(time.Since(requestTime)) return nil } promise.requestTime = requestTime promise.correlationID = req.correlationID b.responses <- promise return nil } func (b *Broker) sendAndReceive(req protocolBody, res protocolBody) error { b.lock.Lock() defer b.lock.Unlock() responseHeaderVersion := int16(-1) if res != nil { responseHeaderVersion = res.headerVersion() } promise, err := b.send(req, res != nil, responseHeaderVersion) if err != nil { return err } if promise == nil { return nil } err = handleResponsePromise(req, res, promise, b.metricRegistry) if err != nil { return err } if res != nil { b.handleThrottledResponse(res) } return nil } func handleResponsePromise(req protocolBody, res protocolBody, promise *responsePromise, metricRegistry metrics.Registry) error { select { case buf := <-promise.packets: return versionedDecode(buf, res, req.version(), metricRegistry) case err := <-promise.errors: return err } } func (b *Broker) decode(pd packetDecoder, version int16) (err error) { b.id, err = pd.getInt32() if err != nil { return err } var host string if version < 9 { host, err = pd.getString() } else { host, err = pd.getCompactString() } if err != nil { return err } port, err := pd.getInt32() if err != nil { return err } if version >= 1 && version < 9 { b.rack, err = pd.getNullableString() } else if version >= 9 { b.rack, err = pd.getCompactNullableString() } if err != nil { return err } b.addr = net.JoinHostPort(host, fmt.Sprint(port)) if _, _, err := net.SplitHostPort(b.addr); err != nil { return err } if version >= 9 { _, err := pd.getEmptyTaggedFieldArray() if err != nil { return err } } return nil } func (b *Broker) encode(pe packetEncoder, version int16) (err error) { host, portstr, err := net.SplitHostPort(b.addr) if err != nil { return err } port, err := strconv.ParseInt(portstr, 10, 32) if err != nil { return err } pe.putInt32(b.id) if version < 9 { err = pe.putString(host) } else { err = pe.putCompactString(host) } if err != nil { return err } pe.putInt32(int32(port)) if version >= 1 { if version < 9 { err = pe.putNullableString(b.rack) } else { err = pe.putNullableCompactString(b.rack) } if err != nil { return err } } if version >= 9 { pe.putEmptyTaggedFieldArray() } return nil } func (b *Broker) responseReceiver() { var dead error for response := range b.responses { if dead != nil { // This was previously incremented in send() and // we are not calling updateIncomingCommunicationMetrics() b.addRequestInFlightMetrics(-1) response.handle(nil, dead) continue } headerLength := getHeaderLength(response.headerVersion) header := make([]byte, headerLength) bytesReadHeader, err := b.readFull(header) requestLatency := time.Since(response.requestTime) if err != nil { b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) dead = err 
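		// From this point on the connection is treated as dead: the stream
		// framing can no longer be trusted, so this promise and every
		// remaining in-flight promise is failed with the same read error.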
response.handle(nil, err) continue } decodedHeader := responseHeader{} err = versionedDecode(header, &decodedHeader, response.headerVersion, b.metricRegistry) if err != nil { b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) dead = err response.handle(nil, err) continue } if decodedHeader.correlationID != response.correlationID { b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) // TODO if decoded ID < cur ID, discard until we catch up // TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)} response.handle(nil, dead) continue } buf := make([]byte, decodedHeader.length-int32(headerLength)+4) bytesReadBody, err := b.readFull(buf) b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency) if err != nil { dead = err response.handle(nil, err) continue } response.handle(buf, nil) } close(b.done) } func getHeaderLength(headerVersion int16) int8 { if headerVersion < 1 { return 8 } else { // header contains additional tagged field length (0), we don't support actual tags yet. return 9 } } func (b *Broker) authenticateViaSASLv0() error { switch b.conf.Net.SASL.Mechanism { case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512: return b.sendAndReceiveSASLSCRAMv0() case SASLTypeGSSAPI: return b.sendAndReceiveKerberos() default: return b.sendAndReceiveSASLPlainAuthV0() } } func (b *Broker) authenticateViaSASLv1() error { metricRegistry := b.metricRegistry if b.conf.Net.SASL.Handshake { handshakeRequest := &SaslHandshakeRequest{Mechanism: string(b.conf.Net.SASL.Mechanism), Version: b.conf.Net.SASL.Version} handshakeResponse := new(SaslHandshakeResponse) prom := makeResponsePromise(handshakeResponse.version()) handshakeErr := b.sendInternal(handshakeRequest, prom) if handshakeErr != nil { Logger.Printf("Error while performing SASL handshake %s\n", b.addr) return handshakeErr } handshakeErr = handleResponsePromise(handshakeRequest, handshakeResponse, prom, metricRegistry) if handshakeErr != nil { Logger.Printf("Error while performing SASL handshake %s\n", b.addr) return handshakeErr } if !errors.Is(handshakeResponse.Err, ErrNoError) { return handshakeResponse.Err } } authSendReceiver := func(authBytes []byte) (*SaslAuthenticateResponse, error) { authenticateRequest := b.createSaslAuthenticateRequest(authBytes) authenticateResponse := new(SaslAuthenticateResponse) prom := makeResponsePromise(authenticateResponse.version()) authErr := b.sendInternal(authenticateRequest, prom) if authErr != nil { Logger.Printf("Error while performing SASL Auth %s\n", b.addr) return nil, authErr } authErr = handleResponsePromise(authenticateRequest, authenticateResponse, prom, metricRegistry) if authErr != nil { Logger.Printf("Error while performing SASL Auth %s\n", b.addr) return nil, authErr } if !errors.Is(authenticateResponse.Err, ErrNoError) { var err error = authenticateResponse.Err if authenticateResponse.ErrorMessage != nil { err = Wrap(authenticateResponse.Err, errors.New(*authenticateResponse.ErrorMessage)) } return nil, err } b.computeSaslSessionLifetime(authenticateResponse) return authenticateResponse, nil } switch b.conf.Net.SASL.Mechanism { case SASLTypeOAuth: provider := b.conf.Net.SASL.TokenProvider return b.sendAndReceiveSASLOAuth(authSendReceiver, provider) case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512: return b.sendAndReceiveSASLSCRAMv1(authSendReceiver, 
b.conf.Net.SASL.SCRAMClientGeneratorFunc()) default: return b.sendAndReceiveSASLPlainAuthV1(authSendReceiver) } } func (b *Broker) sendAndReceiveKerberos() error { b.kerberosAuthenticator.Config = &b.conf.Net.SASL.GSSAPI if b.kerberosAuthenticator.NewKerberosClientFunc == nil { b.kerberosAuthenticator.NewKerberosClientFunc = NewKerberosClient } return b.kerberosAuthenticator.Authorize(b) } func (b *Broker) sendAndReceiveSASLHandshake(saslType SASLMechanism, version int16) error { rb := &SaslHandshakeRequest{Mechanism: string(saslType), Version: version} req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb} buf, err := encode(req, b.metricRegistry) if err != nil { return err } requestTime := time.Now() // Will be decremented in updateIncomingCommunicationMetrics (except error) b.addRequestInFlightMetrics(1) bytes, err := b.write(buf) b.updateOutgoingCommunicationMetrics(bytes) if err != nil { b.addRequestInFlightMetrics(-1) Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error()) return err } b.correlationID++ header := make([]byte, 8) // response header _, err = b.readFull(header) if err != nil { b.addRequestInFlightMetrics(-1) Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error()) return err } length := binary.BigEndian.Uint32(header[:4]) payload := make([]byte, length-4) n, err := b.readFull(payload) if err != nil { b.addRequestInFlightMetrics(-1) Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error()) return err } b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime)) res := &SaslHandshakeResponse{} err = versionedDecode(payload, res, 0, b.metricRegistry) if err != nil { Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error()) return err } if !errors.Is(res.Err, ErrNoError) { Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error()) return res.Err } DebugLogger.Print("Completed pre-auth SASL handshake. Available mechanisms: ", res.EnabledMechanisms) return nil } // // In SASL Plain, Kafka expects the auth header to be in the following format // Message format (from https://tools.ietf.org/html/rfc4616): // // message = [authzid] UTF8NUL authcid UTF8NUL passwd // authcid = 1*SAFE ; MUST accept up to 255 octets // authzid = 1*SAFE ; MUST accept up to 255 octets // passwd = 1*SAFE ; MUST accept up to 255 octets // UTF8NUL = %x00 ; UTF-8 encoded NUL character // // SAFE = UTF1 / UTF2 / UTF3 / UTF4 // ;; any UTF-8 encoded Unicode character except NUL // // // Kafka 0.10.x supported SASL PLAIN/Kerberos via KAFKA-3149 (KIP-43). // sendAndReceiveSASLPlainAuthV0 flows the v0 sasl auth NOT wrapped in the kafka protocol // // With SASL v0 handshake and auth then: // When credentials are valid, Kafka returns a 4 byte array of null characters. // When credentials are invalid, Kafka closes the connection. 
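// For example, with an empty authzid, user "alice" and password "secret",
// the client message is the 13-byte sequence "\x00alice\x00secret"; the v0
// flow below sends it with a 4-byte big-endian length prefix (13) directly
// on the wire.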
func (b *Broker) sendAndReceiveSASLPlainAuthV0() error { // default to V0 to allow for backward compatibility when SASL is enabled // but not the handshake if b.conf.Net.SASL.Handshake { handshakeErr := b.sendAndReceiveSASLHandshake(SASLTypePlaintext, b.conf.Net.SASL.Version) if handshakeErr != nil { Logger.Printf("Error while performing SASL handshake %s\n", b.addr) return handshakeErr } } length := len(b.conf.Net.SASL.AuthIdentity) + 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password) authBytes := make([]byte, length+4) // 4 byte length header + auth data binary.BigEndian.PutUint32(authBytes, uint32(length)) copy(authBytes[4:], b.conf.Net.SASL.AuthIdentity+"\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password) requestTime := time.Now() // Will be decremented in updateIncomingCommunicationMetrics (except error) b.addRequestInFlightMetrics(1) bytesWritten, err := b.write(authBytes) b.updateOutgoingCommunicationMetrics(bytesWritten) if err != nil { b.addRequestInFlightMetrics(-1) Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error()) return err } header := make([]byte, 4) n, err := b.readFull(header) b.updateIncomingCommunicationMetrics(n, time.Since(requestTime)) // If the credentials are valid, we would get a 4 byte response filled with null characters. // Otherwise, the broker closes the connection and we get an EOF if err != nil { Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error()) return err } DebugLogger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header) return nil } // Kafka 1.x.x onward added a SaslAuthenticate request/response message which // wraps the SASL flow in the Kafka protocol, which allows for returning // meaningful errors on authentication failure. func (b *Broker) sendAndReceiveSASLPlainAuthV1(authSendReceiver func(authBytes []byte) (*SaslAuthenticateResponse, error)) error { authBytes := []byte(b.conf.Net.SASL.AuthIdentity + "\x00" + b.conf.Net.SASL.User + "\x00" + b.conf.Net.SASL.Password) _, err := authSendReceiver(authBytes) if err != nil { return err } return err } func currentUnixMilli() int64 { return time.Now().UnixNano() / int64(time.Millisecond) } // sendAndReceiveSASLOAuth performs the authentication flow as described by KIP-255 // https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=75968876 func (b *Broker) sendAndReceiveSASLOAuth(authSendReceiver func(authBytes []byte) (*SaslAuthenticateResponse, error), provider AccessTokenProvider) error { token, err := provider.Token() if err != nil { return err } message, err := buildClientFirstMessage(token) if err != nil { return err } res, err := authSendReceiver(message) if err != nil { return err } isChallenge := len(res.SaslAuthBytes) > 0 if isChallenge { // Abort the token exchange. The broker returns the failure code. 
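		// Per RFC 7628 section 3.2.2 the server answers a failed OAUTHBEARER
		// attempt with an error challenge; the client is then expected to
		// reply with a dummy client response to abort the exchange, after
		// which the broker returns the final authentication error that is
		// surfaced to the caller.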
_, err = authSendReceiver([]byte(`\x01`)) } return err } func (b *Broker) sendAndReceiveSASLSCRAMv0() error { if err := b.sendAndReceiveSASLHandshake(b.conf.Net.SASL.Mechanism, SASLHandshakeV0); err != nil { return err } scramClient := b.conf.Net.SASL.SCRAMClientGeneratorFunc() if err := scramClient.Begin(b.conf.Net.SASL.User, b.conf.Net.SASL.Password, b.conf.Net.SASL.SCRAMAuthzID); err != nil { return fmt.Errorf("failed to start SCRAM exchange with the server: %w", err) } msg, err := scramClient.Step("") if err != nil { return fmt.Errorf("failed to advance the SCRAM exchange: %w", err) } for !scramClient.Done() { requestTime := time.Now() // Will be decremented in updateIncomingCommunicationMetrics (except error) b.addRequestInFlightMetrics(1) length := len(msg) authBytes := make([]byte, length+4) // 4 byte length header + auth data binary.BigEndian.PutUint32(authBytes, uint32(length)) copy(authBytes[4:], msg) _, err := b.write(authBytes) b.updateOutgoingCommunicationMetrics(length + 4) if err != nil { b.addRequestInFlightMetrics(-1) Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error()) return err } b.correlationID++ header := make([]byte, 4) _, err = b.readFull(header) if err != nil { b.addRequestInFlightMetrics(-1) Logger.Printf("Failed to read response header while authenticating with SASL to broker %s: %s\n", b.addr, err.Error()) return err } payload := make([]byte, int32(binary.BigEndian.Uint32(header))) n, err := b.readFull(payload) if err != nil { b.addRequestInFlightMetrics(-1) Logger.Printf("Failed to read response payload while authenticating with SASL to broker %s: %s\n", b.addr, err.Error()) return err } b.updateIncomingCommunicationMetrics(n+4, time.Since(requestTime)) msg, err = scramClient.Step(string(payload)) if err != nil { Logger.Println("SASL authentication failed", err) return err } } DebugLogger.Println("SASL authentication succeeded") return nil } func (b *Broker) sendAndReceiveSASLSCRAMv1(authSendReceiver func(authBytes []byte) (*SaslAuthenticateResponse, error), scramClient SCRAMClient) error { if err := scramClient.Begin(b.conf.Net.SASL.User, b.conf.Net.SASL.Password, b.conf.Net.SASL.SCRAMAuthzID); err != nil { return fmt.Errorf("failed to start SCRAM exchange with the server: %w", err) } msg, err := scramClient.Step("") if err != nil { return fmt.Errorf("failed to advance the SCRAM exchange: %w", err) } for !scramClient.Done() { res, err := authSendReceiver([]byte(msg)) if err != nil { return err } msg, err = scramClient.Step(string(res.SaslAuthBytes)) if err != nil { Logger.Println("SASL authentication failed", err) return err } } DebugLogger.Println("SASL authentication succeeded") return nil } func (b *Broker) createSaslAuthenticateRequest(msg []byte) *SaslAuthenticateRequest { authenticateRequest := SaslAuthenticateRequest{SaslAuthBytes: msg} if b.conf.Version.IsAtLeast(V2_2_0_0) { authenticateRequest.Version = 1 } return &authenticateRequest } // Build SASL/OAUTHBEARER initial client response as described by RFC-7628 // https://tools.ietf.org/html/rfc7628 func buildClientFirstMessage(token *AccessToken) ([]byte, error) { var ext string if token.Extensions != nil && len(token.Extensions) > 0 { if _, ok := token.Extensions[SASLExtKeyAuth]; ok { return []byte{}, fmt.Errorf("the extension `%s` is invalid", SASLExtKeyAuth) } ext = "\x01" + mapToString(token.Extensions, "=", "\x01") } resp := []byte(fmt.Sprintf("n,,\x01auth=Bearer %s%s\x01\x01", token.Token, ext)) return resp, nil } // mapToString returns a list of key-value 
pairs ordered by key. // keyValSep separates the key from the value. elemSep separates each pair. func mapToString(extensions map[string]string, keyValSep string, elemSep string) string { buf := make([]string, 0, len(extensions)) for k, v := range extensions { buf = append(buf, k+keyValSep+v) } sort.Strings(buf) return strings.Join(buf, elemSep) } func (b *Broker) computeSaslSessionLifetime(res *SaslAuthenticateResponse) { if res.SessionLifetimeMs > 0 { // Follows the Java Kafka implementation from SaslClientAuthenticator.ReauthInfo#setAuthenticationEndAndSessionReauthenticationTimes // pick a random percentage between 85% and 95% for session re-authentication positiveSessionLifetimeMs := res.SessionLifetimeMs authenticationEndMs := currentUnixMilli() pctWindowFactorToTakeNetworkLatencyAndClockDriftIntoAccount := 0.85 pctWindowJitterToAvoidReauthenticationStormAcrossManyChannelsSimultaneously := 0.10 pctToUse := pctWindowFactorToTakeNetworkLatencyAndClockDriftIntoAccount + rand.Float64()*pctWindowJitterToAvoidReauthenticationStormAcrossManyChannelsSimultaneously sessionLifetimeMsToUse := int64(float64(positiveSessionLifetimeMs) * pctToUse) DebugLogger.Printf("Session expiration in %d ms and session re-authentication on or after %d ms", positiveSessionLifetimeMs, sessionLifetimeMsToUse) b.clientSessionReauthenticationTimeMs = authenticationEndMs + sessionLifetimeMsToUse } else { b.clientSessionReauthenticationTimeMs = 0 } } func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) { b.updateRequestLatencyAndInFlightMetrics(requestLatency) b.responseRate.Mark(1) if b.brokerResponseRate != nil { b.brokerResponseRate.Mark(1) } responseSize := int64(bytes) b.incomingByteRate.Mark(responseSize) if b.brokerIncomingByteRate != nil { b.brokerIncomingByteRate.Mark(responseSize) } b.responseSize.Update(responseSize) if b.brokerResponseSize != nil { b.brokerResponseSize.Update(responseSize) } } func (b *Broker) updateRequestLatencyAndInFlightMetrics(requestLatency time.Duration) { requestLatencyInMs := int64(requestLatency / time.Millisecond) b.requestLatency.Update(requestLatencyInMs) if b.brokerRequestLatency != nil { b.brokerRequestLatency.Update(requestLatencyInMs) } b.addRequestInFlightMetrics(-1) } func (b *Broker) addRequestInFlightMetrics(i int64) { b.requestsInFlight.Inc(i) if b.brokerRequestsInFlight != nil { b.brokerRequestsInFlight.Inc(i) } } func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) { b.requestRate.Mark(1) if b.brokerRequestRate != nil { b.brokerRequestRate.Mark(1) } requestSize := int64(bytes) b.outgoingByteRate.Mark(requestSize) if b.brokerOutgoingByteRate != nil { b.brokerOutgoingByteRate.Mark(requestSize) } b.requestSize.Update(requestSize) if b.brokerRequestSize != nil { b.brokerRequestSize.Update(requestSize) } } func (b *Broker) updateProtocolMetrics(rb protocolBody) { protocolRequestsRate := b.protocolRequestsRate[rb.key()] if protocolRequestsRate == nil { protocolRequestsRate = metrics.GetOrRegisterMeter(fmt.Sprintf("protocol-requests-rate-%d", rb.key()), b.metricRegistry) b.protocolRequestsRate[rb.key()] = protocolRequestsRate } protocolRequestsRate.Mark(1) if b.brokerProtocolRequestsRate != nil { brokerProtocolRequestsRate := b.brokerProtocolRequestsRate[rb.key()] if brokerProtocolRequestsRate == nil { brokerProtocolRequestsRate = b.registerMeter(fmt.Sprintf("protocol-requests-rate-%d", rb.key())) b.brokerProtocolRequestsRate[rb.key()] = brokerProtocolRequestsRate } brokerProtocolRequestsRate.Mark(1) } } type 
throttleSupport interface { throttleTime() time.Duration } func (b *Broker) handleThrottledResponse(resp protocolBody) { throttledResponse, ok := resp.(throttleSupport) if !ok { return } throttleTime := throttledResponse.throttleTime() if throttleTime == time.Duration(0) { return } DebugLogger.Printf( "broker/%d %T throttled %v\n", b.ID(), resp, throttleTime) b.setThrottle(throttleTime) b.updateThrottleMetric(throttleTime) } func (b *Broker) setThrottle(throttleTime time.Duration) { b.throttleTimerLock.Lock() defer b.throttleTimerLock.Unlock() if b.throttleTimer != nil { // if there is an existing timer stop/clear it if !b.throttleTimer.Stop() { <-b.throttleTimer.C } } b.throttleTimer = time.NewTimer(throttleTime) } func (b *Broker) waitIfThrottled() { b.throttleTimerLock.Lock() defer b.throttleTimerLock.Unlock() if b.throttleTimer != nil { DebugLogger.Printf("broker/%d waiting for throttle timer\n", b.ID()) <-b.throttleTimer.C b.throttleTimer = nil } } func (b *Broker) updateThrottleMetric(throttleTime time.Duration) { if b.brokerThrottleTime != nil { throttleTimeInMs := int64(throttleTime / time.Millisecond) b.brokerThrottleTime.Update(throttleTimeInMs) } } func (b *Broker) registerMetrics() { b.brokerIncomingByteRate = b.registerMeter("incoming-byte-rate") b.brokerRequestRate = b.registerMeter("request-rate") b.brokerFetchRate = b.registerMeter("consumer-fetch-rate") b.brokerRequestSize = b.registerHistogram("request-size") b.brokerRequestLatency = b.registerHistogram("request-latency-in-ms") b.brokerOutgoingByteRate = b.registerMeter("outgoing-byte-rate") b.brokerResponseRate = b.registerMeter("response-rate") b.brokerResponseSize = b.registerHistogram("response-size") b.brokerRequestsInFlight = b.registerCounter("requests-in-flight") b.brokerThrottleTime = b.registerHistogram("throttle-time-in-ms") b.brokerProtocolRequestsRate = map[int16]metrics.Meter{} } func (b *Broker) registerMeter(name string) metrics.Meter { nameForBroker := getMetricNameForBroker(name, b) return metrics.GetOrRegisterMeter(nameForBroker, b.metricRegistry) } func (b *Broker) registerHistogram(name string) metrics.Histogram { nameForBroker := getMetricNameForBroker(name, b) return getOrRegisterHistogram(nameForBroker, b.metricRegistry) } func (b *Broker) registerCounter(name string) metrics.Counter { nameForBroker := getMetricNameForBroker(name, b) return metrics.GetOrRegisterCounter(nameForBroker, b.metricRegistry) } func validServerNameTLS(addr string, cfg *tls.Config) *tls.Config { if cfg == nil { cfg = &tls.Config{ MinVersion: tls.VersionTLS12, } } if cfg.ServerName != "" { return cfg } c := cfg.Clone() sn, _, err := net.SplitHostPort(addr) if err != nil { Logger.Println(fmt.Errorf("failed to get ServerName from addr %w", err)) } c.ServerName = sn return c } golang-github-ibm-sarama-1.43.2/broker_test.go000066400000000000000000001254171461256741300212340ustar00rootroot00000000000000package sarama import ( "bytes" "errors" "fmt" "net" "reflect" "testing" "time" "github.com/jcmturner/gokrb5/v8/krberror" "github.com/rcrowley/go-metrics" ) func ExampleBroker() { broker := NewBroker("localhost:9092") err := broker.Open(nil) if err != nil { panic(err) } request := MetadataRequest{Topics: []string{"myTopic"}} response, err := broker.GetMetadata(&request) if err != nil { _ = broker.Close() panic(err) } fmt.Println("There are", len(response.Topics), "topics active in the cluster.") if err = broker.Close(); err != nil { panic(err) } } type mockEncoder struct { bytes []byte } func (m mockEncoder) encode(pe 
packetEncoder) error { return pe.putRawBytes(m.bytes) } func (m mockEncoder) headerVersion() int16 { return 0 } type brokerMetrics struct { bytesRead int bytesWritten int } func TestBrokerAccessors(t *testing.T) { broker := NewBroker("abc:123") if broker.ID() != -1 { t.Error("New broker didn't have an ID of -1.") } if broker.Addr() != "abc:123" { t.Error("New broker didn't have the correct address") } if broker.Rack() != "" { t.Error("New broker didn't have an unknown rack.") } broker.id = 34 if broker.ID() != 34 { t.Error("Manually setting broker ID did not take effect.") } rack := "dc1" broker.rack = &rack if broker.Rack() != rack { t.Error("Manually setting broker rack did not take effect.") } } type produceResponsePromise struct { c chan produceResOrError } type produceResOrError struct { res *ProduceResponse err error } func newProduceResponsePromise() produceResponsePromise { return produceResponsePromise{ c: make(chan produceResOrError, 0), } } func (p produceResponsePromise) callback(res *ProduceResponse, err error) { if err != nil { p.c <- produceResOrError{ err: err, } return } p.c <- produceResOrError{ res: res, } } func (p produceResponsePromise) Get() (*ProduceResponse, error) { resOrError := <-p.c return resOrError.res, resOrError.err } func TestSimpleBrokerCommunication(t *testing.T) { for _, tt := range brokerTestTable { tt := tt t.Run(tt.name, func(t *testing.T) { Logger.Printf("Testing broker communication for %s", tt.name) mb := NewMockBroker(t, 0) mb.Returns(&mockEncoder{tt.response}) pendingNotify := make(chan brokerMetrics) // Register a callback to be notified about successful requests mb.SetNotifier(func(bytesRead, bytesWritten int) { pendingNotify <- brokerMetrics{bytesRead, bytesWritten} }) broker := NewBroker(mb.Addr()) // Set the broker id in order to validate local broker metrics broker.id = 0 conf := NewTestConfig() conf.ApiVersionsRequest = false conf.Version = tt.version err := broker.Open(conf) if err != nil { t.Fatal(err) } if _, err := broker.Connected(); err != nil { t.Error(err) } tt.runner(t, broker) // Wait up to 500 ms for the remote broker to process the request and // notify us about the metrics timeout := 500 * time.Millisecond select { case mockBrokerMetrics := <-pendingNotify: validateBrokerMetrics(t, broker, mockBrokerMetrics) case <-time.After(timeout): t.Errorf("No request received for: %s after waiting for %v", tt.name, timeout) } mb.Close() err = broker.Close() if err != nil { t.Error(err) } }) } } func TestBrokerFailedRequest(t *testing.T) { for _, tt := range brokerFailedReqTestTable { tt := tt t.Run(tt.name, func(t *testing.T) { t.Logf("Testing broker communication for %s", tt.name) mb := NewMockBroker(t, 0) if !tt.stopBroker { mb.Returns(&mockEncoder{tt.response}) } broker := NewBroker(mb.Addr()) // Stop the broker before calling the runner to purposefully // make the request fail right away, the port will be closed // and should not be reused right away if tt.stopBroker { t.Log("Closing broker:", mb.Addr()) mb.Close() } conf := NewTestConfig() conf.ApiVersionsRequest = false conf.Version = tt.version // Tune read timeout to speed up some test cases conf.Net.ReadTimeout = 1 * time.Second err := broker.Open(conf) if err != nil { t.Fatal(err) } tt.runner(t, broker) if !tt.stopBroker { mb.Close() } err = broker.Close() if err != nil { if tt.stopBroker && errors.Is(err, ErrNotConnected) { // We expect the broker to not close properly return } t.Error(err) } }) } } var ErrTokenFailure = errors.New("Failure generating token") type 
TokenProvider struct { accessToken *AccessToken err error } func (t *TokenProvider) Token() (*AccessToken, error) { return t.accessToken, t.err } func newTokenProvider(token *AccessToken, err error) *TokenProvider { return &TokenProvider{ accessToken: token, err: err, } } func TestSASLOAuthBearer(t *testing.T) { testTable := []struct { name string authidentity string mockSASLHandshakeResponse MockResponse // Mock SaslHandshakeRequest response from broker mockSASLAuthResponse MockResponse // Mock SaslAuthenticateRequest response from broker expectClientErr bool // Expect an internal client-side error expectedBrokerError KError // Expected Kafka error returned by client tokProvider *TokenProvider }{ { name: "SASL/OAUTHBEARER OK server response", mockSASLHandshakeResponse: NewMockSaslHandshakeResponse(t). SetEnabledMechanisms([]string{SASLTypeOAuth}), mockSASLAuthResponse: NewMockSaslAuthenticateResponse(t), expectClientErr: false, expectedBrokerError: ErrNoError, tokProvider: newTokenProvider(&AccessToken{Token: "access-token-123"}, nil), }, { name: "SASL/OAUTHBEARER authentication failure response", mockSASLHandshakeResponse: NewMockSaslHandshakeResponse(t). SetEnabledMechanisms([]string{SASLTypeOAuth}), mockSASLAuthResponse: NewMockSequence( // First, the broker response with a challenge NewMockSaslAuthenticateResponse(t). SetAuthBytes([]byte(`{"status":"invalid_request1"}`)), // Next, the client terminates the token exchange. Finally, the // broker responds with an error message. NewMockSaslAuthenticateResponse(t). SetAuthBytes([]byte(`{"status":"invalid_request2"}`)). SetError(ErrSASLAuthenticationFailed), ), expectClientErr: true, expectedBrokerError: ErrSASLAuthenticationFailed, tokProvider: newTokenProvider(&AccessToken{Token: "access-token-123"}, nil), }, { name: "SASL/OAUTHBEARER handshake failure response", mockSASLHandshakeResponse: NewMockSaslHandshakeResponse(t). SetEnabledMechanisms([]string{SASLTypeOAuth}). SetError(ErrSASLAuthenticationFailed), mockSASLAuthResponse: NewMockSaslAuthenticateResponse(t), expectClientErr: true, expectedBrokerError: ErrSASLAuthenticationFailed, tokProvider: newTokenProvider(&AccessToken{Token: "access-token-123"}, nil), }, { name: "SASL/OAUTHBEARER token generation error", mockSASLHandshakeResponse: NewMockSaslHandshakeResponse(t). SetEnabledMechanisms([]string{SASLTypeOAuth}), mockSASLAuthResponse: NewMockSaslAuthenticateResponse(t), expectClientErr: true, expectedBrokerError: ErrNoError, tokProvider: newTokenProvider(&AccessToken{Token: "access-token-123"}, ErrTokenFailure), }, { name: "SASL/OAUTHBEARER invalid extension", mockSASLHandshakeResponse: NewMockSaslHandshakeResponse(t). 
SetEnabledMechanisms([]string{SASLTypeOAuth}), mockSASLAuthResponse: NewMockSaslAuthenticateResponse(t), expectClientErr: true, expectedBrokerError: ErrNoError, tokProvider: newTokenProvider(&AccessToken{ Token: "access-token-123", Extensions: map[string]string{"auth": "auth-value"}, }, nil), }, } for i, test := range testTable { test := test t.Run(test.name, func(t *testing.T) { // mockBroker mocks underlying network logic and broker responses mockBroker := NewMockBroker(t, 0) mockBroker.SetHandlerByMap(map[string]MockResponse{ "SaslAuthenticateRequest": test.mockSASLAuthResponse, "SaslHandshakeRequest": test.mockSASLHandshakeResponse, }) // broker executes SASL requests against mockBroker broker := NewBroker(mockBroker.Addr()) broker.requestRate = metrics.NilMeter{} broker.outgoingByteRate = metrics.NilMeter{} broker.incomingByteRate = metrics.NilMeter{} broker.requestSize = metrics.NilHistogram{} broker.responseSize = metrics.NilHistogram{} broker.responseRate = metrics.NilMeter{} broker.requestLatency = metrics.NilHistogram{} broker.requestsInFlight = metrics.NilCounter{} conf := NewTestConfig() conf.Net.SASL.Mechanism = SASLTypeOAuth conf.Net.SASL.TokenProvider = test.tokProvider conf.Net.SASL.Enable = true conf.Version = V1_0_0_0 err := broker.Open(conf) if err != nil { t.Fatal(err) } t.Cleanup(func() { _ = broker.Close() }) _, err = broker.Connected() if !errors.Is(test.expectedBrokerError, ErrNoError) { if !errors.Is(err, test.expectedBrokerError) { t.Errorf("[%d]:[%s] Expected %s auth error, got %s\n", i, test.name, test.expectedBrokerError, err) } } else if test.expectClientErr && err == nil { t.Errorf("[%d]:[%s] Expected a client error and got none\n", i, test.name) } else if !test.expectClientErr && err != nil { t.Errorf("[%d]:[%s] Unexpected error, got %s\n", i, test.name, err) } mockBroker.Close() }) } } // A mock scram client. 
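// It answers the empty initial challenge with "ping", completes successfully
// when the server responds with "pong", and fails on any other challenge,
// which is what the SCRAM test cases below rely on.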
type MockSCRAMClient struct { done bool } func (m *MockSCRAMClient) Begin(_, _, _ string) (err error) { return nil } func (m *MockSCRAMClient) Step(challenge string) (response string, err error) { if challenge == "" { return "ping", nil } if challenge == "pong" { m.done = true return "", nil } return "", errors.New("failed to authenticate :(") } func (m *MockSCRAMClient) Done() bool { return m.done } var _ SCRAMClient = &MockSCRAMClient{} func TestSASLSCRAMSHAXXX(t *testing.T) { testTable := []struct { name string mockHandshakeErr KError mockSASLAuthErr KError expectClientErr bool scramClient *MockSCRAMClient scramChallengeResp string }{ { name: "SASL/SCRAMSHAXXX successful authentication", mockHandshakeErr: ErrNoError, scramClient: &MockSCRAMClient{}, scramChallengeResp: "pong", }, { name: "SASL/SCRAMSHAXXX SCRAM client step error client", mockHandshakeErr: ErrNoError, mockSASLAuthErr: ErrNoError, scramClient: &MockSCRAMClient{}, scramChallengeResp: "gong", expectClientErr: true, }, { name: "SASL/SCRAMSHAXXX server authentication error", mockHandshakeErr: ErrNoError, mockSASLAuthErr: ErrSASLAuthenticationFailed, scramClient: &MockSCRAMClient{}, scramChallengeResp: "pong", }, { name: "SASL/SCRAMSHAXXX unsupported SCRAM mechanism", mockHandshakeErr: ErrUnsupportedSASLMechanism, mockSASLAuthErr: ErrNoError, scramClient: &MockSCRAMClient{}, scramChallengeResp: "pong", }, } for i, test := range testTable { test := test t.Run(test.name, func(t *testing.T) { // mockBroker mocks underlying network logic and broker responses mockBroker := NewMockBroker(t, 0) broker := NewBroker(mockBroker.Addr()) // broker executes SASL requests against mockBroker broker.requestRate = metrics.NilMeter{} broker.outgoingByteRate = metrics.NilMeter{} broker.incomingByteRate = metrics.NilMeter{} broker.requestSize = metrics.NilHistogram{} broker.responseSize = metrics.NilHistogram{} broker.responseRate = metrics.NilMeter{} broker.requestLatency = metrics.NilHistogram{} broker.requestsInFlight = metrics.NilCounter{} mockSASLAuthResponse := NewMockSaslAuthenticateResponse(t).SetAuthBytes([]byte(test.scramChallengeResp)) mockSASLHandshakeResponse := NewMockSaslHandshakeResponse(t).SetEnabledMechanisms([]string{SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512}) if !errors.Is(test.mockSASLAuthErr, ErrNoError) { mockSASLAuthResponse = mockSASLAuthResponse.SetError(test.mockSASLAuthErr) } if !errors.Is(test.mockHandshakeErr, ErrNoError) { mockSASLHandshakeResponse = mockSASLHandshakeResponse.SetError(test.mockHandshakeErr) } mockBroker.SetHandlerByMap(map[string]MockResponse{ "SaslAuthenticateRequest": mockSASLAuthResponse, "SaslHandshakeRequest": mockSASLHandshakeResponse, }) conf := NewTestConfig() conf.Net.SASL.Mechanism = SASLTypeSCRAMSHA512 conf.Net.SASL.Version = SASLHandshakeV1 conf.Net.SASL.User = "user" conf.Net.SASL.Password = "pass" conf.Net.SASL.Enable = true conf.Net.SASL.SCRAMClientGeneratorFunc = func() SCRAMClient { return test.scramClient } conf.Version = V1_0_0_0 err := broker.Open(conf) if err != nil { t.Fatal(err) } t.Cleanup(func() { _ = broker.Close() }) _, err = broker.Connected() if !errors.Is(test.mockSASLAuthErr, ErrNoError) { if !errors.Is(err, test.mockSASLAuthErr) { t.Errorf("[%d]:[%s] Expected %s SASL authentication error, got %s\n", i, test.name, test.mockHandshakeErr, err) } } else if !errors.Is(test.mockHandshakeErr, ErrNoError) { if !errors.Is(err, test.mockHandshakeErr) { t.Errorf("[%d]:[%s] Expected %s handshake error, got %s\n", i, test.name, test.mockHandshakeErr, err) } } else if 
test.expectClientErr && err == nil { t.Errorf("[%d]:[%s] Expected a client error and got none\n", i, test.name) } else if !test.expectClientErr && err != nil { t.Errorf("[%d]:[%s] Unexpected error, got %s\n", i, test.name, err) } mockBroker.Close() }) } } func TestSASLPlainAuth(t *testing.T) { testTable := []struct { name string authidentity string mockAuthErr KError // Mock and expect error returned from SaslAuthenticateRequest mockHandshakeErr KError // Mock and expect error returned from SaslHandshakeRequest expectClientErr bool // Expect an internal client-side error }{ { name: "SASL Plain OK server response", mockAuthErr: ErrNoError, mockHandshakeErr: ErrNoError, }, { name: "SASL Plain OK server response with authidentity", authidentity: "authid", mockAuthErr: ErrNoError, mockHandshakeErr: ErrNoError, }, { name: "SASL Plain authentication failure response", mockAuthErr: ErrSASLAuthenticationFailed, mockHandshakeErr: ErrNoError, }, { name: "SASL Plain handshake failure response", mockAuthErr: ErrNoError, mockHandshakeErr: ErrSASLAuthenticationFailed, }, } for i, test := range testTable { test := test t.Run(test.name, func(t *testing.T) { // mockBroker mocks underlying network logic and broker responses mockBroker := NewMockBroker(t, 0) mockSASLAuthResponse := NewMockSaslAuthenticateResponse(t). SetAuthBytes([]byte(`response_payload`)) if !errors.Is(test.mockAuthErr, ErrNoError) { mockSASLAuthResponse = mockSASLAuthResponse.SetError(test.mockAuthErr) } mockSASLHandshakeResponse := NewMockSaslHandshakeResponse(t). SetEnabledMechanisms([]string{SASLTypePlaintext}) if !errors.Is(test.mockHandshakeErr, ErrNoError) { mockSASLHandshakeResponse = mockSASLHandshakeResponse.SetError(test.mockHandshakeErr) } mockBroker.SetHandlerByMap(map[string]MockResponse{ "SaslAuthenticateRequest": mockSASLAuthResponse, "SaslHandshakeRequest": mockSASLHandshakeResponse, }) // broker executes SASL requests against mockBroker broker := NewBroker(mockBroker.Addr()) broker.requestRate = metrics.NilMeter{} broker.outgoingByteRate = metrics.NilMeter{} broker.incomingByteRate = metrics.NilMeter{} broker.requestSize = metrics.NilHistogram{} broker.responseSize = metrics.NilHistogram{} broker.responseRate = metrics.NilMeter{} broker.requestLatency = metrics.NilHistogram{} broker.requestsInFlight = metrics.NilCounter{} conf := NewTestConfig() conf.Net.SASL.Mechanism = SASLTypePlaintext conf.Net.SASL.AuthIdentity = test.authidentity conf.Net.SASL.Enable = true conf.Net.SASL.User = "token" conf.Net.SASL.Password = "password" conf.Net.SASL.Version = SASLHandshakeV1 conf.Version = V1_0_0_0 err := broker.Open(conf) if err != nil { t.Fatal(err) } t.Cleanup(func() { _ = broker.Close() }) _, err = broker.Connected() if err == nil { for _, rr := range mockBroker.History() { switch r := rr.Request.(type) { case *SaslAuthenticateRequest: x := bytes.SplitN(r.SaslAuthBytes, []byte("\x00"), 3) if string(x[0]) != conf.Net.SASL.AuthIdentity { t.Errorf("[%d]:[%s] expected %s auth identity, got %s\n", i, test.name, conf.Net.SASL.AuthIdentity, x[0]) } if string(x[1]) != conf.Net.SASL.User { t.Errorf("[%d]:[%s] expected %s user, got %s\n", i, test.name, conf.Net.SASL.User, x[1]) } if string(x[2]) != conf.Net.SASL.Password { t.Errorf("[%d]:[%s] expected %s password, got %s\n", i, test.name, conf.Net.SASL.Password, x[2]) } } } } if !errors.Is(test.mockAuthErr, ErrNoError) { if !errors.Is(err, test.mockAuthErr) { t.Errorf("[%d]:[%s] Expected %s auth error, got %s\n", i, test.name, test.mockAuthErr, err) } } else if 
!errors.Is(test.mockHandshakeErr, ErrNoError) { if !errors.Is(err, test.mockHandshakeErr) { t.Errorf("[%d]:[%s] Expected %s handshake error, got %s\n", i, test.name, test.mockHandshakeErr, err) } } else if test.expectClientErr && err == nil { t.Errorf("[%d]:[%s] Expected a client error and got none\n", i, test.name) } else if !test.expectClientErr && err != nil { t.Errorf("[%d]:[%s] Unexpected error, got %s\n", i, test.name, err) } mockBroker.Close() }) } } // TestSASLReadTimeout ensures that the broker connection won't block forever // if the remote end never responds after the handshake func TestSASLReadTimeout(t *testing.T) { mockBroker := NewMockBroker(t, 0) defer mockBroker.Close() mockSASLAuthResponse := NewMockSaslAuthenticateResponse(t). SetAuthBytes([]byte(`response_payload`)) mockBroker.SetHandlerByMap(map[string]MockResponse{ "SaslAuthenticateRequest": mockSASLAuthResponse, }) broker := NewBroker(mockBroker.Addr()) { broker.requestRate = metrics.NilMeter{} broker.outgoingByteRate = metrics.NilMeter{} broker.incomingByteRate = metrics.NilMeter{} broker.requestSize = metrics.NilHistogram{} broker.responseSize = metrics.NilHistogram{} broker.responseRate = metrics.NilMeter{} broker.requestLatency = metrics.NilHistogram{} broker.requestsInFlight = metrics.NilCounter{} } conf := NewTestConfig() { conf.Net.ReadTimeout = time.Millisecond conf.Net.SASL.Mechanism = SASLTypePlaintext conf.Net.SASL.User = "token" conf.Net.SASL.Password = "password" conf.Net.SASL.Version = SASLHandshakeV1 conf.Net.SASL.Enable = true conf.Version = V1_0_0_0 } err := broker.Open(conf) if err != nil { t.Fatal(err) } t.Cleanup(func() { _ = broker.Close() }) _, err = broker.Connected() var nerr net.Error if !(errors.As(err, &nerr) && nerr.Timeout()) { t.Errorf("should never happen - expected read timeout got: %v", err) } } func TestGSSAPIKerberosAuth_Authorize(t *testing.T) { testTable := []struct { name string error error mockKerberosClient bool errorStage string badResponse bool badKeyChecksum bool }{ { name: "Kerberos authentication success", error: nil, mockKerberosClient: true, }, { name: "Kerberos login fails", error: krberror.NewErrorf(krberror.KDCError, "KDC_Error: AS Exchange Error: "+ "kerberos error response from KDC: KRB Error: (24) KDC_ERR_PREAUTH_FAILED Pre-authenti"+ "cation information was invalid - PREAUTH_FAILED"), mockKerberosClient: true, errorStage: "login", }, { name: "Kerberos service ticket fails", error: krberror.NewErrorf(krberror.KDCError, "KDC_Error: AS Exchange Error: "+ "kerberos error response from KDC: KRB Error: (24) KDC_ERR_PREAUTH_FAILED Pre-authenti"+ "cation information was invalid - PREAUTH_FAILED"), mockKerberosClient: true, errorStage: "service_ticket", }, { name: "Kerberos client creation fails", error: errors.New("configuration file could not be opened: krb5.conf open krb5.conf: no such file or directory"), }, { name: "Bad server response, unmarshall key error", error: errors.New("bytes shorter than header length"), badResponse: true, mockKerberosClient: true, }, { name: "Bad token checksum", error: errors.New("checksum mismatch. 
Computed: 39feb88ac2459f2b77738493, Contained in token: ffffffffffffffff00000000"), badResponse: false, badKeyChecksum: true, mockKerberosClient: true, }, } for i, test := range testTable { test := test t.Run(test.name, func(t *testing.T) { mockBroker := NewMockBroker(t, 0) // broker executes SASL requests against mockBroker mockBroker.SetGSSAPIHandler(func(bytes []byte) []byte { return nil }) broker := NewBroker(mockBroker.Addr()) broker.requestRate = metrics.NilMeter{} broker.outgoingByteRate = metrics.NilMeter{} broker.incomingByteRate = metrics.NilMeter{} broker.requestSize = metrics.NilHistogram{} broker.responseSize = metrics.NilHistogram{} broker.responseRate = metrics.NilMeter{} broker.requestLatency = metrics.NilHistogram{} broker.requestsInFlight = metrics.NilCounter{} conf := NewTestConfig() conf.Net.SASL.Mechanism = SASLTypeGSSAPI conf.Net.SASL.Enable = true conf.Net.SASL.GSSAPI.ServiceName = "kafka" conf.Net.SASL.GSSAPI.KerberosConfigPath = "krb5.conf" conf.Net.SASL.GSSAPI.Realm = "EXAMPLE.COM" conf.Net.SASL.GSSAPI.Username = "kafka" conf.Net.SASL.GSSAPI.Password = "kafka" conf.Net.SASL.GSSAPI.KeyTabPath = "kafka.keytab" conf.Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH conf.Version = V1_0_0_0 gssapiHandler := KafkaGSSAPIHandler{ client: &MockKerberosClient{}, badResponse: test.badResponse, badKeyChecksum: test.badKeyChecksum, } mockBroker.SetGSSAPIHandler(gssapiHandler.MockKafkaGSSAPI) if test.mockKerberosClient { broker.kerberosAuthenticator.NewKerberosClientFunc = func(config *GSSAPIConfig) (KerberosClient, error) { return &MockKerberosClient{ mockError: test.error, errorStage: test.errorStage, }, nil } } else { broker.kerberosAuthenticator.NewKerberosClientFunc = nil } err := broker.Open(conf) if err != nil { t.Fatal(err) } t.Cleanup(func() { _ = broker.Close() }) _, err = broker.Connected() if err != nil && test.error != nil { if test.error.Error() != err.Error() { t.Errorf("[%d] Expected error:%s, got:%s.", i, test.error, err) } } else if (err == nil && test.error != nil) || (err != nil && test.error == nil) { t.Errorf("[%d] Expected error:%s, got:%s.", i, test.error, err) } mockBroker.Close() }) } } func TestBuildClientFirstMessage(t *testing.T) { testTable := []struct { name string token *AccessToken expected []byte expectError bool }{ { name: "Build SASL client initial response with two extensions", token: &AccessToken{ Token: "the-token", Extensions: map[string]string{ "x": "1", "y": "2", }, }, expected: []byte("n,,\x01auth=Bearer the-token\x01x=1\x01y=2\x01\x01"), }, { name: "Build SASL client initial response with no extensions", token: &AccessToken{Token: "the-token"}, expected: []byte("n,,\x01auth=Bearer the-token\x01\x01"), }, { name: "Build SASL client initial response using reserved extension", token: &AccessToken{ Token: "the-token", Extensions: map[string]string{ "auth": "auth-value", }, }, expected: []byte(""), expectError: true, }, } for i, test := range testTable { test := test t.Run(test.name, func(t *testing.T) { actual, err := buildClientFirstMessage(test.token) if !reflect.DeepEqual(test.expected, actual) { t.Errorf("Expected %s, got %s\n", test.expected, actual) } if test.expectError && err == nil { t.Errorf("[%d]:[%s] Expected an error but did not get one", i, test.name) } if !test.expectError && err != nil { t.Errorf("[%d]:[%s] Expected no error but got %s\n", i, test.name, err) } }) } } func TestKip368ReAuthenticationSuccess(t *testing.T) { sessionLifetimeMs := int64(100) mockBroker := NewMockBroker(t, 0) countSaslAuthRequests := func() (count int) { 
for _, rr := range mockBroker.History() { switch rr.Request.(type) { case *SaslAuthenticateRequest: count++ } } return } mockSASLAuthResponse := NewMockSaslAuthenticateResponse(t). SetAuthBytes([]byte(`response_payload`)). SetSessionLifetimeMs(sessionLifetimeMs) mockSASLHandshakeResponse := NewMockSaslHandshakeResponse(t). SetEnabledMechanisms([]string{SASLTypePlaintext}) mockApiVersions := NewMockApiVersionsResponse(t) mockBroker.SetHandlerByMap(map[string]MockResponse{ "SaslAuthenticateRequest": mockSASLAuthResponse, "SaslHandshakeRequest": mockSASLHandshakeResponse, "ApiVersionsRequest": mockApiVersions, }) broker := NewBroker(mockBroker.Addr()) conf := NewTestConfig() conf.Net.SASL.Enable = true conf.Net.SASL.Mechanism = SASLTypePlaintext conf.Net.SASL.Version = SASLHandshakeV1 conf.Net.SASL.AuthIdentity = "authid" conf.Net.SASL.User = "token" conf.Net.SASL.Password = "password" broker.conf = conf broker.conf.Version = V2_2_0_0 err := broker.Open(conf) if err != nil { t.Fatal(err) } t.Cleanup(func() { _ = broker.Close() }) connected, err := broker.Connected() if err != nil || !connected { t.Fatal(err) } actualSaslAuthRequests := countSaslAuthRequests() if actualSaslAuthRequests != 1 { t.Fatalf("unexpected number of SaslAuthRequests during initial authentication: %d", actualSaslAuthRequests) } timeout := time.After(time.Duration(sessionLifetimeMs) * time.Millisecond) loop: for actualSaslAuthRequests < 2 { select { case <-timeout: break loop default: time.Sleep(10 * time.Millisecond) // put some traffic on the wire _, err = broker.ApiVersions(&ApiVersionsRequest{}) if err != nil { t.Fatal(err) } actualSaslAuthRequests = countSaslAuthRequests() } } if actualSaslAuthRequests < 2 { t.Fatalf("sasl reauth has not occurred within expected timeframe") } mockBroker.Close() } func TestKip368ReAuthenticationFailure(t *testing.T) { sessionLifetimeMs := int64(100) mockBroker := NewMockBroker(t, 0) mockSASLAuthResponse := NewMockSaslAuthenticateResponse(t). SetAuthBytes([]byte(`response_payload`)). SetSessionLifetimeMs(sessionLifetimeMs) mockSASLAuthErrorResponse := NewMockSaslAuthenticateResponse(t). SetError(ErrSASLAuthenticationFailed) mockSASLHandshakeResponse := NewMockSaslHandshakeResponse(t). 
SetEnabledMechanisms([]string{SASLTypePlaintext}) mockApiVersions := NewMockApiVersionsResponse(t) mockBroker.SetHandlerByMap(map[string]MockResponse{ "SaslAuthenticateRequest": mockSASLAuthResponse, "SaslHandshakeRequest": mockSASLHandshakeResponse, "ApiVersionsRequest": mockApiVersions, }) broker := NewBroker(mockBroker.Addr()) conf := NewTestConfig() conf.Net.SASL.Enable = true conf.Net.SASL.Mechanism = SASLTypePlaintext conf.Net.SASL.Version = SASLHandshakeV1 conf.Net.SASL.AuthIdentity = "authid" conf.Net.SASL.User = "token" conf.Net.SASL.Password = "password" broker.conf = conf broker.conf.Version = V2_2_0_0 err := broker.Open(conf) if err != nil { t.Fatal(err) } t.Cleanup(func() { _ = broker.Close() }) connected, err := broker.Connected() if err != nil || !connected { t.Fatal(err) } mockBroker.SetHandlerByMap(map[string]MockResponse{ "SaslAuthenticateRequest": mockSASLAuthErrorResponse, "SaslHandshakeRequest": mockSASLHandshakeResponse, "ApiVersionsRequest": mockApiVersions, }) timeout := time.After(time.Duration(sessionLifetimeMs) * time.Millisecond) var apiVersionError error loop: for apiVersionError == nil { select { case <-timeout: break loop default: time.Sleep(10 * time.Millisecond) // put some traffic on the wire _, apiVersionError = broker.ApiVersions(&ApiVersionsRequest{}) } } if !errors.Is(apiVersionError, ErrSASLAuthenticationFailed) { t.Fatalf("sasl reauth has not failed in the expected way %v", apiVersionError) } mockBroker.Close() } // We're not testing encoding/decoding here, so most of the requests/responses will be empty for simplicity's sake var brokerTestTable = []struct { version KafkaVersion name string response []byte runner func(*testing.T, *Broker) }{ { V0_10_0_0, "MetadataRequest", []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, func(t *testing.T, broker *Broker) { request := MetadataRequest{} response, err := broker.GetMetadata(&request) if err != nil { t.Error(err) } if response == nil { t.Error("Metadata request got no response!") } }, }, { V0_10_0_0, "ConsumerMetadataRequest", []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 't', 0x00, 0x00, 0x00, 0x00}, func(t *testing.T, broker *Broker) { request := ConsumerMetadataRequest{} response, err := broker.GetConsumerMetadata(&request) if err != nil { t.Error(err) } if response == nil { t.Error("Consumer Metadata request got no response!") } }, }, { V0_10_0_0, "ProduceRequest (NoResponse)", []byte{}, func(t *testing.T, broker *Broker) { request := ProduceRequest{} request.RequiredAcks = NoResponse response, err := broker.Produce(&request) if err != nil { t.Error(err) } if response != nil { t.Error("Produce request with NoResponse got a response!") } }, }, { V0_10_0_0, "ProduceRequest (NoResponse) using AsyncProduce", []byte{}, func(t *testing.T, broker *Broker) { request := ProduceRequest{} request.RequiredAcks = NoResponse err := broker.AsyncProduce(&request, nil) if err != nil { t.Error(err) } }, }, { V0_10_0_0, "ProduceRequest (WaitForLocal)", []byte{0x00, 0x00, 0x00, 0x00}, func(t *testing.T, broker *Broker) { request := ProduceRequest{} request.RequiredAcks = WaitForLocal response, err := broker.Produce(&request) if err != nil { t.Error(err) } if response == nil { t.Error("Produce request without NoResponse got no response!") } }, }, { V0_10_0_0, "ProduceRequest (WaitForLocal) using AsyncProduce", []byte{0x00, 0x00, 0x00, 0x00}, func(t *testing.T, broker *Broker) { request := ProduceRequest{} request.RequiredAcks = WaitForLocal produceResPromise := newProduceResponsePromise() err := 
broker.AsyncProduce(&request, produceResPromise.callback) if err != nil { t.Error(err) } response, err := produceResPromise.Get() if err != nil { t.Error(err) } if response == nil { t.Error("Produce request without NoResponse got no response!") } }, }, { V0_10_0_0, "FetchRequest", []byte{0x00, 0x00, 0x00, 0x00}, func(t *testing.T, broker *Broker) { request := FetchRequest{} response, err := broker.Fetch(&request) if err != nil { t.Error(err) } if response == nil { t.Error("Fetch request got no response!") } }, }, { V0_10_0_0, "OffsetFetchRequest", []byte{0x00, 0x00, 0x00, 0x00}, func(t *testing.T, broker *Broker) { request := OffsetFetchRequest{} response, err := broker.FetchOffset(&request) if err != nil { t.Error(err) } if response == nil { t.Error("OffsetFetch request got no response!") } }, }, { V0_10_0_0, "OffsetCommitRequest", []byte{0x00, 0x00, 0x00, 0x00}, func(t *testing.T, broker *Broker) { request := OffsetCommitRequest{} response, err := broker.CommitOffset(&request) if err != nil { t.Error(err) } if response == nil { t.Error("OffsetCommit request got no response!") } }, }, { V0_10_0_0, "OffsetRequest", []byte{0x00, 0x00, 0x00, 0x00}, func(t *testing.T, broker *Broker) { request := OffsetRequest{} response, err := broker.GetAvailableOffsets(&request) if err != nil { t.Error(err) } if response == nil { t.Error("Offset request got no response!") } }, }, { V0_10_0_0, "JoinGroupRequest", []byte{0x00, 0x17, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, func(t *testing.T, broker *Broker) { request := JoinGroupRequest{} response, err := broker.JoinGroup(&request) if err != nil { t.Error(err) } if response == nil { t.Error("JoinGroup request got no response!") } }, }, { V0_10_0_0, "SyncGroupRequest", []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, func(t *testing.T, broker *Broker) { request := SyncGroupRequest{} response, err := broker.SyncGroup(&request) if err != nil { t.Error(err) } if response == nil { t.Error("SyncGroup request got no response!") } }, }, { V0_10_0_0, "LeaveGroupRequest", []byte{0x00, 0x00}, func(t *testing.T, broker *Broker) { request := LeaveGroupRequest{} response, err := broker.LeaveGroup(&request) if err != nil { t.Error(err) } if response == nil { t.Error("LeaveGroup request got no response!") } }, }, { V0_10_0_0, "HeartbeatRequest", []byte{0x00, 0x00}, func(t *testing.T, broker *Broker) { request := HeartbeatRequest{} response, err := broker.Heartbeat(&request) if err != nil { t.Error(err) } if response == nil { t.Error("Heartbeat request got no response!") } }, }, { V0_10_0_0, "ListGroupsRequest", []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, func(t *testing.T, broker *Broker) { request := ListGroupsRequest{} response, err := broker.ListGroups(&request) if err != nil { t.Error(err) } if response == nil { t.Error("ListGroups request got no response!") } }, }, { V0_10_0_0, "DescribeGroupsRequest", []byte{0x00, 0x00, 0x00, 0x00}, func(t *testing.T, broker *Broker) { request := DescribeGroupsRequest{} response, err := broker.DescribeGroups(&request) if err != nil { t.Error(err) } if response == nil { t.Error("DescribeGroups request got no response!") } }, }, { V0_10_0_0, "ApiVersionsRequest", []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, func(t *testing.T, broker *Broker) { request := ApiVersionsRequest{} response, err := broker.ApiVersions(&request) if err != nil { t.Error(err) } if response == nil { t.Error("ApiVersions request got no response!") } }, }, { V1_1_0_0, "DeleteGroupsRequest", []byte{0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00}, func(t *testing.T, broker *Broker) { request := DeleteGroupsRequest{} response, err := broker.DeleteGroups(&request) if err != nil { t.Error(err) } if response == nil { t.Error("DeleteGroups request got no response!") } }, }, { V2_4_0_0, "DeleteOffsetsRequest", []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, func(t *testing.T, broker *Broker) { request := DeleteOffsetsRequest{} response, err := broker.DeleteOffsets(&request) if err != nil { t.Error(err) } if response == nil { t.Error("DeleteOffsets request got no response!") } }, }, } // We are testing the handling of failed requests or corrupt responses. var brokerFailedReqTestTable = []struct { version KafkaVersion name string stopBroker bool response []byte runner func(*testing.T, *Broker) }{ { version: V0_10_0_0, name: "ProduceRequest (NoResponse) using AsyncProduce and stopped broker", stopBroker: true, runner: func(t *testing.T, broker *Broker) { request := ProduceRequest{} request.RequiredAcks = NoResponse err := broker.AsyncProduce(&request, nil) if err == nil { t.Fatal("Expected a non nil error because broker is not listening") } t.Log("Got error:", err) }, }, { version: V0_10_0_0, name: "ProduceRequest (WaitForLocal) using AsyncProduce and stopped broker", stopBroker: true, runner: func(t *testing.T, broker *Broker) { request := ProduceRequest{} request.RequiredAcks = WaitForLocal err := broker.AsyncProduce(&request, nil) if err == nil { t.Fatal("Expected a non nil error because broker is not listening") } t.Log("Got error:", err) }, }, { version: V0_10_0_0, name: "ProduceRequest (WaitForLocal) using AsyncProduce and no response", // A nil response means the mock broker will ignore the request leading to a read timeout response: nil, runner: func(t *testing.T, broker *Broker) { request := ProduceRequest{} request.RequiredAcks = WaitForLocal produceResPromise := newProduceResponsePromise() err := broker.AsyncProduce(&request, produceResPromise.callback) if err != nil { t.Error(err) } response, err := produceResPromise.Get() if err == nil { t.Fatal("Expected a non nil error because broker is not listening") } t.Log("Got error:", err) if response != nil { t.Error("Produce request should have failed, got response:", response) } }, }, { version: V0_10_0_0, name: "ProduceRequest (WaitForLocal) using AsyncProduce and corrupt response", // Corrupt response (3 bytes vs 4) response: []byte{0x00, 0x00, 0x00}, runner: func(t *testing.T, broker *Broker) { request := ProduceRequest{} request.RequiredAcks = WaitForLocal produceResPromise := newProduceResponsePromise() err := broker.AsyncProduce(&request, produceResPromise.callback) if err != nil { t.Error(err) } response, err := produceResPromise.Get() if err == nil { t.Fatal(err) } t.Log("Got error:", err) if response != nil { t.Error("Produce request should have failed, got response:", response) } }, }, } func validateBrokerMetrics(t *testing.T, broker *Broker, mockBrokerMetrics brokerMetrics) { metricValidators := newMetricValidators() mockBrokerBytesRead := mockBrokerMetrics.bytesRead mockBrokerBytesWritten := mockBrokerMetrics.bytesWritten // Check that the number of bytes sent corresponds to what the mock broker received metricValidators.registerForAllBrokers(broker, countMeterValidator("incoming-byte-rate", mockBrokerBytesWritten)) if mockBrokerBytesWritten == 0 { // This is a ProduceRequest with NoResponse metricValidators.registerForAllBrokers(broker, countMeterValidator("response-rate", 0)) metricValidators.registerForAllBrokers(broker, 
countHistogramValidator("response-size", 0)) metricValidators.registerForAllBrokers(broker, minMaxHistogramValidator("response-size", 0, 0)) } else { metricValidators.registerForAllBrokers(broker, countMeterValidator("response-rate", 1)) metricValidators.registerForAllBrokers(broker, countHistogramValidator("response-size", 1)) metricValidators.registerForAllBrokers(broker, minMaxHistogramValidator("response-size", mockBrokerBytesWritten, mockBrokerBytesWritten)) } // Check that the number of bytes received corresponds to what the mock broker sent metricValidators.registerForAllBrokers(broker, countMeterValidator("outgoing-byte-rate", mockBrokerBytesRead)) metricValidators.registerForAllBrokers(broker, countMeterValidator("request-rate", 1)) metricValidators.registerForAllBrokers(broker, countHistogramValidator("request-size", 1)) metricValidators.registerForAllBrokers(broker, minMaxHistogramValidator("request-size", mockBrokerBytesRead, mockBrokerBytesRead)) // Check that there is no more requests in flight metricValidators.registerForAllBrokers(broker, counterValidator("requests-in-flight", 0)) // Run the validators metricValidators.run(t, broker.conf.MetricRegistry) } func BenchmarkBroker_Open(b *testing.B) { mb := NewMockBroker(nil, 0) defer mb.Close() broker := NewBroker(mb.Addr()) // Set the broker id in order to validate local broker metrics broker.id = 0 metrics.UseNilMetrics = false conf := NewTestConfig() conf.Version = V1_0_0_0 for i := 0; i < b.N; i++ { err := broker.Open(conf) if err != nil { b.Fatal(err) } broker.Close() } } func BenchmarkBroker_No_Metrics_Open(b *testing.B) { mb := NewMockBroker(nil, 0) defer mb.Close() broker := NewBroker(mb.Addr()) broker.id = 0 metrics.UseNilMetrics = true conf := NewTestConfig() conf.Version = V1_0_0_0 for i := 0; i < b.N; i++ { err := broker.Open(conf) if err != nil { b.Fatal(err) } broker.Close() } } func Test_handleThrottledResponse(t *testing.T) { mb := NewMockBroker(nil, 0) defer mb.Close() broker := NewBroker(mb.Addr()) broker.id = 0 conf := NewTestConfig() conf.Version = V1_0_0_0 throttleTimeMs := 100 throttleTime := time.Duration(throttleTimeMs) * time.Millisecond tests := []struct { name string response protocolBody expectDelay bool }{ { name: "throttled response w/millisecond field", response: &MetadataResponse{ ThrottleTimeMs: int32(throttleTimeMs), }, expectDelay: true, }, { name: "not throttled response w/millisecond field", response: &MetadataResponse{ ThrottleTimeMs: 0, }, }, { name: "throttled response w/time.Duration field", response: &ProduceResponse{ ThrottleTime: throttleTime, }, expectDelay: true, }, { name: "not throttled response w/time.Duration field", response: &ProduceResponse{ ThrottleTime: time.Duration(0), }, }, { name: "not throttled response with no throttle time field", response: &SaslHandshakeResponse{}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { broker.metricRegistry = metrics.NewRegistry() broker.brokerThrottleTime = broker.registerHistogram("throttle-time-in-ms") startTime := time.Now() broker.handleThrottledResponse(tt.response) broker.waitIfThrottled() if tt.expectDelay { if time.Since(startTime) < throttleTime { t.Fatal("expected throttling to cause delay") } if broker.brokerThrottleTime.Min() != int64(throttleTimeMs) { t.Fatal("expected throttling to update metrics") } } else { if time.Since(startTime) > throttleTime { t.Fatal("expected no throttling delay") } if broker.brokerThrottleTime.Count() != 0 { t.Fatal("expected no metrics update") } } }) } t.Run("test second 
throttle timer overrides first", func(t *testing.T) { broker.metricRegistry = metrics.NewRegistry() broker.brokerThrottleTime = broker.registerHistogram("throttle-time-in-ms") broker.handleThrottledResponse(&MetadataResponse{ ThrottleTimeMs: int32(throttleTimeMs), }) firstTimer := broker.throttleTimer broker.handleThrottledResponse(&MetadataResponse{ ThrottleTimeMs: int32(throttleTimeMs * 2), }) if firstTimer.Stop() { t.Fatal("expected first timer to be stopped") } startTime := time.Now() broker.waitIfThrottled() if time.Since(startTime) < throttleTime*2 { t.Fatal("expected throttling to use second delay") } if broker.brokerThrottleTime.Min() != int64(throttleTimeMs) { t.Fatal("expected throttling to update metrics") } if broker.brokerThrottleTime.Max() != int64(throttleTimeMs*2) { t.Fatal("expected throttling to update metrics") } }) } golang-github-ibm-sarama-1.43.2/client.go000066400000000000000000001227431461256741300201660ustar00rootroot00000000000000package sarama import ( "context" "errors" "math" "math/rand" "net" "sort" "strings" "sync" "sync/atomic" "time" "golang.org/x/net/proxy" ) // Client is a generic Kafka client. It manages connections to one or more Kafka brokers. // You MUST call Close() on a client to avoid leaks, it will not be garbage-collected // automatically when it passes out of scope. It is safe to share a client amongst many // users, however Kafka will process requests from a single client strictly in serial, // so it is generally more efficient to use the default one client per producer/consumer. type Client interface { // Config returns the Config struct of the client. This struct should not be // altered after it has been created. Config() *Config // Controller returns the cluster controller broker. It will return a // locally cached value if it's available. You can call RefreshController // to update the cached value. Requires Kafka 0.10 or higher. Controller() (*Broker, error) // RefreshController retrieves the cluster controller from fresh metadata // and stores it in the local cache. Requires Kafka 0.10 or higher. RefreshController() (*Broker, error) // Brokers returns the current set of active brokers as retrieved from cluster metadata. Brokers() []*Broker // Broker returns the active Broker if available for the broker ID. Broker(brokerID int32) (*Broker, error) // Topics returns the set of available topics as retrieved from cluster metadata. Topics() ([]string, error) // Partitions returns the sorted list of all partition IDs for the given topic. Partitions(topic string) ([]int32, error) // WritablePartitions returns the sorted list of all writable partition IDs for // the given topic, where "writable" means "having a valid leader accepting // writes". WritablePartitions(topic string) ([]int32, error) // Leader returns the broker object that is the leader of the current // topic/partition, as determined by querying the cluster metadata. Leader(topic string, partitionID int32) (*Broker, error) // LeaderAndEpoch returns the leader and its epoch for the current // topic/partition, as determined by querying the cluster metadata. LeaderAndEpoch(topic string, partitionID int32) (*Broker, int32, error) // Replicas returns the set of all replica IDs for the given partition. Replicas(topic string, partitionID int32) ([]int32, error) // InSyncReplicas returns the set of all in-sync replica IDs for the given // partition. In-sync replicas are replicas which are fully caught up with // the partition leader. 
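// The MockSCRAMClient exercised by TestSASLSCRAMSHAXXX above captures the SCRAMClient
// contract: Begin is called once with the configured credentials, then Step is driven
// with successive broker challenges until Done reports true. The sketch below shows how
// an implementation is plugged into a Config through Net.SASL.SCRAMClientGeneratorFunc.
// The noopSCRAMClient type and the exampleSCRAMConfig helper are hypothetical
// illustrations only and are not part of this package; a real client would run an
// actual SCRAM-SHA-512 conversation inside Begin and Step.
type noopSCRAMClient struct{ done bool }

func (c *noopSCRAMClient) Begin(userName, password, authzID string) error {
	// a real implementation would initialise its SCRAM conversation here
	return nil
}

func (c *noopSCRAMClient) Step(challenge string) (string, error) {
	// a real implementation would answer the broker challenge and flip done
	// only once the exchange has completed successfully
	c.done = true
	return "", nil
}

func (c *noopSCRAMClient) Done() bool { return c.done }

func exampleSCRAMConfig(user, pass string) *Config {
	conf := NewConfig()
	conf.Net.SASL.Enable = true
	conf.Net.SASL.Mechanism = SASLTypeSCRAMSHA512
	conf.Net.SASL.Version = SASLHandshakeV1
	conf.Net.SASL.User = user
	conf.Net.SASL.Password = pass
	conf.Net.SASL.SCRAMClientGeneratorFunc = func() SCRAMClient { return &noopSCRAMClient{} }
	return conf
}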
InSyncReplicas(topic string, partitionID int32) ([]int32, error) // OfflineReplicas returns the set of all offline replica IDs for the given // partition. Offline replicas are replicas which are offline OfflineReplicas(topic string, partitionID int32) ([]int32, error) // RefreshBrokers takes a list of addresses to be used as seed brokers. // Existing broker connections are closed and the updated list of seed brokers // will be used for the next metadata fetch. RefreshBrokers(addrs []string) error // RefreshMetadata takes a list of topics and queries the cluster to refresh the // available metadata for those topics. If no topics are provided, it will refresh // metadata for all topics. RefreshMetadata(topics ...string) error // GetOffset queries the cluster to get the most recent available offset at the // given time (in milliseconds) on the topic/partition combination. // Time should be OffsetOldest for the earliest available offset, // OffsetNewest for the offset of the message that will be produced next, or a time. GetOffset(topic string, partitionID int32, time int64) (int64, error) // Coordinator returns the coordinating broker for a consumer group. It will // return a locally cached value if it's available. You can call // RefreshCoordinator to update the cached value. This function only works on // Kafka 0.8.2 and higher. Coordinator(consumerGroup string) (*Broker, error) // RefreshCoordinator retrieves the coordinator for a consumer group and stores it // in local cache. This function only works on Kafka 0.8.2 and higher. RefreshCoordinator(consumerGroup string) error // Coordinator returns the coordinating broker for a transaction id. It will // return a locally cached value if it's available. You can call // RefreshCoordinator to update the cached value. This function only works on // Kafka 0.11.0.0 and higher. TransactionCoordinator(transactionID string) (*Broker, error) // RefreshCoordinator retrieves the coordinator for a transaction id and stores it // in local cache. This function only works on Kafka 0.11.0.0 and higher. RefreshTransactionCoordinator(transactionID string) error // InitProducerID retrieves information required for Idempotent Producer InitProducerID() (*InitProducerIDResponse, error) // LeastLoadedBroker retrieves broker that has the least responses pending LeastLoadedBroker() *Broker // Close shuts down all broker connections managed by this client. It is required // to call this function before a client object passes out of scope, as it will // otherwise leak memory. You must close any Producers or Consumers using a client // before you close the client. Close() error // Closed returns true if the client has already had Close called on it Closed() bool } const ( // OffsetNewest stands for the log head offset, i.e. the offset that will be // assigned to the next message that will be produced to the partition. You // can send this to a client's GetOffset method to get this offset, or when // calling ConsumePartition to start consuming new messages. OffsetNewest int64 = -1 // OffsetOldest stands for the oldest offset available on the broker for a // partition. You can send this to a client's GetOffset method to get this // offset, or when calling ConsumePartition to start consuming from the // oldest offset that is still available on the broker. OffsetOldest int64 = -2 ) type client struct { // updateMetadataMs stores the time at which metadata was lasted updated. 
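// A minimal sketch of how the OffsetNewest and OffsetOldest sentinels defined above are
// typically combined with GetOffset, here to estimate how many messages a partition
// currently retains. The helper name is illustrative and not part of this package.
func examplePartitionDepth(c Client, topic string, partition int32) (int64, error) {
	oldest, err := c.GetOffset(topic, partition, OffsetOldest)
	if err != nil {
		return 0, err
	}
	newest, err := c.GetOffset(topic, partition, OffsetNewest)
	if err != nil {
		return 0, err
	}
	// OffsetNewest is the offset the next produced message will receive, so the
	// difference is the number of messages the broker still holds for this partition.
	return newest - oldest, nil
}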
// Note: this accessed atomically so must be the first word in the struct // as per golang/go#41970 updateMetadataMs int64 conf *Config closer, closed chan none // for shutting down background metadata updater // the broker addresses given to us through the constructor are not guaranteed to be returned in // the cluster metadata (I *think* it only returns brokers who are currently leading partitions?) // so we store them separately seedBrokers []*Broker deadSeeds []*Broker controllerID int32 // cluster controller broker id brokers map[int32]*Broker // maps broker ids to brokers metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata metadataTopics map[string]none // topics that need to collect metadata coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs transactionCoordinators map[string]int32 // Maps transaction ids to coordinating broker IDs // If the number of partitions is large, we can get some churn calling cachedPartitions, // so the result is cached. It is important to update this value whenever metadata is changed cachedPartitionsResults map[string][maxPartitionIndex][]int32 lock sync.RWMutex // protects access to the maps that hold cluster state. } // NewClient creates a new Client. It connects to one of the given broker addresses // and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot // be retrieved from any of the given broker addresses, the client is not created. func NewClient(addrs []string, conf *Config) (Client, error) { DebugLogger.Println("Initializing new client") if conf == nil { conf = NewConfig() } if err := conf.Validate(); err != nil { return nil, err } if len(addrs) < 1 { return nil, ConfigurationError("You must provide at least one broker address") } if strings.Contains(addrs[0], ".servicebus.windows.net") { if conf.Version.IsAtLeast(V1_1_0_0) || !conf.Version.IsAtLeast(V0_11_0_0) { Logger.Println("Connecting to Azure Event Hubs, forcing version to V1_0_0_0 for compatibility") conf.Version = V1_0_0_0 } } client := &client{ conf: conf, closer: make(chan none), closed: make(chan none), brokers: make(map[int32]*Broker), metadata: make(map[string]map[int32]*PartitionMetadata), metadataTopics: make(map[string]none), cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32), coordinators: make(map[string]int32), transactionCoordinators: make(map[string]int32), } if conf.Net.ResolveCanonicalBootstrapServers { var err error addrs, err = client.resolveCanonicalNames(addrs) if err != nil { return nil, err } } client.randomizeSeedBrokers(addrs) if conf.Metadata.Full { // do an initial fetch of all cluster metadata by specifying an empty list of topics err := client.RefreshMetadata() if err == nil { } else if errors.Is(err, ErrLeaderNotAvailable) || errors.Is(err, ErrReplicaNotAvailable) || errors.Is(err, ErrTopicAuthorizationFailed) || errors.Is(err, ErrClusterAuthorizationFailed) { // indicates that maybe part of the cluster is down, but is not fatal to creating the client Logger.Println(err) } else { close(client.closed) // we haven't started the background updater yet, so we have to do this manually _ = client.Close() return nil, err } } go withRecover(client.backgroundMetadataUpdater) DebugLogger.Println("Successfully initialized new client") return client, nil } func (client *client) Config() *Config { return client.conf } func (client *client) Brokers() []*Broker { client.lock.RLock() defer client.lock.RUnlock() brokers := 
make([]*Broker, 0, len(client.brokers)) for _, broker := range client.brokers { brokers = append(brokers, broker) } return brokers } func (client *client) Broker(brokerID int32) (*Broker, error) { client.lock.RLock() defer client.lock.RUnlock() broker, ok := client.brokers[brokerID] if !ok { return nil, ErrBrokerNotFound } _ = broker.Open(client.conf) return broker, nil } func (client *client) InitProducerID() (*InitProducerIDResponse, error) { // FIXME: this InitProducerID seems to only be called from client_test.go (TestInitProducerIDConnectionRefused) and has been superceded by transaction_manager.go? brokerErrors := make([]error, 0) for broker := client.LeastLoadedBroker(); broker != nil; broker = client.LeastLoadedBroker() { request := &InitProducerIDRequest{} if client.conf.Version.IsAtLeast(V2_7_0_0) { // Version 4 adds the support for new error code PRODUCER_FENCED. request.Version = 4 } else if client.conf.Version.IsAtLeast(V2_5_0_0) { // Version 3 adds ProducerId and ProducerEpoch, allowing producers to try to resume after an INVALID_PRODUCER_EPOCH error request.Version = 3 } else if client.conf.Version.IsAtLeast(V2_4_0_0) { // Version 2 is the first flexible version. request.Version = 2 } else if client.conf.Version.IsAtLeast(V2_0_0_0) { // Version 1 is the same as version 0. request.Version = 1 } response, err := broker.InitProducerID(request) if err == nil { return response, nil } else { // some error, remove that broker and try again Logger.Printf("Client got error from broker %d when issuing InitProducerID : %v\n", broker.ID(), err) _ = broker.Close() brokerErrors = append(brokerErrors, err) client.deregisterBroker(broker) } } return nil, Wrap(ErrOutOfBrokers, brokerErrors...) } func (client *client) Close() error { if client.Closed() { // Chances are this is being called from a defer() and the error will go unobserved // so we go ahead and log the event in this case. 
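// As the comments around Close note, a Client must be closed explicitly, and calling
// Close on an already closed client returns ErrClosedClient. A minimal sketch of the
// intended lifecycle, assuming a reachable broker at a placeholder address; the
// function name is illustrative only.
func exampleClientLifecycle() error {
	conf := NewConfig()
	conf.Version = V1_0_0_0

	c, err := NewClient([]string{"localhost:9092"}, conf)
	if err != nil {
		return err
	}
	// close any producers or consumers built on top of the client first,
	// then the client itself; deferring Close is the usual pattern
	defer func() {
		if !c.Closed() {
			_ = c.Close()
		}
	}()

	topics, err := c.Topics()
	if err != nil {
		return err
	}
	Logger.Printf("cluster currently reports %d topics\n", len(topics))
	return nil
}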
Logger.Printf("Close() called on already closed client") return ErrClosedClient } // shutdown and wait for the background thread before we take the lock, to avoid races close(client.closer) <-client.closed client.lock.Lock() defer client.lock.Unlock() DebugLogger.Println("Closing Client") for _, broker := range client.brokers { safeAsyncClose(broker) } for _, broker := range client.seedBrokers { safeAsyncClose(broker) } client.brokers = nil client.metadata = nil client.metadataTopics = nil return nil } func (client *client) Closed() bool { client.lock.RLock() defer client.lock.RUnlock() return client.brokers == nil } func (client *client) Topics() ([]string, error) { if client.Closed() { return nil, ErrClosedClient } client.lock.RLock() defer client.lock.RUnlock() ret := make([]string, 0, len(client.metadata)) for topic := range client.metadata { ret = append(ret, topic) } return ret, nil } func (client *client) MetadataTopics() ([]string, error) { if client.Closed() { return nil, ErrClosedClient } client.lock.RLock() defer client.lock.RUnlock() ret := make([]string, 0, len(client.metadataTopics)) for topic := range client.metadataTopics { ret = append(ret, topic) } return ret, nil } func (client *client) Partitions(topic string) ([]int32, error) { if client.Closed() { return nil, ErrClosedClient } partitions := client.cachedPartitions(topic, allPartitions) if len(partitions) == 0 { err := client.RefreshMetadata(topic) if err != nil { return nil, err } partitions = client.cachedPartitions(topic, allPartitions) } // no partitions found after refresh metadata if len(partitions) == 0 { return nil, ErrUnknownTopicOrPartition } return partitions, nil } func (client *client) WritablePartitions(topic string) ([]int32, error) { if client.Closed() { return nil, ErrClosedClient } partitions := client.cachedPartitions(topic, writablePartitions) // len==0 catches when it's nil (no such topic) and the odd case when every single // partition is undergoing leader election simultaneously. Callers have to be able to handle // this function returning an empty slice (which is a valid return value) but catching it // here the first time (note we *don't* catch it below where we return ErrUnknownTopicOrPartition) triggers // a metadata refresh as a nicety so callers can just try again and don't have to manually // trigger a refresh (otherwise they'd just keep getting a stale cached copy). 
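// WritablePartitions (documented above) may legitimately return an empty slice while
// every partition is mid leader-election, whereas Partitions lists all known partitions.
// A sketch of choosing a partition to write to, retrying once after a metadata refresh;
// the helper name is illustrative only.
func examplePickWritablePartition(c Client, topic string) (int32, error) {
	writable, err := c.WritablePartitions(topic)
	if err != nil {
		return -1, err
	}
	if len(writable) == 0 {
		// leadership may simply be moving; refresh this topic and try once more
		if err := c.RefreshMetadata(topic); err != nil {
			return -1, err
		}
		if writable, err = c.WritablePartitions(topic); err != nil {
			return -1, err
		}
		if len(writable) == 0 {
			return -1, ErrLeaderNotAvailable
		}
	}
	return writable[0], nil
}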
if len(partitions) == 0 { err := client.RefreshMetadata(topic) if err != nil { return nil, err } partitions = client.cachedPartitions(topic, writablePartitions) } if partitions == nil { return nil, ErrUnknownTopicOrPartition } return partitions, nil } func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) { if client.Closed() { return nil, ErrClosedClient } metadata := client.cachedMetadata(topic, partitionID) if metadata == nil { err := client.RefreshMetadata(topic) if err != nil { return nil, err } metadata = client.cachedMetadata(topic, partitionID) } if metadata == nil { return nil, ErrUnknownTopicOrPartition } if errors.Is(metadata.Err, ErrReplicaNotAvailable) { return dupInt32Slice(metadata.Replicas), metadata.Err } return dupInt32Slice(metadata.Replicas), nil } func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) { if client.Closed() { return nil, ErrClosedClient } metadata := client.cachedMetadata(topic, partitionID) if metadata == nil { err := client.RefreshMetadata(topic) if err != nil { return nil, err } metadata = client.cachedMetadata(topic, partitionID) } if metadata == nil { return nil, ErrUnknownTopicOrPartition } if errors.Is(metadata.Err, ErrReplicaNotAvailable) { return dupInt32Slice(metadata.Isr), metadata.Err } return dupInt32Slice(metadata.Isr), nil } func (client *client) OfflineReplicas(topic string, partitionID int32) ([]int32, error) { if client.Closed() { return nil, ErrClosedClient } metadata := client.cachedMetadata(topic, partitionID) if metadata == nil { err := client.RefreshMetadata(topic) if err != nil { return nil, err } metadata = client.cachedMetadata(topic, partitionID) } if metadata == nil { return nil, ErrUnknownTopicOrPartition } if errors.Is(metadata.Err, ErrReplicaNotAvailable) { return dupInt32Slice(metadata.OfflineReplicas), metadata.Err } return dupInt32Slice(metadata.OfflineReplicas), nil } func (client *client) Leader(topic string, partitionID int32) (*Broker, error) { leader, _, err := client.LeaderAndEpoch(topic, partitionID) return leader, err } func (client *client) LeaderAndEpoch(topic string, partitionID int32) (*Broker, int32, error) { if client.Closed() { return nil, -1, ErrClosedClient } leader, epoch, err := client.cachedLeader(topic, partitionID) if leader == nil { err = client.RefreshMetadata(topic) if err != nil { return nil, -1, err } leader, epoch, err = client.cachedLeader(topic, partitionID) } return leader, epoch, err } func (client *client) RefreshBrokers(addrs []string) error { if client.Closed() { return ErrClosedClient } client.lock.Lock() defer client.lock.Unlock() for _, broker := range client.brokers { safeAsyncClose(broker) } client.brokers = make(map[int32]*Broker) for _, broker := range client.seedBrokers { safeAsyncClose(broker) } for _, broker := range client.deadSeeds { safeAsyncClose(broker) } client.seedBrokers = nil client.deadSeeds = nil client.randomizeSeedBrokers(addrs) return nil } func (client *client) RefreshMetadata(topics ...string) error { if client.Closed() { return ErrClosedClient } // Prior to 0.8.2, Kafka will throw exceptions on an empty topic and not return a proper // error. This handles the case by returning an error instead of sending it // off to Kafka. 
See: https://github.com/IBM/sarama/pull/38#issuecomment-26362310 for _, topic := range topics { if topic == "" { return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return } } deadline := time.Time{} if client.conf.Metadata.Timeout > 0 { deadline = time.Now().Add(client.conf.Metadata.Timeout) } return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max, deadline) } func (client *client) GetOffset(topic string, partitionID int32, timestamp int64) (int64, error) { if client.Closed() { return -1, ErrClosedClient } offset, err := client.getOffset(topic, partitionID, timestamp) if err != nil { if err := client.RefreshMetadata(topic); err != nil { return -1, err } return client.getOffset(topic, partitionID, timestamp) } return offset, err } func (client *client) Controller() (*Broker, error) { if client.Closed() { return nil, ErrClosedClient } if !client.conf.Version.IsAtLeast(V0_10_0_0) { return nil, ErrUnsupportedVersion } controller := client.cachedController() if controller == nil { if err := client.refreshMetadata(); err != nil { return nil, err } controller = client.cachedController() } if controller == nil { return nil, ErrControllerNotAvailable } _ = controller.Open(client.conf) return controller, nil } // deregisterController removes the cached controllerID func (client *client) deregisterController() { client.lock.Lock() defer client.lock.Unlock() if controller, ok := client.brokers[client.controllerID]; ok { _ = controller.Close() delete(client.brokers, client.controllerID) } } // RefreshController retrieves the cluster controller from fresh metadata // and stores it in the local cache. Requires Kafka 0.10 or higher. func (client *client) RefreshController() (*Broker, error) { if client.Closed() { return nil, ErrClosedClient } client.deregisterController() if err := client.refreshMetadata(); err != nil { return nil, err } controller := client.cachedController() if controller == nil { return nil, ErrControllerNotAvailable } _ = controller.Open(client.conf) return controller, nil } func (client *client) Coordinator(consumerGroup string) (*Broker, error) { if client.Closed() { return nil, ErrClosedClient } coordinator := client.cachedCoordinator(consumerGroup) if coordinator == nil { if err := client.RefreshCoordinator(consumerGroup); err != nil { return nil, err } coordinator = client.cachedCoordinator(consumerGroup) } if coordinator == nil { return nil, ErrConsumerCoordinatorNotAvailable } _ = coordinator.Open(client.conf) return coordinator, nil } func (client *client) RefreshCoordinator(consumerGroup string) error { if client.Closed() { return ErrClosedClient } response, err := client.findCoordinator(consumerGroup, CoordinatorGroup, client.conf.Metadata.Retry.Max) if err != nil { return err } client.lock.Lock() defer client.lock.Unlock() client.registerBroker(response.Coordinator) client.coordinators[consumerGroup] = response.Coordinator.ID() return nil } func (client *client) TransactionCoordinator(transactionID string) (*Broker, error) { if client.Closed() { return nil, ErrClosedClient } coordinator := client.cachedTransactionCoordinator(transactionID) if coordinator == nil { if err := client.RefreshTransactionCoordinator(transactionID); err != nil { return nil, err } coordinator = client.cachedTransactionCoordinator(transactionID) } if coordinator == nil { return nil, ErrConsumerCoordinatorNotAvailable } _ = coordinator.Open(client.conf) return coordinator, nil } func (client *client) RefreshTransactionCoordinator(transactionID string) error { 
if client.Closed() { return ErrClosedClient } response, err := client.findCoordinator(transactionID, CoordinatorTransaction, client.conf.Metadata.Retry.Max) if err != nil { return err } client.lock.Lock() defer client.lock.Unlock() client.registerBroker(response.Coordinator) client.transactionCoordinators[transactionID] = response.Coordinator.ID() return nil } // private broker management helpers func (client *client) randomizeSeedBrokers(addrs []string) { random := rand.New(rand.NewSource(time.Now().UnixNano())) for _, index := range random.Perm(len(addrs)) { client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index])) } } func (client *client) updateBroker(brokers []*Broker) { currentBroker := make(map[int32]*Broker, len(brokers)) for _, broker := range brokers { currentBroker[broker.ID()] = broker if client.brokers[broker.ID()] == nil { // add new broker client.brokers[broker.ID()] = broker DebugLogger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr()) } else if broker.Addr() != client.brokers[broker.ID()].Addr() { // replace broker with new address safeAsyncClose(client.brokers[broker.ID()]) client.brokers[broker.ID()] = broker Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr()) } } for id, broker := range client.brokers { if _, exist := currentBroker[id]; !exist { // remove old broker safeAsyncClose(broker) delete(client.brokers, id) Logger.Printf("client/broker remove invalid broker #%d with %s", broker.ID(), broker.Addr()) } } } // registerBroker makes sure a broker received by a Metadata or Coordinator request is registered // in the brokers map. It returns the broker that is registered, which may be the provided broker, // or a previously registered Broker instance. You must hold the write lock before calling this function. func (client *client) registerBroker(broker *Broker) { if client.brokers == nil { Logger.Printf("cannot register broker #%d at %s, client already closed", broker.ID(), broker.Addr()) return } if client.brokers[broker.ID()] == nil { client.brokers[broker.ID()] = broker DebugLogger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr()) } else if broker.Addr() != client.brokers[broker.ID()].Addr() { safeAsyncClose(client.brokers[broker.ID()]) client.brokers[broker.ID()] = broker Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr()) } } // deregisterBroker removes a broker from the broker list, and if it's // not in the broker list, removes it from seedBrokers. func (client *client) deregisterBroker(broker *Broker) { client.lock.Lock() defer client.lock.Unlock() _, ok := client.brokers[broker.ID()] if ok { Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr()) delete(client.brokers, broker.ID()) return } if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] { client.deadSeeds = append(client.deadSeeds, broker) client.seedBrokers = client.seedBrokers[1:] } } func (client *client) resurrectDeadBrokers() { client.lock.Lock() defer client.lock.Unlock() Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds)) client.seedBrokers = append(client.seedBrokers, client.deadSeeds...) client.deadSeeds = nil } // LeastLoadedBroker returns the broker with the least pending requests. // Firstly, choose the broker from cached broker list. If the broker list is empty, choose from seed brokers. 
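// LeastLoadedBroker, documented above and implemented below, picks the connection with
// the fewest responses outstanding, which makes it a reasonable target for one-off
// requests that are not tied to a particular partition or group. A sketch of probing
// the cluster's supported API versions that way; the helper name is illustrative only.
func exampleProbeApiVersions(c Client) (*ApiVersionsResponse, error) {
	broker := c.LeastLoadedBroker()
	if broker == nil {
		return nil, ErrOutOfBrokers
	}
	// LeastLoadedBroker already opens the connection, so the request can be sent directly
	return broker.ApiVersions(&ApiVersionsRequest{})
}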
func (client *client) LeastLoadedBroker() *Broker { client.lock.RLock() defer client.lock.RUnlock() var leastLoadedBroker *Broker pendingRequests := math.MaxInt for _, broker := range client.brokers { if pendingRequests > broker.ResponseSize() { pendingRequests = broker.ResponseSize() leastLoadedBroker = broker } } if leastLoadedBroker != nil { _ = leastLoadedBroker.Open(client.conf) return leastLoadedBroker } if len(client.seedBrokers) > 0 { _ = client.seedBrokers[0].Open(client.conf) return client.seedBrokers[0] } return leastLoadedBroker } // private caching/lazy metadata helpers type partitionType int const ( allPartitions partitionType = iota writablePartitions // If you add any more types, update the partition cache in update() // Ensure this is the last partition type value maxPartitionIndex ) func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata { client.lock.RLock() defer client.lock.RUnlock() partitions := client.metadata[topic] if partitions != nil { return partitions[partitionID] } return nil } func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 { client.lock.RLock() defer client.lock.RUnlock() partitions, exists := client.cachedPartitionsResults[topic] if !exists { return nil } return partitions[partitionSet] } func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 { partitions := client.metadata[topic] if partitions == nil { return nil } ret := make([]int32, 0, len(partitions)) for _, partition := range partitions { if partitionSet == writablePartitions && errors.Is(partition.Err, ErrLeaderNotAvailable) { continue } ret = append(ret, partition.ID) } sort.Sort(int32Slice(ret)) return ret } func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, int32, error) { client.lock.RLock() defer client.lock.RUnlock() partitions := client.metadata[topic] if partitions != nil { metadata, ok := partitions[partitionID] if ok { if errors.Is(metadata.Err, ErrLeaderNotAvailable) { return nil, -1, ErrLeaderNotAvailable } b := client.brokers[metadata.Leader] if b == nil { return nil, -1, ErrLeaderNotAvailable } _ = b.Open(client.conf) return b, metadata.LeaderEpoch, nil } } return nil, -1, ErrUnknownTopicOrPartition } func (client *client) getOffset(topic string, partitionID int32, timestamp int64) (int64, error) { broker, err := client.Leader(topic, partitionID) if err != nil { return -1, err } request := &OffsetRequest{} if client.conf.Version.IsAtLeast(V2_1_0_0) { // Version 4 adds the current leader epoch, which is used for fencing. request.Version = 4 } else if client.conf.Version.IsAtLeast(V2_0_0_0) { // Version 3 is the same as version 2. request.Version = 3 } else if client.conf.Version.IsAtLeast(V0_11_0_0) { // Version 2 adds the isolation level, which is used for transactional reads. request.Version = 2 } else if client.conf.Version.IsAtLeast(V0_10_1_0) { // Version 1 removes MaxNumOffsets. From this version forward, only a single // offset can be returned. 
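// cachedLeader above reports ErrLeaderNotAvailable both when a partition has no elected
// leader and when the leader's broker object is not (yet) known, so callers of
// LeaderAndEpoch commonly retry once after refreshing metadata. A sketch of that
// pattern; the helper name is illustrative only.
func exampleLeaderWithRetry(c Client, topic string, partition int32) (*Broker, int32, error) {
	leader, epoch, err := c.LeaderAndEpoch(topic, partition)
	if err == nil {
		return leader, epoch, nil
	}
	if !errors.Is(err, ErrLeaderNotAvailable) {
		return nil, -1, err
	}
	// leadership may be in the middle of moving; refresh the topic and try once more
	if refreshErr := c.RefreshMetadata(topic); refreshErr != nil {
		return nil, -1, refreshErr
	}
	return c.LeaderAndEpoch(topic, partition)
}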
request.Version = 1 } request.AddBlock(topic, partitionID, timestamp, 1) response, err := broker.GetAvailableOffsets(request) if err != nil { _ = broker.Close() return -1, err } block := response.GetBlock(topic, partitionID) if block == nil { _ = broker.Close() return -1, ErrIncompleteResponse } if !errors.Is(block.Err, ErrNoError) { return -1, block.Err } if len(block.Offsets) != 1 { return -1, ErrOffsetOutOfRange } return block.Offsets[0], nil } // core metadata update logic func (client *client) backgroundMetadataUpdater() { defer close(client.closed) if client.conf.Metadata.RefreshFrequency == time.Duration(0) { return } ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency) defer ticker.Stop() for { select { case <-ticker.C: if err := client.refreshMetadata(); err != nil { Logger.Println("Client background metadata update:", err) } case <-client.closer: return } } } func (client *client) refreshMetadata() error { var topics []string if !client.conf.Metadata.Full { if specificTopics, err := client.MetadataTopics(); err != nil { return err } else if len(specificTopics) == 0 { return ErrNoTopicsToUpdateMetadata } else { topics = specificTopics } } if err := client.RefreshMetadata(topics...); err != nil { return err } return nil } func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, deadline time.Time) error { pastDeadline := func(backoff time.Duration) bool { if !deadline.IsZero() && time.Now().Add(backoff).After(deadline) { // we are past the deadline return true } return false } retry := func(err error) error { if attemptsRemaining > 0 { backoff := client.computeBackoff(attemptsRemaining) if pastDeadline(backoff) { Logger.Println("client/metadata skipping last retries as we would go past the metadata timeout") return err } if backoff > 0 { time.Sleep(backoff) } t := atomic.LoadInt64(&client.updateMetadataMs) if time.Since(time.UnixMilli(t)) < backoff { return err } attemptsRemaining-- Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining) return client.tryRefreshMetadata(topics, attemptsRemaining, deadline) } return err } broker := client.LeastLoadedBroker() brokerErrors := make([]error, 0) for ; broker != nil && !pastDeadline(0); broker = client.LeastLoadedBroker() { allowAutoTopicCreation := client.conf.Metadata.AllowAutoTopicCreation if len(topics) > 0 { DebugLogger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr) } else { allowAutoTopicCreation = false DebugLogger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr) } req := NewMetadataRequest(client.conf.Version, topics) req.AllowAutoTopicCreation = allowAutoTopicCreation atomic.StoreInt64(&client.updateMetadataMs, time.Now().UnixMilli()) response, err := broker.GetMetadata(req) var kerror KError var packetEncodingError PacketEncodingError if err == nil { // When talking to the startup phase of a broker, it is possible to receive an empty metadata set. We should remove that broker and try next broker (https://issues.apache.org/jira/browse/KAFKA-7924). 
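// The retry logic in tryRefreshMetadata honours Metadata.Retry.Max, the optional
// Metadata.Retry.BackoffFunc (via computeBackoff) and the overall Metadata.Timeout
// deadline. A sketch of a config that retries metadata fetches with a simple
// exponential backoff; the concrete numbers are arbitrary examples rather than
// recommendations.
func exampleMetadataRetryConfig() *Config {
	conf := NewConfig()
	conf.Metadata.Retry.Max = 5
	conf.Metadata.Retry.BackoffFunc = func(retries, maxRetries int) time.Duration {
		// 250ms, 500ms, 1s, 2s, 4s; Metadata.Timeout below bounds the total time spent
		return time.Duration(250*(1<<retries)) * time.Millisecond
	}
	conf.Metadata.Timeout = 30 * time.Second
	conf.Metadata.RefreshFrequency = 10 * time.Minute
	return conf
}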
if len(response.Brokers) == 0 { Logger.Printf("client/metadata received an empty broker list from the metadata response when requesting broker #%d at %s\n", broker.ID(), broker.addr) _ = broker.Close() client.deregisterBroker(broker) continue } allKnownMetaData := len(topics) == 0 // valid response, use it shouldRetry, err := client.updateMetadata(response, allKnownMetaData) if shouldRetry { Logger.Println("client/metadata found some partitions to be leaderless") return retry(err) // note: err can be nil } return err } else if errors.As(err, &packetEncodingError) { // didn't even send, return the error return err } else if errors.As(err, &kerror) { // if SASL auth error return as this _should_ be a non retryable err for all brokers if errors.Is(err, ErrSASLAuthenticationFailed) { Logger.Println("client/metadata failed SASL authentication") return err } if errors.Is(err, ErrTopicAuthorizationFailed) { Logger.Println("client is not authorized to access this topic. The topics were: ", topics) return err } // else remove that broker and try again Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err) _ = broker.Close() client.deregisterBroker(broker) } else { // some other error, remove that broker and try again Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err) brokerErrors = append(brokerErrors, err) _ = broker.Close() client.deregisterBroker(broker) } } error := Wrap(ErrOutOfBrokers, brokerErrors...) if broker != nil { Logger.Printf("client/metadata not fetching metadata from broker %s as we would go past the metadata timeout\n", broker.addr) return retry(error) } Logger.Println("client/metadata no available broker to send metadata request to") client.resurrectDeadBrokers() return retry(error) } // if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bool) (retry bool, err error) { if client.Closed() { return } client.lock.Lock() defer client.lock.Unlock() // For all the brokers we received: // - if it is a new ID, save it // - if it is an existing ID, but the address we have is stale, discard the old one and save it // - if a previously known broker is missing from it, remove the old broker // - otherwise ignore it, replacing our existing one would just bounce the connection client.updateBroker(data.Brokers) client.controllerID = data.ControllerID if allKnownMetaData { client.metadata = make(map[string]map[int32]*PartitionMetadata) client.metadataTopics = make(map[string]none) client.cachedPartitionsResults = make(map[string][maxPartitionIndex][]int32) } for _, topic := range data.Topics { // topics must be added to `metadataTopics` first to guarantee that all // requested topics are recorded and remain trackable for the periodic // metadata refresh. 
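// As the error handling in tryRefreshMetadata above shows, SASL and authorization
// failures are treated as fatal while most other broker errors simply cause the next
// broker to be tried. A sketch of a caller-side wrapper that refreshes metadata for a
// set of topics and reports whether the failure looks worth retrying; the helper and
// its classification are illustrative assumptions, not part of this package.
func exampleRefreshTopics(c Client, topics ...string) (retriable bool, err error) {
	if err = c.RefreshMetadata(topics...); err == nil {
		return false, nil
	}
	switch {
	case errors.Is(err, ErrSASLAuthenticationFailed),
		errors.Is(err, ErrTopicAuthorizationFailed),
		errors.Is(err, ErrInvalidTopic):
		// misconfiguration or missing permissions: retrying will not help
		return false, err
	default:
		// e.g. ErrOutOfBrokers or a leaderless partition: worth another attempt later
		return true, err
	}
}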
if _, exists := client.metadataTopics[topic.Name]; !exists { client.metadataTopics[topic.Name] = none{} } delete(client.metadata, topic.Name) delete(client.cachedPartitionsResults, topic.Name) switch topic.Err { case ErrNoError: // no-op case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results err = topic.Err continue case ErrUnknownTopicOrPartition: // retry, do not store partial partition results err = topic.Err retry = true continue case ErrLeaderNotAvailable: // retry, but store partial partition results retry = true default: // don't retry, don't store partial results Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err) err = topic.Err continue } client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions)) for _, partition := range topic.Partitions { client.metadata[topic.Name][partition.ID] = partition if errors.Is(partition.Err, ErrLeaderNotAvailable) { retry = true } } var partitionCache [maxPartitionIndex][]int32 partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions) partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions) client.cachedPartitionsResults[topic.Name] = partitionCache } return } func (client *client) cachedCoordinator(consumerGroup string) *Broker { client.lock.RLock() defer client.lock.RUnlock() if coordinatorID, ok := client.coordinators[consumerGroup]; ok { return client.brokers[coordinatorID] } return nil } func (client *client) cachedTransactionCoordinator(transactionID string) *Broker { client.lock.RLock() defer client.lock.RUnlock() if coordinatorID, ok := client.transactionCoordinators[transactionID]; ok { return client.brokers[coordinatorID] } return nil } func (client *client) cachedController() *Broker { client.lock.RLock() defer client.lock.RUnlock() return client.brokers[client.controllerID] } func (client *client) computeBackoff(attemptsRemaining int) time.Duration { if client.conf.Metadata.Retry.BackoffFunc != nil { maxRetries := client.conf.Metadata.Retry.Max retries := maxRetries - attemptsRemaining return client.conf.Metadata.Retry.BackoffFunc(retries, maxRetries) } return client.conf.Metadata.Retry.Backoff } func (client *client) findCoordinator(coordinatorKey string, coordinatorType CoordinatorType, attemptsRemaining int) (*FindCoordinatorResponse, error) { retry := func(err error) (*FindCoordinatorResponse, error) { if attemptsRemaining > 0 { backoff := client.computeBackoff(attemptsRemaining) attemptsRemaining-- Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining) time.Sleep(backoff) return client.findCoordinator(coordinatorKey, coordinatorType, attemptsRemaining) } return nil, err } brokerErrors := make([]error, 0) for broker := client.LeastLoadedBroker(); broker != nil; broker = client.LeastLoadedBroker() { DebugLogger.Printf("client/coordinator requesting coordinator for %s from %s\n", coordinatorKey, broker.Addr()) request := new(FindCoordinatorRequest) request.CoordinatorKey = coordinatorKey request.CoordinatorType = coordinatorType // Version 1 adds KeyType. if client.conf.Version.IsAtLeast(V0_11_0_0) { request.Version = 1 } // Version 2 is the same as version 1. 
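// NOTE: the version ladder matters for transactions: FindCoordinator v0 can
// only look up group coordinators, v1 (Kafka 0.11+) adds the KeyType field
// that CoordinatorTransaction relies on, and v2 is wire-identical to v1, so
// the bump below only advertises the newer protocol version to the broker.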
if client.conf.Version.IsAtLeast(V2_0_0_0) { request.Version = 2 } response, err := broker.FindCoordinator(request) if err != nil { Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err) var packetEncodingError PacketEncodingError if errors.As(err, &packetEncodingError) { return nil, err } else { _ = broker.Close() brokerErrors = append(brokerErrors, err) client.deregisterBroker(broker) continue } } if errors.Is(response.Err, ErrNoError) { DebugLogger.Printf("client/coordinator coordinator for %s is #%d (%s)\n", coordinatorKey, response.Coordinator.ID(), response.Coordinator.Addr()) return response, nil } else if errors.Is(response.Err, ErrConsumerCoordinatorNotAvailable) { Logger.Printf("client/coordinator coordinator for %s is not available\n", coordinatorKey) // This is very ugly, but this scenario will only happen once per cluster. // The __consumer_offsets topic only has to be created one time. // The number of partitions not configurable, but partition 0 should always exist. if _, err := client.Leader("__consumer_offsets", 0); err != nil { Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. Waiting 2 seconds...\n") time.Sleep(2 * time.Second) } if coordinatorType == CoordinatorTransaction { if _, err := client.Leader("__transaction_state", 0); err != nil { Logger.Printf("client/coordinator the __transaction_state topic is not initialized completely yet. Waiting 2 seconds...\n") time.Sleep(2 * time.Second) } } return retry(ErrConsumerCoordinatorNotAvailable) } else if errors.Is(response.Err, ErrGroupAuthorizationFailed) { Logger.Printf("client was not authorized to access group %s while attempting to find coordinator", coordinatorKey) return retry(ErrGroupAuthorizationFailed) } else { return nil, response.Err } } Logger.Println("client/coordinator no available broker to send consumer metadata request to") client.resurrectDeadBrokers() return retry(Wrap(ErrOutOfBrokers, brokerErrors...)) } func (client *client) resolveCanonicalNames(addrs []string) ([]string, error) { ctx := context.Background() dialer := client.Config().getDialer() resolver := net.Resolver{ Dial: func(ctx context.Context, network, address string) (net.Conn, error) { // dial func should only be called once, so switching within is acceptable switch d := dialer.(type) { case proxy.ContextDialer: return d.DialContext(ctx, network, address) default: // we have no choice but to ignore the context return d.Dial(network, address) } }, } canonicalAddrs := make(map[string]struct{}, len(addrs)) // dedupe as we go for _, addr := range addrs { host, port, err := net.SplitHostPort(addr) if err != nil { return nil, err // message includes addr } ips, err := resolver.LookupHost(ctx, host) if err != nil { return nil, err // message includes host } for _, ip := range ips { ptrs, err := resolver.LookupAddr(ctx, ip) if err != nil { return nil, err // message includes ip } // unlike the Java client, we do not further check that PTRs resolve ptr := strings.TrimSuffix(ptrs[0], ".") // trailing dot breaks GSSAPI canonicalAddrs[net.JoinHostPort(ptr, port)] = struct{}{} } } addrs = make([]string, 0, len(canonicalAddrs)) for addr := range canonicalAddrs { addrs = append(addrs, addr) } return addrs, nil } // nopCloserClient embeds an existing Client, but disables // the Close method (yet all other methods pass // through unchanged). This is for use in larger structs // where it is undesirable to close the client that was // passed in by the caller. 
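//
// A rough usage sketch (illustrative only; existingClient is a placeholder):
//
//	// wrap the caller's client so that the derived object's Close() does not
//	// tear down the shared client; the caller stays responsible for closing it
//	wrapped := &nopCloserClient{existingClient}
//
// The *FromClient constructors in this package are the typical users of this
// wrapper; see those constructors for the authoritative usage.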
type nopCloserClient struct { Client } // Close intercepts and purposely does not call the underlying // client's Close() method. func (ncc *nopCloserClient) Close() error { return nil } golang-github-ibm-sarama-1.43.2/client_test.go000066400000000000000000001066451461256741300212300ustar00rootroot00000000000000package sarama import ( "errors" "io" "sync" "sync/atomic" "syscall" "testing" "time" "github.com/rcrowley/go-metrics" ) func safeClose(t testing.TB, c io.Closer) { t.Helper() err := c.Close() if err != nil { t.Error(err) } } func TestSimpleClient(t *testing.T) { seedBroker := NewMockBroker(t, 1) metadataResponse := new(MetadataResponse) metadataResponse.AddBroker(seedBroker.Addr(), seedBroker.BrokerID()) seedBroker.Returns(metadataResponse) client, err := NewClient([]string{seedBroker.Addr()}, NewTestConfig()) if err != nil { t.Fatal(err) } seedBroker.Close() safeClose(t, client) } func TestCachedPartitions(t *testing.T) { seedBroker := NewMockBroker(t, 1) replicas := []int32{3, 1, 5} isr := []int32{5, 1} metadataResponse := new(MetadataResponse) metadataResponse.AddBroker("localhost:12345", 2) metadataResponse.AddTopicPartition("my_topic", 0, 2, replicas, isr, []int32{}, ErrNoError) metadataResponse.AddTopicPartition("my_topic", 1, 2, replicas, isr, []int32{}, ErrLeaderNotAvailable) seedBroker.Returns(metadataResponse) config := NewTestConfig() config.Metadata.Retry.Max = 0 c, err := NewClient([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } defer safeClose(t, c) client := c.(*client) // Verify they aren't cached the same allP := client.cachedPartitionsResults["my_topic"][allPartitions] writeP := client.cachedPartitionsResults["my_topic"][writablePartitions] if len(allP) == len(writeP) { t.Fatal("Invalid lengths!") } tmp := client.cachedPartitionsResults["my_topic"] // Verify we actually use the cache at all! 
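// NOTE: the cache is overwritten with a sentinel slice of length 4 that the
// mock broker never returned; if cachedPartitions still reports 4 entries
// below, the value must have come from the cache rather than from a fresh
// metadata request.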
tmp[allPartitions] = []int32{1, 2, 3, 4} client.cachedPartitionsResults["my_topic"] = tmp if len(client.cachedPartitions("my_topic", allPartitions)) != 4 { t.Fatal("Not using the cache!") } seedBroker.Close() } func TestClientDoesntCachePartitionsForTopicsWithErrors(t *testing.T) { seedBroker := NewMockBroker(t, 1) replicas := []int32{seedBroker.BrokerID()} metadataResponse := new(MetadataResponse) metadataResponse.AddBroker(seedBroker.Addr(), seedBroker.BrokerID()) metadataResponse.AddTopicPartition("my_topic", 1, replicas[0], replicas, replicas, []int32{}, ErrNoError) metadataResponse.AddTopicPartition("my_topic", 2, replicas[0], replicas, replicas, []int32{}, ErrNoError) seedBroker.Returns(metadataResponse) config := NewTestConfig() config.Metadata.Retry.Max = 0 client, err := NewClient([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } metadataResponse = new(MetadataResponse) metadataResponse.AddBroker(seedBroker.Addr(), seedBroker.BrokerID()) metadataResponse.AddTopic("unknown", ErrUnknownTopicOrPartition) seedBroker.Returns(metadataResponse) partitions, err := client.Partitions("unknown") if !errors.Is(err, ErrUnknownTopicOrPartition) { t.Error("Expected ErrUnknownTopicOrPartition, found", err) } if partitions != nil { t.Errorf("Should return nil as partition list, found %v", partitions) } // Should still use the cache of a known topic _, err = client.Partitions("my_topic") if err != nil { t.Errorf("Expected no error, found %v", err) } metadataResponse = new(MetadataResponse) metadataResponse.AddBroker(seedBroker.Addr(), seedBroker.BrokerID()) metadataResponse.AddTopic("unknown", ErrUnknownTopicOrPartition) seedBroker.Returns(metadataResponse) // Should not use cache for unknown topic partitions, err = client.Partitions("unknown") if !errors.Is(err, ErrUnknownTopicOrPartition) { t.Error("Expected ErrUnknownTopicOrPartition, found", err) } if partitions != nil { t.Errorf("Should return nil as partition list, found %v", partitions) } seedBroker.Close() safeClose(t, client) } func TestClientSeedBrokers(t *testing.T) { seedBroker := NewMockBroker(t, 1) metadataResponse := new(MetadataResponse) metadataResponse.AddBroker("localhost:12345", 2) seedBroker.Returns(metadataResponse) client, err := NewClient([]string{seedBroker.Addr()}, NewTestConfig()) if err != nil { t.Fatal(err) } seedBroker.Close() safeClose(t, client) } func TestClientMetadata(t *testing.T) { seedBroker := NewMockBroker(t, 1) leader := NewMockBroker(t, 5) replicas := []int32{3, 1, 5} isr := []int32{5, 1} metadataResponse := new(MetadataResponse) metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), replicas, isr, []int32{}, ErrNoError) metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), replicas, isr, []int32{}, ErrLeaderNotAvailable) seedBroker.Returns(metadataResponse) config := NewTestConfig() config.Metadata.Retry.Max = 0 client, err := NewClient([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } topics, err := client.Topics() if err != nil { t.Error(err) } else if len(topics) != 1 || topics[0] != "my_topic" { t.Error("Client returned incorrect topics:", topics) } parts, err := client.Partitions("my_topic") if err != nil { t.Error(err) } else if len(parts) != 2 || parts[0] != 0 || parts[1] != 1 { t.Error("Client returned incorrect partitions for my_topic:", parts) } parts, err = client.WritablePartitions("my_topic") if err != nil { t.Error(err) } else if len(parts) != 1 || parts[0] != 0 { 
t.Error("Client returned incorrect writable partitions for my_topic:", parts) } tst, err := client.Leader("my_topic", 0) if err != nil { t.Error(err) } else if tst.ID() != 5 { t.Error("Leader for my_topic had incorrect ID.") } replicas, err = client.Replicas("my_topic", 0) if err != nil { t.Error(err) } else if replicas[0] != 3 { t.Error("Incorrect (or sorted) replica") } else if replicas[1] != 1 { t.Error("Incorrect (or sorted) replica") } else if replicas[2] != 5 { t.Error("Incorrect (or sorted) replica") } isr, err = client.InSyncReplicas("my_topic", 0) if err != nil { t.Error(err) } else if len(isr) != 2 { t.Error("Client returned incorrect ISRs for partition:", isr) } else if isr[0] != 5 { t.Error("Incorrect (or sorted) ISR:", isr) } else if isr[1] != 1 { t.Error("Incorrect (or sorted) ISR:", isr) } leader.Close() seedBroker.Close() safeClose(t, client) } func TestClientMetadataWithOfflineReplicas(t *testing.T) { seedBroker := NewMockBroker(t, 1) leader := NewMockBroker(t, 5) replicas := []int32{1, 2, 3} isr := []int32{1, 2} offlineReplicas := []int32{3} metadataResponse := new(MetadataResponse) metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), replicas, isr, offlineReplicas, ErrNoError) metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), replicas, isr, []int32{}, ErrNoError) metadataResponse.Version = 5 seedBroker.Returns(metadataResponse) config := NewTestConfig() config.Version = V1_0_0_0 config.Metadata.Retry.Max = 0 client, err := NewClient([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } topics, err := client.Topics() if err != nil { t.Error(err) } else if len(topics) != 1 || topics[0] != "my_topic" { t.Error("Client returned incorrect topics:", topics) } parts, err := client.Partitions("my_topic") if err != nil { t.Error(err) } else if len(parts) != 2 || parts[0] != 0 || parts[1] != 1 { t.Error("Client returned incorrect partitions for my_topic:", parts) } parts, err = client.WritablePartitions("my_topic") if err != nil { t.Error(err) } else if len(parts) != 2 { t.Error("Client returned incorrect writable partitions for my_topic:", parts) } tst, err := client.Leader("my_topic", 0) if err != nil { t.Error(err) } else if tst.ID() != 5 { t.Error("Leader for my_topic had incorrect ID.") } replicas, err = client.Replicas("my_topic", 0) if err != nil { t.Error(err) } else if replicas[0] != 1 { t.Error("Incorrect (or sorted) replica") } else if replicas[1] != 2 { t.Error("Incorrect (or sorted) replica") } else if replicas[2] != 3 { t.Error("Incorrect (or sorted) replica") } isr, err = client.InSyncReplicas("my_topic", 0) if err != nil { t.Error(err) } else if len(isr) != 2 { t.Error("Client returned incorrect ISRs for partition:", isr) } else if isr[0] != 1 { t.Error("Incorrect (or sorted) ISR:", isr) } else if isr[1] != 2 { t.Error("Incorrect (or sorted) ISR:", isr) } offlineReplicas, err = client.OfflineReplicas("my_topic", 0) if err != nil { t.Error(err) } else if len(offlineReplicas) != 1 { t.Error("Client returned incorrect offline replicas for partition:", offlineReplicas) } else if offlineReplicas[0] != 3 { t.Error("Incorrect offline replica:", offlineReplicas) } leader.Close() seedBroker.Close() safeClose(t, client) } func TestClientGetOffset(t *testing.T) { seedBroker := NewMockBroker(t, 1) leader := NewMockBroker(t, 2) leaderAddr := leader.Addr() metadata := new(MetadataResponse) metadata.AddTopicPartition("foo", 0, leader.BrokerID(), nil, nil, nil, ErrNoError) 
metadata.AddBroker(leaderAddr, leader.BrokerID()) seedBroker.Returns(metadata) client, err := NewClient([]string{seedBroker.Addr()}, NewTestConfig()) if err != nil { t.Fatal(err) } offsetResponse := new(OffsetResponse) offsetResponse.AddTopicPartition("foo", 0, 123) leader.Returns(offsetResponse) offset, err := client.GetOffset("foo", 0, OffsetNewest) if err != nil { t.Error(err) } if offset != 123 { t.Error("Unexpected offset, got ", offset) } leader.Close() leader = NewMockBrokerAddr(t, 2, leaderAddr) offsetResponse = new(OffsetResponse) offsetResponse.AddTopicPartition("foo", 0, 456) leader.Returns(metadata) leader.Returns(offsetResponse) offset, err = client.GetOffset("foo", 0, OffsetNewest) if err != nil { t.Error(err) } if offset != 456 { t.Error("Unexpected offset, got ", offset) } seedBroker.Close() leader.Close() safeClose(t, client) } func TestClientReceivingUnknownTopicWithBackoffFunc(t *testing.T) { seedBroker := NewMockBroker(t, 1) metadataResponse1 := new(MetadataResponse) metadataResponse1.AddBroker(seedBroker.Addr(), seedBroker.BrokerID()) seedBroker.Returns(metadataResponse1) retryCount := int32(0) config := NewTestConfig() config.Metadata.Retry.Max = 1 config.Metadata.Retry.BackoffFunc = func(retries, maxRetries int) time.Duration { atomic.AddInt32(&retryCount, 1) return 0 } client, err := NewClient([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } metadataUnknownTopic := new(MetadataResponse) metadataUnknownTopic.AddTopic("new_topic", ErrUnknownTopicOrPartition) metadataUnknownTopic.AddBroker(seedBroker.Addr(), seedBroker.BrokerID()) seedBroker.Returns(metadataUnknownTopic) seedBroker.Returns(metadataUnknownTopic) if err := client.RefreshMetadata("new_topic"); !errors.Is(err, ErrUnknownTopicOrPartition) { t.Error("ErrUnknownTopicOrPartition expected, got", err) } safeClose(t, client) seedBroker.Close() actualRetryCount := atomic.LoadInt32(&retryCount) if actualRetryCount != 1 { t.Fatalf("Expected BackoffFunc to be called exactly once, but saw %d", actualRetryCount) } } func TestClientReceivingUnknownTopic(t *testing.T) { seedBroker := NewMockBroker(t, 1) metadataResponse1 := new(MetadataResponse) metadataResponse1.AddBroker(seedBroker.Addr(), seedBroker.BrokerID()) seedBroker.Returns(metadataResponse1) config := NewTestConfig() config.Metadata.Retry.Max = 1 config.Metadata.Retry.Backoff = 0 client, err := NewClient([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } metadataUnknownTopic := new(MetadataResponse) metadataUnknownTopic.AddBroker(seedBroker.Addr(), seedBroker.BrokerID()) metadataUnknownTopic.AddTopic("new_topic", ErrUnknownTopicOrPartition) seedBroker.Returns(metadataUnknownTopic) seedBroker.Returns(metadataUnknownTopic) if err := client.RefreshMetadata("new_topic"); !errors.Is(err, ErrUnknownTopicOrPartition) { t.Error("ErrUnknownTopicOrPartition expected, got", err) } // If we are asking for the leader of a partition of the non-existing topic. // we will request metadata again. 
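// NOTE: two identical responses are queued because Metadata.Retry.Max is 1 in
// this test, so the refresh triggered by Leader() makes an initial attempt
// plus one retry before giving up with ErrUnknownTopicOrPartition.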
seedBroker.Returns(metadataUnknownTopic) seedBroker.Returns(metadataUnknownTopic) if _, err = client.Leader("new_topic", 1); !errors.Is(err, ErrUnknownTopicOrPartition) { t.Error("Expected ErrUnknownTopicOrPartition, got", err) } safeClose(t, client) seedBroker.Close() } func TestClientReceivingPartialMetadata(t *testing.T) { seedBroker := NewMockBroker(t, 1) leader := NewMockBroker(t, 5) metadataResponse1 := new(MetadataResponse) metadataResponse1.AddBroker(leader.Addr(), leader.BrokerID()) seedBroker.Returns(metadataResponse1) config := NewTestConfig() config.Metadata.Retry.Max = 0 client, err := NewClient([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } replicas := []int32{leader.BrokerID(), seedBroker.BrokerID()} metadataPartial := new(MetadataResponse) metadataPartial.AddBroker(leader.Addr(), 5) metadataPartial.AddTopic("new_topic", ErrLeaderNotAvailable) metadataPartial.AddTopicPartition("new_topic", 0, leader.BrokerID(), replicas, replicas, []int32{}, ErrNoError) metadataPartial.AddTopicPartition("new_topic", 1, -1, replicas, []int32{}, []int32{}, ErrLeaderNotAvailable) leader.Returns(metadataPartial) if err := client.RefreshMetadata("new_topic"); err != nil { t.Error("ErrLeaderNotAvailable should not make RefreshMetadata respond with an error") } // Even though the metadata was incomplete, we should be able to get the leader of a partition // for which we did get a useful response, without doing additional requests. partition0Leader, err := client.Leader("new_topic", 0) if err != nil { t.Error(err) } else if partition0Leader.Addr() != leader.Addr() { t.Error("Unexpected leader returned", partition0Leader.Addr()) } // If we are asking for the leader of a partition that didn't have a leader before, // we will do another metadata request. leader.Returns(metadataPartial) // Still no leader for the partition, so asking for it should return an error. 
_, err = client.Leader("new_topic", 1) if !errors.Is(err, ErrLeaderNotAvailable) { t.Error("Expected ErrLeaderNotAvailable, got", err) } safeClose(t, client) seedBroker.Close() leader.Close() } func TestClientRefreshBehaviourWhenEmptyMetadataResponse(t *testing.T) { seedBroker := NewMockBroker(t, 1) broker := NewMockBroker(t, 2) metadataResponse1 := new(MetadataResponse) metadataResponse1.AddBroker(seedBroker.Addr(), seedBroker.BrokerID()) seedBroker.Returns(metadataResponse1) c, err := NewClient([]string{seedBroker.Addr()}, NewTestConfig()) if err != nil { t.Fatal(err) } client := c.(*client) if len(client.seedBrokers) != 1 { t.Error("incorrect number of live seeds") } if len(client.deadSeeds) != 0 { t.Error("incorrect number of dead seeds") } if len(client.brokers) != 1 { t.Error("incorrect number of brokers") } // Empty metadata response seedBroker.Returns(new(MetadataResponse)) metadataResponse2 := new(MetadataResponse) metadataResponse2.AddBroker(seedBroker.Addr(), seedBroker.BrokerID()) metadataResponse2.AddBroker(broker.Addr(), broker.BrokerID()) seedBroker.Returns(metadataResponse2) err = c.RefreshMetadata() if err != nil { t.Fatal(err) } if len(client.seedBrokers) != 1 { t.Error("incorrect number of live seeds") } if len(client.deadSeeds) != 0 { t.Error("incorrect number of dead seeds") } if len(client.brokers) != 2 { t.Error("incorrect number of brokers") } broker.Close() seedBroker.Close() safeClose(t, client) } func TestClientRefreshBehaviour(t *testing.T) { seedBroker := NewMockBroker(t, 1) leader := NewMockBroker(t, 5) metadataResponse1 := new(MetadataResponse) metadataResponse1.AddBroker(leader.Addr(), leader.BrokerID()) seedBroker.Returns(metadataResponse1) metadataResponse2 := new(MetadataResponse) metadataResponse2.AddBroker(leader.Addr(), leader.BrokerID()) metadataResponse2.AddTopicPartition("my_topic", 0xb, leader.BrokerID(), nil, nil, nil, ErrNoError) leader.Returns(metadataResponse2) client, err := NewClient([]string{seedBroker.Addr()}, NewTestConfig()) if err != nil { t.Fatal(err) } parts, err := client.Partitions("my_topic") if err != nil { t.Error(err) } else if len(parts) != 1 || parts[0] != 0xb { t.Error("Client returned incorrect partitions for my_topic:", parts) } tst, err := client.Leader("my_topic", 0xb) if err != nil { t.Error(err) } else if tst.ID() != 5 { t.Error("Leader for my_topic had incorrect ID.") } leader.Close() seedBroker.Close() safeClose(t, client) } func TestClientRefreshBrokers(t *testing.T) { initialSeed := NewMockBroker(t, 0) defer initialSeed.Close() leader := NewMockBroker(t, 5) defer leader.Close() metadataResponse1 := new(MetadataResponse) metadataResponse1.AddBroker(leader.Addr(), leader.BrokerID()) metadataResponse1.AddBroker(initialSeed.Addr(), initialSeed.BrokerID()) initialSeed.Returns(metadataResponse1) c, err := NewClient([]string{initialSeed.Addr()}, NewTestConfig()) if err != nil { t.Fatal(err) } defer c.Close() client := c.(*client) if len(client.Brokers()) != 2 { t.Error("Meta broker is not 2") } newSeedBrokers := []string{"localhost:12345"} _ = client.RefreshBrokers(newSeedBrokers) if client.seedBrokers[0].addr != newSeedBrokers[0] { t.Error("Seed broker not updated") } if len(client.Brokers()) != 0 { t.Error("Old brokers not closed") } } func TestClientRefreshMetadataBrokerOffline(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() leader := NewMockBroker(t, 5) defer leader.Close() metadataResponse1 := new(MetadataResponse) metadataResponse1.AddBroker(leader.Addr(), leader.BrokerID()) 
metadataResponse1.AddBroker(seedBroker.Addr(), seedBroker.BrokerID()) seedBroker.Returns(metadataResponse1) client, err := NewClient([]string{seedBroker.Addr()}, NewTestConfig()) if err != nil { t.Fatal(err) } defer client.Close() if len(client.Brokers()) != 2 { t.Error("Meta broker is not 2") } metadataResponse2 := NewMockMetadataResponse(t).SetBroker(leader.Addr(), leader.BrokerID()) seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": metadataResponse2, }) leader.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": metadataResponse2, }) if err := client.RefreshMetadata(); err != nil { t.Error(err) } if len(client.Brokers()) != 1 { t.Error("Meta broker is not 1") } } func TestClientGetBroker(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() leader := NewMockBroker(t, 5) defer leader.Close() metadataResponse1 := new(MetadataResponse) metadataResponse1.AddBroker(leader.Addr(), leader.BrokerID()) metadataResponse1.AddBroker(seedBroker.Addr(), seedBroker.BrokerID()) seedBroker.Returns(metadataResponse1) client, err := NewClient([]string{seedBroker.Addr()}, NewTestConfig()) if err != nil { t.Fatal(err) } defer client.Close() broker, err := client.Broker(leader.BrokerID()) if err != nil { t.Fatal(err) } if broker.Addr() != leader.Addr() { t.Errorf("Expected broker to have address %s, found %s", leader.Addr(), broker.Addr()) } metadataResponse2 := NewMockMetadataResponse(t).SetBroker(seedBroker.Addr(), seedBroker.BrokerID()) seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": metadataResponse2, }) leader.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": metadataResponse2, }) if err := client.RefreshMetadata(); err != nil { t.Error(err) } _, err = client.Broker(leader.BrokerID()) if !errors.Is(err, ErrBrokerNotFound) { t.Errorf("Expected Broker(brokerID) to return %v found %v", ErrBrokerNotFound, err) } } func TestClientResurrectDeadSeeds(t *testing.T) { initialSeed := NewMockBroker(t, 0) metadataResponse := new(MetadataResponse) metadataResponse.AddBroker(initialSeed.Addr(), initialSeed.BrokerID()) initialSeed.Returns(metadataResponse) conf := NewTestConfig() conf.Metadata.Retry.Backoff = 0 conf.Metadata.RefreshFrequency = 0 c, err := NewClient([]string{initialSeed.Addr()}, conf) if err != nil { t.Fatal(err) } client := c.(*client) seed1 := NewMockBroker(t, 1) seed2 := NewMockBroker(t, 2) seed3 := NewMockBroker(t, 3) addr1 := seed1.Addr() addr2 := seed2.Addr() addr3 := seed3.Addr() // Overwrite the seed brokers with a fixed ordering to make this test deterministic. 
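// NOTE: the original seed broker is closed before the slice is swapped below
// so that its connection is not leaked once the three hand-ordered
// replacement seeds are installed.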
safeClose(t, client.seedBrokers[0]) client.seedBrokers = []*Broker{NewBroker(addr1), NewBroker(addr2), NewBroker(addr3)} client.deadSeeds = []*Broker{} client.brokers = map[int32]*Broker{} wg := sync.WaitGroup{} wg.Add(1) go func() { if err := client.RefreshMetadata(); err != nil { t.Error(err) } wg.Done() }() seed1.Close() seed2.Close() seed1 = NewMockBrokerAddr(t, 1, addr1) seed2 = NewMockBrokerAddr(t, 2, addr2) seed3.Close() seed1.Close() metadataResponse2 := new(MetadataResponse) metadataResponse2.AddBroker(seed2.Addr(), seed2.BrokerID()) seed2.Returns(metadataResponse2) wg.Wait() if len(client.seedBrokers) != 2 { t.Error("incorrect number of live seeds") } if len(client.deadSeeds) != 1 { t.Error("incorrect number of dead seeds") } seed2.Close() safeClose(t, c) } //nolint:paralleltest func TestClientController(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() controllerBroker := NewMockBroker(t, 2) defer controllerBroker.Close() seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(controllerBroker.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()). SetBroker(controllerBroker.Addr(), controllerBroker.BrokerID()), }) cfg := NewTestConfig() // test kafka version greater than 0.10.0.0 t.Run("V0_10_0_0", func(t *testing.T) { cfg.Version = V0_10_0_0 client1, err := NewClient([]string{seedBroker.Addr()}, cfg) if err != nil { t.Fatal(err) } defer safeClose(t, client1) broker, err := client1.Controller() if err != nil { t.Fatal(err) } if broker.Addr() != controllerBroker.Addr() { t.Errorf("Expected controller to have address %s, found %s", controllerBroker.Addr(), broker.Addr()) } }) // test kafka version earlier than 0.10.0.0 t.Run("V0_9_0_1", func(t *testing.T) { cfg.Version = V0_9_0_1 client2, err := NewClient([]string{seedBroker.Addr()}, cfg) if err != nil { t.Fatal(err) } defer safeClose(t, client2) if _, err = client2.Controller(); !errors.Is(err, ErrUnsupportedVersion) { t.Errorf("Expected Controller() to return %s, found %s", ErrUnsupportedVersion, err) } }) } func TestClientMetadataTimeout(t *testing.T) { tests := []struct { name string timeout time.Duration }{ { "timeout=250ms", 250 * time.Millisecond, // Will cut the first retry pass }, { "timeout=500ms", 500 * time.Millisecond, // Will cut the second retry pass }, { "timeout=750ms", 750 * time.Millisecond, // Will cut the third retry pass }, { "timeout=900ms", 900 * time.Millisecond, // Will stop after the three retries }, } for _, tc := range tests { tc := tc t.Run(tc.name, func(t *testing.T) { // Use a responsive broker to create a working client initialSeed := NewMockBroker(t, 0) emptyMetadata := new(MetadataResponse) emptyMetadata.AddBroker(initialSeed.Addr(), initialSeed.BrokerID()) initialSeed.Returns(emptyMetadata) conf := NewTestConfig() // Speed up the metadata request failure because of a read timeout conf.Net.ReadTimeout = 100 * time.Millisecond // Disable backoff and refresh conf.Metadata.Retry.Backoff = 0 conf.Metadata.RefreshFrequency = 0 // But configure a "global" timeout conf.Metadata.Timeout = tc.timeout c, err := NewClient([]string{initialSeed.Addr()}, conf) if err != nil { t.Fatal(err) } initialSeed.Close() client := c.(*client) // Start seed brokers that do not reply to anything and therefore a read // on the TCP connection will timeout to simulate unresponsive brokers seed1 := NewMockBroker(t, 1) defer seed1.Close() seed2 := NewMockBroker(t, 2) defer seed2.Close() // Overwrite the seed brokers with a fixed 
ordering to make this test deterministic safeClose(t, client.seedBrokers[0]) client.seedBrokers = []*Broker{NewBroker(seed1.Addr()), NewBroker(seed2.Addr())} client.deadSeeds = []*Broker{} // Start refreshing metadata in the background errChan := make(chan error) go func() { errChan <- c.RefreshMetadata() }() // Check that the refresh fails fast enough (less than twice the configured timeout) // instead of at least: 100 ms * 2 brokers * 3 retries = 800 ms maxRefreshDuration := 2 * tc.timeout select { case err := <-errChan: if err == nil { t.Fatal("Expected failed RefreshMetadata, got nil") } if !errors.Is(err, ErrOutOfBrokers) { t.Error("Expected failed RefreshMetadata with ErrOutOfBrokers, got:", err) } case <-time.After(maxRefreshDuration): t.Fatalf("RefreshMetadata did not fail fast enough after waiting for %v", maxRefreshDuration) } safeClose(t, c) }) } } func TestClientUpdateMetadataErrorAndRetry(t *testing.T) { seedBroker := NewMockBroker(t, 1) metadataResponse1 := new(MetadataResponse) metadataResponse1.AddBroker(seedBroker.Addr(), 1) seedBroker.Returns(metadataResponse1) config := NewTestConfig() config.Metadata.Retry.Max = 3 config.Metadata.Retry.Backoff = 200 * time.Millisecond config.Metadata.RefreshFrequency = 0 config.Net.ReadTimeout = 10 * time.Millisecond config.Net.WriteTimeout = 10 * time.Millisecond client, err := NewClient([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } waitGroup := sync.WaitGroup{} waitGroup.Add(10) for i := 0; i < 10; i++ { go func() { defer waitGroup.Done() var failedMetadataResponse MetadataResponse failedMetadataResponse.AddBroker(seedBroker.Addr(), 1) failedMetadataResponse.AddTopic("new_topic", ErrUnknownTopicOrPartition) seedBroker.Returns(&failedMetadataResponse) err := client.RefreshMetadata() if err == nil { t.Error("should return error") return } }() } waitGroup.Wait() safeClose(t, client) seedBroker.Close() } func TestClientCoordinatorWithConsumerOffsetsTopic(t *testing.T) { seedBroker := NewMockBroker(t, 1) coordinator := NewMockBroker(t, 2) replicas := []int32{coordinator.BrokerID()} metadataResponse1 := new(MetadataResponse) metadataResponse1.AddBroker(coordinator.Addr(), coordinator.BrokerID()) metadataResponse1.AddTopicPartition("__consumer_offsets", 0, replicas[0], replicas, replicas, []int32{}, ErrNoError) seedBroker.Returns(metadataResponse1) client, err := NewClient([]string{seedBroker.Addr()}, NewTestConfig()) if err != nil { t.Fatal(err) } coordinatorResponse1 := new(ConsumerMetadataResponse) coordinatorResponse1.Err = ErrConsumerCoordinatorNotAvailable coordinator.Returns(coordinatorResponse1) coordinatorResponse2 := new(ConsumerMetadataResponse) coordinatorResponse2.CoordinatorID = coordinator.BrokerID() coordinatorResponse2.CoordinatorHost = "127.0.0.1" coordinatorResponse2.CoordinatorPort = coordinator.Port() coordinator.Returns(coordinatorResponse2) broker, err := client.Coordinator("my_group") if err != nil { t.Error(err) } if coordinator.Addr() != broker.Addr() { t.Errorf("Expected coordinator to have address %s, found %s", coordinator.Addr(), broker.Addr()) } if coordinator.BrokerID() != broker.ID() { t.Errorf("Expected coordinator to have ID %d, found %d", coordinator.BrokerID(), broker.ID()) } // Grab the cached value broker2, err := client.Coordinator("my_group") if err != nil { t.Error(err) } if broker2.Addr() != broker.Addr() { t.Errorf("Expected the coordinator to be the same, but found %s vs. 
%s", broker2.Addr(), broker.Addr()) } coordinator.Close() seedBroker.Close() safeClose(t, client) } func TestClientCoordinatorChangeWithConsumerOffsetsTopic(t *testing.T) { seedBroker := NewMockBroker(t, 1) staleCoordinator := NewMockBroker(t, 2) freshCoordinator := NewMockBroker(t, 3) replicas := []int32{staleCoordinator.BrokerID(), freshCoordinator.BrokerID()} metadataResponse1 := new(MetadataResponse) metadataResponse1.AddBroker(staleCoordinator.Addr(), staleCoordinator.BrokerID()) metadataResponse1.AddBroker(freshCoordinator.Addr(), freshCoordinator.BrokerID()) metadataResponse1.AddTopicPartition("__consumer_offsets", 0, replicas[0], replicas, replicas, []int32{}, ErrNoError) seedBroker.Returns(metadataResponse1) client, err := NewClient([]string{seedBroker.Addr()}, NewTestConfig()) if err != nil { t.Fatal(err) } findCoordinatorResponse := NewMockFindCoordinatorResponse(t).SetCoordinator(CoordinatorGroup, "my_group", staleCoordinator) staleCoordinator.SetHandlerByMap(map[string]MockResponse{ "FindCoordinatorRequest": findCoordinatorResponse, }) freshCoordinator.SetHandlerByMap(map[string]MockResponse{ "FindCoordinatorRequest": findCoordinatorResponse, }) broker, err := client.Coordinator("my_group") if err != nil { t.Error(err) } if staleCoordinator.Addr() != broker.Addr() { t.Errorf("Expected coordinator to have address %s, found %s", staleCoordinator.Addr(), broker.Addr()) } if staleCoordinator.BrokerID() != broker.ID() { t.Errorf("Expected coordinator to have ID %d, found %d", staleCoordinator.BrokerID(), broker.ID()) } // Grab the cached value broker2, err := client.Coordinator("my_group") if err != nil { t.Error(err) } if broker2.Addr() != broker.Addr() { t.Errorf("Expected the coordinator to be the same, but found %s vs. %s", broker2.Addr(), broker.Addr()) } findCoordinatorResponse2 := NewMockFindCoordinatorResponse(t).SetCoordinator(CoordinatorGroup, "my_group", freshCoordinator) staleCoordinator.SetHandlerByMap(map[string]MockResponse{ "FindCoordinatorRequest": findCoordinatorResponse2, }) freshCoordinator.SetHandlerByMap(map[string]MockResponse{ "FindCoordinatorRequest": findCoordinatorResponse2, }) // Refresh the locally cached value because it's stale if err := client.RefreshCoordinator("my_group"); err != nil { t.Error(err) } // Grab the fresh value broker3, err := client.Coordinator("my_group") if err != nil { t.Error(err) } if broker3.Addr() != freshCoordinator.Addr() { t.Errorf("Expected the freshCoordinator to be returned, but found %s.", broker3.Addr()) } freshCoordinator.Close() staleCoordinator.Close() seedBroker.Close() safeClose(t, client) } func TestClientCoordinatorWithoutConsumerOffsetsTopic(t *testing.T) { seedBroker := NewMockBroker(t, 1) coordinator := NewMockBroker(t, 2) metadataResponse1 := new(MetadataResponse) metadataResponse1.AddBroker(seedBroker.Addr(), seedBroker.BrokerID()) seedBroker.Returns(metadataResponse1) config := NewTestConfig() config.Metadata.Retry.Max = 1 config.Metadata.Retry.Backoff = 0 client, err := NewClient([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } coordinatorResponse1 := new(ConsumerMetadataResponse) coordinatorResponse1.Err = ErrConsumerCoordinatorNotAvailable seedBroker.Returns(coordinatorResponse1) metadataResponse2 := new(MetadataResponse) metadataResponse2.AddBroker(seedBroker.Addr(), seedBroker.BrokerID()) metadataResponse2.AddTopic("__consumer_offsets", ErrUnknownTopicOrPartition) seedBroker.Returns(metadataResponse2) replicas := []int32{coordinator.BrokerID()} metadataResponse3 := 
new(MetadataResponse) metadataResponse3.AddBroker(seedBroker.Addr(), seedBroker.BrokerID()) metadataResponse3.AddTopicPartition("__consumer_offsets", 0, replicas[0], replicas, replicas, []int32{}, ErrNoError) seedBroker.Returns(metadataResponse3) coordinatorResponse2 := new(ConsumerMetadataResponse) coordinatorResponse2.CoordinatorID = coordinator.BrokerID() coordinatorResponse2.CoordinatorHost = "127.0.0.1" coordinatorResponse2.CoordinatorPort = coordinator.Port() seedBroker.Returns(coordinatorResponse2) broker, err := client.Coordinator("my_group") if err != nil { t.Error(err) } if coordinator.Addr() != broker.Addr() { t.Errorf("Expected coordinator to have address %s, found %s", coordinator.Addr(), broker.Addr()) } if coordinator.BrokerID() != broker.ID() { t.Errorf("Expected coordinator to have ID %d, found %d", coordinator.BrokerID(), broker.ID()) } coordinator.Close() seedBroker.Close() safeClose(t, client) } func TestClientAutorefreshShutdownRace(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() metadataResponse := new(MetadataResponse) metadataResponse.AddBroker(seedBroker.Addr(), seedBroker.BrokerID()) seedBroker.Returns(metadataResponse) conf := NewTestConfig() conf.Metadata.RefreshFrequency = 100 * time.Millisecond client, err := NewClient([]string{seedBroker.Addr()}, conf) if err != nil { t.Fatal(err) } // Wait for the background refresh to kick in time.Sleep(110 * time.Millisecond) errCh := make(chan error, 1) go func() { // Close the client errCh <- client.Close() close(errCh) }() // Wait for the Close to kick in time.Sleep(10 * time.Millisecond) // Then return some metadata to the still-running background thread leader := NewMockBroker(t, 2) defer leader.Close() metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) metadataResponse.AddTopicPartition("foo", 0, leader.BrokerID(), []int32{2}, []int32{2}, []int32{}, ErrNoError) seedBroker.Returns(metadataResponse) err = <-errCh if err != nil { t.Fatalf("goroutine client.Close():%s", err) } // give the update time to happen so we get a panic if it's still running (which it shouldn't) time.Sleep(10 * time.Millisecond) } func TestClientConnectionRefused(t *testing.T) { t.Parallel() seedBroker := NewMockBroker(t, 1) seedBroker.Close() _, err := NewClient([]string{seedBroker.Addr()}, NewTestConfig()) if !errors.Is(err, ErrOutOfBrokers) { t.Fatalf("unexpected error: %v", err) } if !errors.Is(err, syscall.ECONNREFUSED) { t.Fatalf("unexpected error: %v", err) } } func TestClientCoordinatorConnectionRefused(t *testing.T) { t.Parallel() seedBroker := NewMockBroker(t, 1) metadataResponse := new(MetadataResponse) metadataResponse.AddBroker(seedBroker.Addr(), seedBroker.BrokerID()) seedBroker.Returns(metadataResponse) client, err := NewClient([]string{seedBroker.Addr()}, NewTestConfig()) if err != nil { t.Fatal(err) } seedBroker.Close() _, err = client.Coordinator("my_group") if !errors.Is(err, ErrOutOfBrokers) { t.Fatalf("unexpected error: %v", err) } if !errors.Is(err, syscall.ECONNREFUSED) { t.Fatalf("unexpected error: %v", err) } safeClose(t, client) } func TestInitProducerIDConnectionRefused(t *testing.T) { t.Parallel() seedBroker := NewMockBroker(t, 1) metadataResponse := new(MetadataResponse) metadataResponse.AddBroker(seedBroker.Addr(), seedBroker.BrokerID()) metadataResponse.Version = 4 seedBroker.Returns(metadataResponse) config := NewTestConfig() config.Producer.Idempotent = true config.Version = V0_11_0_0 config.Producer.RequiredAcks = WaitForAll config.Net.MaxOpenRequests = 1 client, err := 
NewClient([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } seedBroker.Close() _, err = client.InitProducerID() if !errors.Is(err, ErrOutOfBrokers) { t.Fatalf("unexpected error: %v", err) } if !errors.Is(err, io.EOF) && !errors.Is(err, syscall.ECONNRESET) { t.Fatalf("unexpected error: %v", err) } safeClose(t, client) } func TestMetricsCleanup(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() metadataResponse := new(MetadataResponse) metadataResponse.AddBroker(seedBroker.Addr(), seedBroker.BrokerID()) seedBroker.Returns(metadataResponse) config := NewTestConfig() metrics.GetOrRegisterMeter("a", config.MetricRegistry) client, err := NewClient([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } safeClose(t, client) // Wait async close time.Sleep(10 * time.Millisecond) all := config.MetricRegistry.GetAll() if len(all) != 1 || all["a"] == nil { t.Errorf("excepted 1 metric, found: %v", all) } } golang-github-ibm-sarama-1.43.2/client_tls_test.go000066400000000000000000000141201461256741300220740ustar00rootroot00000000000000package sarama import ( "crypto/rand" "crypto/rsa" "crypto/tls" "crypto/x509" "crypto/x509/pkix" "math/big" "net" "testing" "time" ) func TestTLS(t *testing.T) { cakey, err := rsa.GenerateKey(rand.Reader, 2048) if err != nil { t.Fatal(err) } clientkey, err := rsa.GenerateKey(rand.Reader, 2048) if err != nil { t.Fatal(err) } hostkey, err := rsa.GenerateKey(rand.Reader, 2048) if err != nil { t.Fatal(err) } nvb := time.Now().Add(-1 * time.Hour) nva := time.Now().Add(1 * time.Hour) caTemplate := &x509.Certificate{ Subject: pkix.Name{CommonName: "ca"}, Issuer: pkix.Name{CommonName: "ca"}, SerialNumber: big.NewInt(0), NotAfter: nva, NotBefore: nvb, IsCA: true, BasicConstraintsValid: true, KeyUsage: x509.KeyUsageCertSign, } caDer, err := x509.CreateCertificate(rand.Reader, caTemplate, caTemplate, &cakey.PublicKey, cakey) if err != nil { t.Fatal(err) } caFinalCert, err := x509.ParseCertificate(caDer) if err != nil { t.Fatal(err) } hostDer, err := x509.CreateCertificate(rand.Reader, &x509.Certificate{ Subject: pkix.Name{CommonName: "host"}, Issuer: pkix.Name{CommonName: "ca"}, IPAddresses: []net.IP{net.IPv4(127, 0, 0, 1)}, SerialNumber: big.NewInt(0), NotAfter: nva, NotBefore: nvb, ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, }, caFinalCert, &hostkey.PublicKey, cakey) if err != nil { t.Fatal(err) } clientDer, err := x509.CreateCertificate(rand.Reader, &x509.Certificate{ Subject: pkix.Name{CommonName: "client"}, Issuer: pkix.Name{CommonName: "ca"}, SerialNumber: big.NewInt(0), NotAfter: nva, NotBefore: nvb, ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, }, caFinalCert, &clientkey.PublicKey, cakey) if err != nil { t.Fatal(err) } pool := x509.NewCertPool() pool.AddCert(caFinalCert) systemCerts, err := x509.SystemCertPool() if err != nil { t.Fatal(err) } // Keep server the same - it's the client that we're testing serverTLSConfig := &tls.Config{ Certificates: []tls.Certificate{{ Certificate: [][]byte{hostDer}, PrivateKey: hostkey, }}, ClientAuth: tls.RequireAndVerifyClientCert, ClientCAs: pool, MinVersion: tls.VersionTLS12, } for _, tc := range []struct { name string Succeed bool Server, Client *tls.Config }{ { name: "Verify client fails if wrong CA cert pool is specified", Succeed: false, Server: serverTLSConfig, Client: &tls.Config{ RootCAs: systemCerts, Certificates: []tls.Certificate{{ Certificate: [][]byte{clientDer}, PrivateKey: clientkey, }}, MinVersion: tls.VersionTLS12, }, }, { name: "Verify 
client fails if wrong key is specified", Succeed: false, Server: serverTLSConfig, Client: &tls.Config{ RootCAs: pool, Certificates: []tls.Certificate{{ Certificate: [][]byte{clientDer}, PrivateKey: hostkey, }}, MinVersion: tls.VersionTLS12, }, }, { name: "Verify client fails if wrong cert is specified", Succeed: false, Server: serverTLSConfig, Client: &tls.Config{ RootCAs: pool, Certificates: []tls.Certificate{{ Certificate: [][]byte{hostDer}, PrivateKey: clientkey, }}, MinVersion: tls.VersionTLS12, }, }, { name: "Verify client fails if no CAs are specified", Succeed: false, Server: serverTLSConfig, Client: &tls.Config{ Certificates: []tls.Certificate{{ Certificate: [][]byte{clientDer}, PrivateKey: clientkey, }}, MinVersion: tls.VersionTLS12, }, }, { name: "Verify client fails if no keys are specified", Succeed: false, Server: serverTLSConfig, Client: &tls.Config{ RootCAs: pool, MinVersion: tls.VersionTLS12, }, }, { name: "Finally, verify it all works happily with client and server cert in place", Succeed: true, Server: serverTLSConfig, Client: &tls.Config{ RootCAs: pool, Certificates: []tls.Certificate{{ Certificate: [][]byte{clientDer}, PrivateKey: clientkey, }}, MinVersion: tls.VersionTLS12, }, }, } { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() doListenerTLSTest(t, tc.Succeed, tc.Server, tc.Client) }) } } func doListenerTLSTest(t *testing.T, expectSuccess bool, serverConfig, clientConfig *tls.Config) { seedListener, err := tls.Listen("tcp", "127.0.0.1:0", serverConfig) if err != nil { t.Fatal("cannot open listener", err) } var childT *testing.T if expectSuccess { childT = t } else { childT = &testing.T{} // we want to swallow errors } seedBroker := NewMockBrokerListener(childT, 1, seedListener) defer seedBroker.Close() metadataResponse := new(MetadataResponse) metadataResponse.AddBroker(seedBroker.Addr(), seedBroker.BrokerID()) seedBroker.Returns(metadataResponse) config := NewTestConfig() config.Net.TLS.Enable = true config.Net.TLS.Config = clientConfig client, err := NewClient([]string{seedBroker.Addr()}, config) if err == nil { safeClose(t, client) } if expectSuccess { if err != nil { t.Fatal(err) } } else { if err == nil { t.Fatal("expected failure") } } } func TestSetServerName(t *testing.T) { if validServerNameTLS("kafka-server.domain.com:9093", nil).ServerName != "kafka-server.domain.com" { t.Fatal("Expected kafka-server.domain.com as tls.ServerName when tls config is nil") } if validServerNameTLS("kafka-server.domain.com:9093", &tls.Config{MinVersion: tls.VersionTLS12}).ServerName != "kafka-server.domain.com" { t.Fatal("Expected kafka-server.domain.com as tls.ServerName when tls config ServerName is not provided") } c := &tls.Config{ServerName: "kafka-server-other.domain.com", MinVersion: tls.VersionTLS12} if validServerNameTLS("", c).ServerName != "kafka-server-other.domain.com" { t.Fatal("Expected kafka-server-other.domain.com as tls.ServerName when tls config ServerName is provided") } if validServerNameTLS("host-no-port", nil).ServerName != "" { t.Fatal("Expected empty ServerName as the broker addr is missing the port") } } golang-github-ibm-sarama-1.43.2/compress.go000066400000000000000000000107541461256741300205410ustar00rootroot00000000000000package sarama import ( "bytes" "fmt" "sync" snappy "github.com/eapache/go-xerial-snappy" "github.com/klauspost/compress/gzip" "github.com/pierrec/lz4/v4" ) var ( lz4WriterPool = sync.Pool{ New: func() interface{} { return lz4.NewWriter(nil) }, } gzipWriterPool = sync.Pool{ New: func() interface{} { return 
gzip.NewWriter(nil) }, } gzipWriterPoolForCompressionLevel1 = sync.Pool{ New: func() interface{} { gz, err := gzip.NewWriterLevel(nil, 1) if err != nil { panic(err) } return gz }, } gzipWriterPoolForCompressionLevel2 = sync.Pool{ New: func() interface{} { gz, err := gzip.NewWriterLevel(nil, 2) if err != nil { panic(err) } return gz }, } gzipWriterPoolForCompressionLevel3 = sync.Pool{ New: func() interface{} { gz, err := gzip.NewWriterLevel(nil, 3) if err != nil { panic(err) } return gz }, } gzipWriterPoolForCompressionLevel4 = sync.Pool{ New: func() interface{} { gz, err := gzip.NewWriterLevel(nil, 4) if err != nil { panic(err) } return gz }, } gzipWriterPoolForCompressionLevel5 = sync.Pool{ New: func() interface{} { gz, err := gzip.NewWriterLevel(nil, 5) if err != nil { panic(err) } return gz }, } gzipWriterPoolForCompressionLevel6 = sync.Pool{ New: func() interface{} { gz, err := gzip.NewWriterLevel(nil, 6) if err != nil { panic(err) } return gz }, } gzipWriterPoolForCompressionLevel7 = sync.Pool{ New: func() interface{} { gz, err := gzip.NewWriterLevel(nil, 7) if err != nil { panic(err) } return gz }, } gzipWriterPoolForCompressionLevel8 = sync.Pool{ New: func() interface{} { gz, err := gzip.NewWriterLevel(nil, 8) if err != nil { panic(err) } return gz }, } gzipWriterPoolForCompressionLevel9 = sync.Pool{ New: func() interface{} { gz, err := gzip.NewWriterLevel(nil, 9) if err != nil { panic(err) } return gz }, } ) func compress(cc CompressionCodec, level int, data []byte) ([]byte, error) { switch cc { case CompressionNone: return data, nil case CompressionGZIP: var ( err error buf bytes.Buffer writer *gzip.Writer ) switch level { case CompressionLevelDefault: writer = gzipWriterPool.Get().(*gzip.Writer) defer gzipWriterPool.Put(writer) writer.Reset(&buf) case 1: writer = gzipWriterPoolForCompressionLevel1.Get().(*gzip.Writer) defer gzipWriterPoolForCompressionLevel1.Put(writer) writer.Reset(&buf) case 2: writer = gzipWriterPoolForCompressionLevel2.Get().(*gzip.Writer) defer gzipWriterPoolForCompressionLevel2.Put(writer) writer.Reset(&buf) case 3: writer = gzipWriterPoolForCompressionLevel3.Get().(*gzip.Writer) defer gzipWriterPoolForCompressionLevel3.Put(writer) writer.Reset(&buf) case 4: writer = gzipWriterPoolForCompressionLevel4.Get().(*gzip.Writer) defer gzipWriterPoolForCompressionLevel4.Put(writer) writer.Reset(&buf) case 5: writer = gzipWriterPoolForCompressionLevel5.Get().(*gzip.Writer) defer gzipWriterPoolForCompressionLevel5.Put(writer) writer.Reset(&buf) case 6: writer = gzipWriterPoolForCompressionLevel6.Get().(*gzip.Writer) defer gzipWriterPoolForCompressionLevel6.Put(writer) writer.Reset(&buf) case 7: writer = gzipWriterPoolForCompressionLevel7.Get().(*gzip.Writer) defer gzipWriterPoolForCompressionLevel7.Put(writer) writer.Reset(&buf) case 8: writer = gzipWriterPoolForCompressionLevel8.Get().(*gzip.Writer) defer gzipWriterPoolForCompressionLevel8.Put(writer) writer.Reset(&buf) case 9: writer = gzipWriterPoolForCompressionLevel9.Get().(*gzip.Writer) defer gzipWriterPoolForCompressionLevel9.Put(writer) writer.Reset(&buf) default: writer, err = gzip.NewWriterLevel(&buf, level) if err != nil { return nil, err } } if _, err := writer.Write(data); err != nil { return nil, err } if err := writer.Close(); err != nil { return nil, err } return buf.Bytes(), nil case CompressionSnappy: return snappy.Encode(data), nil case CompressionLZ4: writer := lz4WriterPool.Get().(*lz4.Writer) defer lz4WriterPool.Put(writer) var buf bytes.Buffer writer.Reset(&buf) if _, err := writer.Write(data); 
err != nil { return nil, err } if err := writer.Close(); err != nil { return nil, err } return buf.Bytes(), nil case CompressionZSTD: return zstdCompress(ZstdEncoderParams{level}, nil, data) default: return nil, PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", cc)} } } golang-github-ibm-sarama-1.43.2/config.go000066400000000000000000001147611461256741300201560ustar00rootroot00000000000000package sarama import ( "crypto/tls" "fmt" "io" "net" "regexp" "time" "github.com/klauspost/compress/gzip" "github.com/rcrowley/go-metrics" "golang.org/x/net/proxy" ) const defaultClientID = "sarama" // validClientID specifies the permitted characters for a client.id when // connecting to Kafka versions before 1.0.0 (KIP-190) var validClientID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`) // Config is used to pass multiple configuration options to Sarama's constructors. type Config struct { // Admin is the namespace for ClusterAdmin properties used by the administrative Kafka client. Admin struct { Retry struct { // The total number of times to retry sending (retriable) admin requests (default 5). // Similar to the `retries` setting of the JVM AdminClientConfig. Max int // Backoff time between retries of a failed request (default 100ms) Backoff time.Duration } // The maximum duration the administrative Kafka client will wait for ClusterAdmin operations, // including topics, brokers, configurations and ACLs (defaults to 3 seconds). Timeout time.Duration } // Net is the namespace for network-level properties used by the Broker, and // shared by the Client/Producer/Consumer. Net struct { // How many outstanding requests a connection is allowed to have before // sending on it blocks (default 5). // Throughput can improve but message ordering is not guaranteed if Producer.Idempotent is disabled, see: // https://kafka.apache.org/protocol#protocol_network // https://kafka.apache.org/28/documentation.html#producerconfigs_max.in.flight.requests.per.connection MaxOpenRequests int // All three of the below configurations are similar to the // `socket.timeout.ms` setting in JVM kafka. All of them default // to 30 seconds. DialTimeout time.Duration // How long to wait for the initial connection. ReadTimeout time.Duration // How long to wait for a response. WriteTimeout time.Duration // How long to wait for a transmit. // ResolveCanonicalBootstrapServers turns each bootstrap broker address // into a set of IPs, then does a reverse lookup on each one to get its // canonical hostname. This list of hostnames then replaces the // original address list. Similar to the `client.dns.lookup` option in // the JVM client, this is especially useful with GSSAPI, where it // allows providing an alias record instead of individual broker // hostnames. Defaults to false. ResolveCanonicalBootstrapServers bool TLS struct { // Whether or not to use TLS when connecting to the broker // (defaults to false). Enable bool // The TLS configuration to use for secure connections if // enabled (defaults to nil). Config *tls.Config } // SASL based authentication with broker. While there are multiple SASL authentication methods // the current implementation is limited to plaintext (SASL/PLAIN) authentication SASL struct { // Whether or not to use SASL authentication when connecting to the broker // (defaults to false). Enable bool // SASLMechanism is the name of the enabled SASL mechanism. // Possible values: OAUTHBEARER, PLAIN (defaults to PLAIN). 
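//
// A minimal sketch of enabling SASL/PLAIN through this namespace (the
// credential values are placeholders):
//
//	conf := NewConfig()
//	conf.Net.SASL.Enable = true
//	conf.Net.SASL.Mechanism = SASLTypePlaintext
//	conf.Net.SASL.User = "my-user"
//	conf.Net.SASL.Password = "my-password"
//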
Mechanism SASLMechanism // Version is the SASL Protocol Version to use // Kafka > 1.x should use V1, except on Azure EventHub which use V0 Version int16 // Whether or not to send the Kafka SASL handshake first if enabled // (defaults to true). You should only set this to false if you're using // a non-Kafka SASL proxy. Handshake bool // AuthIdentity is an (optional) authorization identity (authzid) to // use for SASL/PLAIN authentication (if different from User) when // an authenticated user is permitted to act as the presented // alternative user. See RFC4616 for details. AuthIdentity string // User is the authentication identity (authcid) to present for // SASL/PLAIN or SASL/SCRAM authentication User string // Password for SASL/PLAIN authentication Password string // authz id used for SASL/SCRAM authentication SCRAMAuthzID string // SCRAMClientGeneratorFunc is a generator of a user provided implementation of a SCRAM // client used to perform the SCRAM exchange with the server. SCRAMClientGeneratorFunc func() SCRAMClient // TokenProvider is a user-defined callback for generating // access tokens for SASL/OAUTHBEARER auth. See the // AccessTokenProvider interface docs for proper implementation // guidelines. TokenProvider AccessTokenProvider GSSAPI GSSAPIConfig } // KeepAlive specifies the keep-alive period for an active network connection (defaults to 0). // If zero or positive, keep-alives are enabled. // If negative, keep-alives are disabled. KeepAlive time.Duration // LocalAddr is the local address to use when dialing an // address. The address must be of a compatible type for the // network being dialed. // If nil, a local address is automatically chosen. LocalAddr net.Addr Proxy struct { // Whether or not to use proxy when connecting to the broker // (defaults to false). Enable bool // The proxy dialer to use enabled (defaults to nil). Dialer proxy.Dialer } } // Metadata is the namespace for metadata management properties used by the // Client, and shared by the Producer/Consumer. Metadata struct { Retry struct { // The total number of times to retry a metadata request when the // cluster is in the middle of a leader election (default 3). Max int // How long to wait for leader election to occur before retrying // (default 250ms). Similar to the JVM's `retry.backoff.ms`. Backoff time.Duration // Called to compute backoff time dynamically. Useful for implementing // more sophisticated backoff strategies. This takes precedence over // `Backoff` if set. BackoffFunc func(retries, maxRetries int) time.Duration } // How frequently to refresh the cluster metadata in the background. // Defaults to 10 minutes. Set to 0 to disable. Similar to // `topic.metadata.refresh.interval.ms` in the JVM version. RefreshFrequency time.Duration // Whether to maintain a full set of metadata for all topics, or just // the minimal set that has been necessary so far. The full set is simpler // and usually more convenient, but can take up a substantial amount of // memory if you have many topics and partitions. Defaults to true. Full bool // How long to wait for a successful metadata response. // Disabled by default which means a metadata request against an unreachable // cluster (all brokers are unreachable or unresponsive) can take up to // `Net.[Dial|Read]Timeout * BrokerCount * (Metadata.Retry.Max + 1) + Metadata.Retry.Backoff * Metadata.Retry.Max` // to fail. Timeout time.Duration // Whether to allow auto-create topics in metadata refresh. 
If set to true, // the broker may auto-create topics that we requested which do not already exist, // if it is configured to do so (`auto.create.topics.enable` is true). Defaults to true. AllowAutoTopicCreation bool } // Producer is the namespace for configuration related to producing messages, // used by the Producer. Producer struct { // The maximum permitted size of a message (defaults to 1000000). Should be // set equal to or smaller than the broker's `message.max.bytes`. MaxMessageBytes int // The level of acknowledgement reliability needed from the broker (defaults // to WaitForLocal). Equivalent to the `request.required.acks` setting of the // JVM producer. RequiredAcks RequiredAcks // The maximum duration the broker will wait the receipt of the number of // RequiredAcks (defaults to 10 seconds). This is only relevant when // RequiredAcks is set to WaitForAll or a number > 1. Only supports // millisecond resolution, nanoseconds will be truncated. Equivalent to // the JVM producer's `request.timeout.ms` setting. Timeout time.Duration // The type of compression to use on messages (defaults to no compression). // Similar to `compression.codec` setting of the JVM producer. Compression CompressionCodec // The level of compression to use on messages. The meaning depends // on the actual compression type used and defaults to default compression // level for the codec. CompressionLevel int // Generates partitioners for choosing the partition to send messages to // (defaults to hashing the message key). Similar to the `partitioner.class` // setting for the JVM producer. Partitioner PartitionerConstructor // If enabled, the producer will ensure that exactly one copy of each message is // written. Idempotent bool // Transaction specify Transaction struct { // Used in transactions to identify an instance of a producer through restarts ID string // Amount of time a transaction can remain unresolved (neither committed nor aborted) // default is 1 min Timeout time.Duration Retry struct { // The total number of times to retry sending a message (default 50). // Similar to the `message.send.max.retries` setting of the JVM producer. Max int // How long to wait for the cluster to settle between retries // (default 10ms). Similar to the `retry.backoff.ms` setting of the // JVM producer. Backoff time.Duration // Called to compute backoff time dynamically. Useful for implementing // more sophisticated backoff strategies. This takes precedence over // `Backoff` if set. BackoffFunc func(retries, maxRetries int) time.Duration } } // Return specifies what channels will be populated. If they are set to true, // you must read from the respective channels to prevent deadlock. If, // however, this config is used to create a `SyncProducer`, both must be set // to true and you shall not read from the channels since the producer does // this internally. Return struct { // If enabled, successfully delivered messages will be returned on the // Successes channel (default disabled). Successes bool // If enabled, messages that failed to deliver will be returned on the // Errors channel, including error (default enabled). Errors bool } // The following config options control how often messages are batched up and // sent to the broker. By default, messages are sent as fast as possible, and // all messages received while the current batch is in-flight are placed // into the subsequent batch. Flush struct { // The best-effort number of bytes needed to trigger a flush. 
Use the // global sarama.MaxRequestSize to set a hard upper limit. Bytes int // The best-effort number of messages needed to trigger a flush. Use // `MaxMessages` to set a hard upper limit. Messages int // The best-effort frequency of flushes. Equivalent to // `queue.buffering.max.ms` setting of JVM producer. Frequency time.Duration // The maximum number of messages the producer will send in a single // broker request. Defaults to 0 for unlimited. Similar to // `queue.buffering.max.messages` in the JVM producer. MaxMessages int } Retry struct { // The total number of times to retry sending a message (default 3). // Similar to the `message.send.max.retries` setting of the JVM producer. Max int // How long to wait for the cluster to settle between retries // (default 100ms). Similar to the `retry.backoff.ms` setting of the // JVM producer. Backoff time.Duration // Called to compute backoff time dynamically. Useful for implementing // more sophisticated backoff strategies. This takes precedence over // `Backoff` if set. BackoffFunc func(retries, maxRetries int) time.Duration } // Interceptors to be called when the producer dispatcher reads the // message for the first time. Interceptors allows to intercept and // possible mutate the message before they are published to Kafka // cluster. *ProducerMessage modified by the first interceptor's // OnSend() is passed to the second interceptor OnSend(), and so on in // the interceptor chain. Interceptors []ProducerInterceptor } // Consumer is the namespace for configuration related to consuming messages, // used by the Consumer. Consumer struct { // Group is the namespace for configuring consumer group. Group struct { Session struct { // The timeout used to detect consumer failures when using Kafka's group management facility. // The consumer sends periodic heartbeats to indicate its liveness to the broker. // If no heartbeats are received by the broker before the expiration of this session timeout, // then the broker will remove this consumer from the group and initiate a rebalance. // Note that the value must be in the allowable range as configured in the broker configuration // by `group.min.session.timeout.ms` and `group.max.session.timeout.ms` (default 10s) Timeout time.Duration } Heartbeat struct { // The expected time between heartbeats to the consumer coordinator when using Kafka's group // management facilities. Heartbeats are used to ensure that the consumer's session stays active and // to facilitate rebalancing when new consumers join or leave the group. // The value must be set lower than Consumer.Group.Session.Timeout, but typically should be set no // higher than 1/3 of that value. // It can be adjusted even lower to control the expected time for normal rebalances (default 3s) Interval time.Duration } Rebalance struct { // Strategy for allocating topic partitions to members. // Deprecated: Strategy exists for historical compatibility // and should not be used. Please use GroupStrategies. Strategy BalanceStrategy // GroupStrategies is the priority-ordered list of client-side consumer group // balancing strategies that will be offered to the coordinator. The first // strategy that all group members support will be chosen by the leader. // default: [ NewBalanceStrategyRange() ] GroupStrategies []BalanceStrategy // The maximum allowed time for each worker to join the group once a rebalance has begun. // This is basically a limit on the amount of time needed for all tasks to flush any pending // data and commit offsets. 
If the timeout is exceeded, then the worker will be removed from // the group, which will cause offset commit failures (default 60s). Timeout time.Duration Retry struct { // When a new consumer joins a consumer group the set of consumers attempt to "rebalance" // the load to assign partitions to each consumer. If the set of consumers changes while // this assignment is taking place the rebalance will fail and retry. This setting controls // the maximum number of attempts before giving up (default 4). Max int // Backoff time between retries during rebalance (default 2s) Backoff time.Duration } } Member struct { // Custom metadata to include when joining the group. The user data for all joined members // can be retrieved by sending a DescribeGroupRequest to the broker that is the // coordinator for the group. UserData []byte } // support KIP-345 InstanceId string // If true, consumer offsets will be automatically reset to configured Initial value // if the fetched consumer offset is out of range of available offsets. Out of range // can happen if the data has been deleted from the server, or during situations of // under-replication where a replica does not have all the data yet. It can be // dangerous to reset the offset automatically, particularly in the latter case. Defaults // to true to maintain existing behavior. ResetInvalidOffsets bool } Retry struct { // How long to wait after a failing to read from a partition before // trying again (default 2s). Backoff time.Duration // Called to compute backoff time dynamically. Useful for implementing // more sophisticated backoff strategies. This takes precedence over // `Backoff` if set. BackoffFunc func(retries int) time.Duration } // Fetch is the namespace for controlling how many bytes are retrieved by any // given request. Fetch struct { // The minimum number of message bytes to fetch in a request - the broker // will wait until at least this many are available. The default is 1, // as 0 causes the consumer to spin when no messages are available. // Equivalent to the JVM's `fetch.min.bytes`. Min int32 // The default number of message bytes to fetch from the broker in each // request (default 1MB). This should be larger than the majority of // your messages, or else the consumer will spend a lot of time // negotiating sizes and not actually consuming. Similar to the JVM's // `fetch.message.max.bytes`. Default int32 // The maximum number of message bytes to fetch from the broker in a // single request. Messages larger than this will return // ErrMessageTooLarge and will not be consumable, so you must be sure // this is at least as large as your largest message. Defaults to 0 // (no limit). Similar to the JVM's `fetch.message.max.bytes`. The // global `sarama.MaxResponseSize` still applies. Max int32 } // The maximum amount of time the broker will wait for Consumer.Fetch.Min // bytes to become available before it returns fewer than that anyways. The // default is 250ms, since 0 causes the consumer to spin when no events are // available. 100-500ms is a reasonable range for most cases. Kafka only // supports precision up to milliseconds; nanoseconds will be truncated. // Equivalent to the JVM's `fetch.wait.max.ms`. MaxWaitTime time.Duration // The maximum amount of time the consumer expects a message takes to // process for the user. If writing to the Messages channel takes longer // than this, that partition will stop fetching more messages until it // can proceed again. 
// Note that, since the Messages channel is buffered, the actual grace time is // (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms. // If a message is not written to the Messages channel between two ticks // of the expiryTicker then a timeout is detected. // Using a ticker instead of a timer to detect timeouts should typically // result in many fewer calls to Timer functions which may result in a // significant performance improvement if many messages are being sent // and timeouts are infrequent. // The disadvantage of using a ticker instead of a timer is that // timeouts will be less accurate. That is, the effective timeout could // be between `MaxProcessingTime` and `2 * MaxProcessingTime`. For // example, if `MaxProcessingTime` is 100ms then a delay of 180ms // between two messages being sent may not be recognized as a timeout. MaxProcessingTime time.Duration // Return specifies what channels will be populated. If they are set to true, // you must read from them to prevent deadlock. Return struct { // If enabled, any errors that occurred while consuming are returned on // the Errors channel (default disabled). Errors bool } // Offsets specifies configuration for how and when to commit consumed // offsets. This currently requires the manual use of an OffsetManager // but will eventually be automated. Offsets struct { // Deprecated: CommitInterval exists for historical compatibility // and should not be used. Please use Consumer.Offsets.AutoCommit CommitInterval time.Duration // AutoCommit specifies configuration for commit messages automatically. AutoCommit struct { // Whether or not to auto-commit updated offsets back to the broker. // (default enabled). Enable bool // How frequently to commit updated offsets. Ineffective unless // auto-commit is enabled (default 1s) Interval time.Duration } // The initial offset to use if no offset was previously committed. // Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest. Initial int64 // The retention duration for committed offsets. If zero, disabled // (in which case the `offsets.retention.minutes` option on the // broker will be used). Kafka only supports precision up to // milliseconds; nanoseconds will be truncated. Requires Kafka // broker version 0.9.0 or later. // (default is 0: disabled). Retention time.Duration Retry struct { // The total number of times to retry failing commit // requests during OffsetManager shutdown (default 3). Max int } } // IsolationLevel support 2 mode: // - use `ReadUncommitted` (default) to consume and return all messages in message channel // - use `ReadCommitted` to hide messages that are part of an aborted transaction IsolationLevel IsolationLevel // Interceptors to be called just before the record is sent to the // messages channel. Interceptors allows to intercept and possible // mutate the message before they are returned to the client. // *ConsumerMessage modified by the first interceptor's OnConsume() is // passed to the second interceptor OnConsume(), and so on in the // interceptor chain. Interceptors []ConsumerInterceptor } // A user-provided string sent with every request to the brokers for logging, // debugging, and auditing purposes. Defaults to "sarama", but you should // probably set it to something specific to your application. ClientID string // A rack identifier for this client. This can be any string value which // indicates where this client is physically located. 
// It corresponds with the broker config 'broker.rack' RackID string // The number of events to buffer in internal and external channels. This // permits the producer and consumer to continue processing some messages // in the background while user code is working, greatly improving throughput. // Defaults to 256. ChannelBufferSize int // ApiVersionsRequest determines whether Sarama should send an // ApiVersionsRequest message to each broker as part of its initial // connection. This defaults to `true` to match the official Java client // and most 3rdparty ones. ApiVersionsRequest bool // The version of Kafka that Sarama will assume it is running against. // Defaults to the oldest supported stable version. Since Kafka provides // backwards-compatibility, setting it to a version older than you have // will not break anything, although it may prevent you from using the // latest features. Setting it to a version greater than you are actually // running may lead to random breakage. Version KafkaVersion // The registry to define metrics into. // Defaults to a local registry. // If you want to disable metrics gathering, set "metrics.UseNilMetrics" to "true" // prior to starting Sarama. // See Examples on how to use the metrics registry MetricRegistry metrics.Registry } // NewConfig returns a new configuration instance with sane defaults. func NewConfig() *Config { c := &Config{} c.Admin.Retry.Max = 5 c.Admin.Retry.Backoff = 100 * time.Millisecond c.Admin.Timeout = 3 * time.Second c.Net.MaxOpenRequests = 5 c.Net.DialTimeout = 30 * time.Second c.Net.ReadTimeout = 30 * time.Second c.Net.WriteTimeout = 30 * time.Second c.Net.SASL.Handshake = true c.Net.SASL.Version = SASLHandshakeV1 c.Metadata.Retry.Max = 3 c.Metadata.Retry.Backoff = 250 * time.Millisecond c.Metadata.RefreshFrequency = 10 * time.Minute c.Metadata.Full = true c.Metadata.AllowAutoTopicCreation = true c.Producer.MaxMessageBytes = 1024 * 1024 c.Producer.RequiredAcks = WaitForLocal c.Producer.Timeout = 10 * time.Second c.Producer.Partitioner = NewHashPartitioner c.Producer.Retry.Max = 3 c.Producer.Retry.Backoff = 100 * time.Millisecond c.Producer.Return.Errors = true c.Producer.CompressionLevel = CompressionLevelDefault c.Producer.Transaction.Timeout = 1 * time.Minute c.Producer.Transaction.Retry.Max = 50 c.Producer.Transaction.Retry.Backoff = 100 * time.Millisecond c.Consumer.Fetch.Min = 1 c.Consumer.Fetch.Default = 1024 * 1024 c.Consumer.Retry.Backoff = 2 * time.Second c.Consumer.MaxWaitTime = 500 * time.Millisecond c.Consumer.MaxProcessingTime = 100 * time.Millisecond c.Consumer.Return.Errors = false c.Consumer.Offsets.AutoCommit.Enable = true c.Consumer.Offsets.AutoCommit.Interval = 1 * time.Second c.Consumer.Offsets.Initial = OffsetNewest c.Consumer.Offsets.Retry.Max = 3 c.Consumer.Group.Session.Timeout = 10 * time.Second c.Consumer.Group.Heartbeat.Interval = 3 * time.Second c.Consumer.Group.Rebalance.GroupStrategies = []BalanceStrategy{NewBalanceStrategyRange()} c.Consumer.Group.Rebalance.Timeout = 60 * time.Second c.Consumer.Group.Rebalance.Retry.Max = 4 c.Consumer.Group.Rebalance.Retry.Backoff = 2 * time.Second c.Consumer.Group.ResetInvalidOffsets = true c.ClientID = defaultClientID c.ChannelBufferSize = 256 c.ApiVersionsRequest = true c.Version = DefaultVersion c.MetricRegistry = metrics.NewRegistry() return c } // Validate checks a Config instance. It will return a // ConfigurationError if the specified values don't make sense. 
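// As a rough sketch (an editorial addition, not upstream documentation), a caller
// can validate its overrides explicitly before handing the config to a constructor;
// the ClientID value below is a placeholder:
//
//	cfg := sarama.NewConfig()
//	cfg.ClientID = "my-service" // placeholder application name
//	cfg.Producer.Return.Successes = true
//	if err := cfg.Validate(); err != nil {
//		log.Fatalf("invalid sarama config: %v", err)
//	}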
// //nolint:gocyclo // This function's cyclomatic complexity has go beyond 100 func (c *Config) Validate() error { // some configuration values should be warned on but not fail completely, do those first if !c.Net.TLS.Enable && c.Net.TLS.Config != nil { Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.") } if !c.Net.SASL.Enable { if c.Net.SASL.User != "" { Logger.Println("Net.SASL is disabled but a non-empty username was provided.") } if c.Net.SASL.Password != "" { Logger.Println("Net.SASL is disabled but a non-empty password was provided.") } } if c.Producer.RequiredAcks > 1 { Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.") } if c.Producer.MaxMessageBytes >= int(MaxRequestSize) { Logger.Println("Producer.MaxMessageBytes must be smaller than MaxRequestSize; it will be ignored.") } if c.Producer.Flush.Bytes >= int(MaxRequestSize) { Logger.Println("Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored.") } if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 { Logger.Println("Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed.") } if c.Producer.Timeout%time.Millisecond != 0 { Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.") } if c.Consumer.MaxWaitTime < 100*time.Millisecond { Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.") } if c.Consumer.MaxWaitTime%time.Millisecond != 0 { Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.") } if c.Consumer.Offsets.Retention%time.Millisecond != 0 { Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.") } if c.Consumer.Group.Session.Timeout%time.Millisecond != 0 { Logger.Println("Consumer.Group.Session.Timeout only supports millisecond precision; nanoseconds will be truncated.") } if c.Consumer.Group.Heartbeat.Interval%time.Millisecond != 0 { Logger.Println("Consumer.Group.Heartbeat.Interval only supports millisecond precision; nanoseconds will be truncated.") } if c.Consumer.Group.Rebalance.Timeout%time.Millisecond != 0 { Logger.Println("Consumer.Group.Rebalance.Timeout only supports millisecond precision; nanoseconds will be truncated.") } if c.ClientID == defaultClientID { Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.") } // validate Net values switch { case c.Net.MaxOpenRequests <= 0: return ConfigurationError("Net.MaxOpenRequests must be > 0") case c.Net.DialTimeout <= 0: return ConfigurationError("Net.DialTimeout must be > 0") case c.Net.ReadTimeout <= 0: return ConfigurationError("Net.ReadTimeout must be > 0") case c.Net.WriteTimeout <= 0: return ConfigurationError("Net.WriteTimeout must be > 0") case c.Net.SASL.Enable: if c.Net.SASL.Mechanism == "" { c.Net.SASL.Mechanism = SASLTypePlaintext } switch c.Net.SASL.Mechanism { case SASLTypePlaintext: if c.Net.SASL.User == "" { return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled") } if c.Net.SASL.Password == "" { return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled") } case SASLTypeOAuth: if c.Net.SASL.TokenProvider == nil { return ConfigurationError("An AccessTokenProvider instance must be provided to Net.SASL.TokenProvider") } case 
SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512: if c.Net.SASL.User == "" { return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled") } if c.Net.SASL.Password == "" { return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled") } if c.Net.SASL.SCRAMClientGeneratorFunc == nil { return ConfigurationError("A SCRAMClientGeneratorFunc function must be provided to Net.SASL.SCRAMClientGeneratorFunc") } case SASLTypeGSSAPI: if c.Net.SASL.GSSAPI.ServiceName == "" { return ConfigurationError("Net.SASL.GSSAPI.ServiceName must not be empty when GSS-API mechanism is used") } switch c.Net.SASL.GSSAPI.AuthType { case KRB5_USER_AUTH: if c.Net.SASL.GSSAPI.Password == "" { return ConfigurationError("Net.SASL.GSSAPI.Password must not be empty when GSS-API " + "mechanism is used and Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH") } case KRB5_KEYTAB_AUTH: if c.Net.SASL.GSSAPI.KeyTabPath == "" { return ConfigurationError("Net.SASL.GSSAPI.KeyTabPath must not be empty when GSS-API mechanism is used" + " and Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH") } case KRB5_CCACHE_AUTH: if c.Net.SASL.GSSAPI.CCachePath == "" { return ConfigurationError("Net.SASL.GSSAPI.CCachePath must not be empty when GSS-API mechanism is used" + " and Net.SASL.GSSAPI.AuthType = KRB5_CCACHE_AUTH") } default: return ConfigurationError("Net.SASL.GSSAPI.AuthType is invalid. Possible values are KRB5_USER_AUTH, KRB5_KEYTAB_AUTH, and KRB5_CCACHE_AUTH") } if c.Net.SASL.GSSAPI.KerberosConfigPath == "" { return ConfigurationError("Net.SASL.GSSAPI.KerberosConfigPath must not be empty when GSS-API mechanism is used") } if c.Net.SASL.GSSAPI.Username == "" { return ConfigurationError("Net.SASL.GSSAPI.Username must not be empty when GSS-API mechanism is used") } if c.Net.SASL.GSSAPI.Realm == "" { return ConfigurationError("Net.SASL.GSSAPI.Realm must not be empty when GSS-API mechanism is used") } default: msg := fmt.Sprintf("The SASL mechanism configuration is invalid. 
Possible values are `%s`, `%s`, `%s`, `%s` and `%s`", SASLTypeOAuth, SASLTypePlaintext, SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512, SASLTypeGSSAPI) return ConfigurationError(msg) } } // validate the Admin values switch { case c.Admin.Timeout <= 0: return ConfigurationError("Admin.Timeout must be > 0") } // validate the Metadata values switch { case c.Metadata.Retry.Max < 0: return ConfigurationError("Metadata.Retry.Max must be >= 0") case c.Metadata.Retry.Backoff < 0: return ConfigurationError("Metadata.Retry.Backoff must be >= 0") case c.Metadata.RefreshFrequency < 0: return ConfigurationError("Metadata.RefreshFrequency must be >= 0") } // validate the Producer values switch { case c.Producer.MaxMessageBytes <= 0: return ConfigurationError("Producer.MaxMessageBytes must be > 0") case c.Producer.RequiredAcks < -1: return ConfigurationError("Producer.RequiredAcks must be >= -1") case c.Producer.Timeout <= 0: return ConfigurationError("Producer.Timeout must be > 0") case c.Producer.Partitioner == nil: return ConfigurationError("Producer.Partitioner must not be nil") case c.Producer.Flush.Bytes < 0: return ConfigurationError("Producer.Flush.Bytes must be >= 0") case c.Producer.Flush.Messages < 0: return ConfigurationError("Producer.Flush.Messages must be >= 0") case c.Producer.Flush.Frequency < 0: return ConfigurationError("Producer.Flush.Frequency must be >= 0") case c.Producer.Flush.MaxMessages < 0: return ConfigurationError("Producer.Flush.MaxMessages must be >= 0") case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages: return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set") case c.Producer.Retry.Max < 0: return ConfigurationError("Producer.Retry.Max must be >= 0") case c.Producer.Retry.Backoff < 0: return ConfigurationError("Producer.Retry.Backoff must be >= 0") } if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) { return ConfigurationError("lz4 compression requires Version >= V0_10_0_0") } if c.Producer.Compression == CompressionGZIP { if c.Producer.CompressionLevel != CompressionLevelDefault { if _, err := gzip.NewWriterLevel(io.Discard, c.Producer.CompressionLevel); err != nil { return ConfigurationError(fmt.Sprintf("gzip compression does not work with level %d: %v", c.Producer.CompressionLevel, err)) } } } if c.Producer.Compression == CompressionZSTD && !c.Version.IsAtLeast(V2_1_0_0) { return ConfigurationError("zstd compression requires Version >= V2_1_0_0") } if c.Producer.Idempotent { if !c.Version.IsAtLeast(V0_11_0_0) { return ConfigurationError("Idempotent producer requires Version >= V0_11_0_0") } if c.Producer.Retry.Max == 0 { return ConfigurationError("Idempotent producer requires Producer.Retry.Max >= 1") } if c.Producer.RequiredAcks != WaitForAll { return ConfigurationError("Idempotent producer requires Producer.RequiredAcks to be WaitForAll") } if c.Net.MaxOpenRequests > 1 { return ConfigurationError("Idempotent producer requires Net.MaxOpenRequests to be 1") } } if c.Producer.Transaction.ID != "" && !c.Producer.Idempotent { return ConfigurationError("Transactional producer requires Idempotent to be true") } // validate the Consumer values switch { case c.Consumer.Fetch.Min <= 0: return ConfigurationError("Consumer.Fetch.Min must be > 0") case c.Consumer.Fetch.Default <= 0: return ConfigurationError("Consumer.Fetch.Default must be > 0") case c.Consumer.Fetch.Max < 0: return ConfigurationError("Consumer.Fetch.Max must be >= 0") case c.Consumer.MaxWaitTime < 
1*time.Millisecond: return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms") case c.Consumer.MaxProcessingTime <= 0: return ConfigurationError("Consumer.MaxProcessingTime must be > 0") case c.Consumer.Retry.Backoff < 0: return ConfigurationError("Consumer.Retry.Backoff must be >= 0") case c.Consumer.Offsets.AutoCommit.Interval <= 0: return ConfigurationError("Consumer.Offsets.AutoCommit.Interval must be > 0") case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest: return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest") case c.Consumer.Offsets.Retry.Max < 0: return ConfigurationError("Consumer.Offsets.Retry.Max must be >= 0") case c.Consumer.IsolationLevel != ReadUncommitted && c.Consumer.IsolationLevel != ReadCommitted: return ConfigurationError("Consumer.IsolationLevel must be ReadUncommitted or ReadCommitted") } if c.Consumer.Offsets.CommitInterval != 0 { Logger.Println("Deprecation warning: Consumer.Offsets.CommitInterval exists for historical compatibility" + " and should not be used. Please use Consumer.Offsets.AutoCommit, the current value will be ignored") } if c.Consumer.Group.Rebalance.Strategy != nil { Logger.Println("Deprecation warning: Consumer.Group.Rebalance.Strategy exists for historical compatibility" + " and should not be used. Please use Consumer.Group.Rebalance.GroupStrategies") } // validate IsolationLevel if c.Consumer.IsolationLevel == ReadCommitted && !c.Version.IsAtLeast(V0_11_0_0) { return ConfigurationError("ReadCommitted requires Version >= V0_11_0_0") } // validate the Consumer Group values switch { case c.Consumer.Group.Session.Timeout <= 2*time.Millisecond: return ConfigurationError("Consumer.Group.Session.Timeout must be >= 2ms") case c.Consumer.Group.Heartbeat.Interval < 1*time.Millisecond: return ConfigurationError("Consumer.Group.Heartbeat.Interval must be >= 1ms") case c.Consumer.Group.Heartbeat.Interval >= c.Consumer.Group.Session.Timeout: return ConfigurationError("Consumer.Group.Heartbeat.Interval must be < Consumer.Group.Session.Timeout") case c.Consumer.Group.Rebalance.Strategy == nil && len(c.Consumer.Group.Rebalance.GroupStrategies) == 0: return ConfigurationError("Consumer.Group.Rebalance.GroupStrategies or Consumer.Group.Rebalance.Strategy must not be empty") case c.Consumer.Group.Rebalance.Timeout <= time.Millisecond: return ConfigurationError("Consumer.Group.Rebalance.Timeout must be >= 1ms") case c.Consumer.Group.Rebalance.Retry.Max < 0: return ConfigurationError("Consumer.Group.Rebalance.Retry.Max must be >= 0") case c.Consumer.Group.Rebalance.Retry.Backoff < 0: return ConfigurationError("Consumer.Group.Rebalance.Retry.Backoff must be >= 0") } for _, strategy := range c.Consumer.Group.Rebalance.GroupStrategies { if strategy == nil { return ConfigurationError("elements in Consumer.Group.Rebalance.Strategies must not be empty") } } if c.Consumer.Group.InstanceId != "" { if !c.Version.IsAtLeast(V2_3_0_0) { return ConfigurationError("Consumer.Group.InstanceId need Version >= 2.3") } if err := validateGroupInstanceId(c.Consumer.Group.InstanceId); err != nil { return err } } // validate misc shared values switch { case c.ChannelBufferSize < 0: return ConfigurationError("ChannelBufferSize must be >= 0") } // only validate clientID locally for Kafka versions before KIP-190 was implemented if !c.Version.IsAtLeast(V1_0_0_0) && !validClientID.MatchString(c.ClientID) { return ConfigurationError(fmt.Sprintf("ClientID value %q is not valid for Kafka versions before 1.0.0", 
c.ClientID)) } return nil } func (c *Config) getDialer() proxy.Dialer { if c.Net.Proxy.Enable { Logger.Println("using proxy") return c.Net.Proxy.Dialer } else { return &net.Dialer{ Timeout: c.Net.DialTimeout, KeepAlive: c.Net.KeepAlive, LocalAddr: c.Net.LocalAddr, } } } const MAX_GROUP_INSTANCE_ID_LENGTH = 249 var GROUP_INSTANCE_ID_REGEXP = regexp.MustCompile(`^[0-9a-zA-Z\._\-]+$`) func validateGroupInstanceId(id string) error { if id == "" { return ConfigurationError("Group instance id must be non-empty string") } if id == "." || id == ".." { return ConfigurationError(`Group instance id cannot be "." or ".."`) } if len(id) > MAX_GROUP_INSTANCE_ID_LENGTH { return ConfigurationError(fmt.Sprintf(`Group instance id cannot be longer than %v, characters: %s`, MAX_GROUP_INSTANCE_ID_LENGTH, id)) } if !GROUP_INSTANCE_ID_REGEXP.MatchString(id) { return ConfigurationError(fmt.Sprintf(`Group instance id %s is illegal, it contains a character other than, '.', '_' and '-'`, id)) } return nil } golang-github-ibm-sarama-1.43.2/config_resource_type.go000066400000000000000000000011271461256741300231150ustar00rootroot00000000000000package sarama // ConfigResourceType is a type for resources that have configs. type ConfigResourceType int8 // Taken from: // https://github.com/apache/kafka/blob/ed7c071e07f1f90e4c2895582f61ca090ced3c42/clients/src/main/java/org/apache/kafka/common/config/ConfigResource.java#L32-L55 const ( // UnknownResource constant type UnknownResource ConfigResourceType = 0 // TopicResource constant type TopicResource ConfigResourceType = 2 // BrokerResource constant type BrokerResource ConfigResourceType = 4 // BrokerLoggerResource constant type BrokerLoggerResource ConfigResourceType = 8 ) golang-github-ibm-sarama-1.43.2/config_test.go000066400000000000000000000414551461256741300212140ustar00rootroot00000000000000package sarama import ( "errors" "fmt" "os" "strings" "testing" "github.com/rcrowley/go-metrics" assert "github.com/stretchr/testify/require" ) // NewTestConfig returns a config meant to be used by tests. 
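// A hedged sketch (not taken from the upstream tests) of how such a config is
// typically consumed inside a test in this package:
//
//	func TestSomething(t *testing.T) {
//		cfg := NewTestConfig()
//		if err := cfg.Validate(); err != nil {
//			t.Fatal(err)
//		}
//	}
//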
// Due to inconsistencies with the request versions the clients send using the default Kafka version // and the response versions our mocks use, we default to the minimum Kafka version in most tests func NewTestConfig() *Config { config := NewConfig() config.Consumer.Retry.Backoff = 0 config.Producer.Retry.Backoff = 0 config.Version = MinVersion return config } func TestDefaultConfigValidates(t *testing.T) { config := NewTestConfig() if err := config.Validate(); err != nil { t.Error(err) } if config.MetricRegistry == nil { t.Error("Expected non nil metrics.MetricRegistry, got nil") } } // TestInvalidClientIDValidated ensures that the ClientID field is checked // when Version is set to anything less than 1_0_0_0, but otherwise accepted func TestInvalidClientIDValidated(t *testing.T) { for _, version := range SupportedVersions { for _, clientID := range []string{"", "foo:bar", "foo|bar"} { config := NewTestConfig() config.ClientID = clientID config.Version = version err := config.Validate() if config.Version.IsAtLeast(V1_0_0_0) { assert.NoError(t, err) continue } var target ConfigurationError assert.ErrorAs(t, err, &target) assert.ErrorContains(t, err, fmt.Sprintf("ClientID value %q is not valid for Kafka versions before 1.0.0", clientID)) } } } type DummyTokenProvider struct{} func (t *DummyTokenProvider) Token() (*AccessToken, error) { return &AccessToken{Token: "access-token-string"}, nil } func TestNetConfigValidates(t *testing.T) { tests := []struct { name string cfg func(*Config) // resorting to using a function as a param because of internal composite structs err string }{ { "OpenRequests", func(cfg *Config) { cfg.Net.MaxOpenRequests = 0 }, "Net.MaxOpenRequests must be > 0", }, { "DialTimeout", func(cfg *Config) { cfg.Net.DialTimeout = 0 }, "Net.DialTimeout must be > 0", }, { "ReadTimeout", func(cfg *Config) { cfg.Net.ReadTimeout = 0 }, "Net.ReadTimeout must be > 0", }, { "WriteTimeout", func(cfg *Config) { cfg.Net.WriteTimeout = 0 }, "Net.WriteTimeout must be > 0", }, { "SASL.User", func(cfg *Config) { cfg.Net.SASL.Enable = true cfg.Net.SASL.User = "" }, "Net.SASL.User must not be empty when SASL is enabled", }, { "SASL.Password", func(cfg *Config) { cfg.Net.SASL.Enable = true cfg.Net.SASL.User = "user" cfg.Net.SASL.Password = "" }, "Net.SASL.Password must not be empty when SASL is enabled", }, { "SASL.Mechanism - Invalid mechanism type", func(cfg *Config) { cfg.Net.SASL.Enable = true cfg.Net.SASL.Mechanism = "AnIncorrectSASLMechanism" cfg.Net.SASL.TokenProvider = &DummyTokenProvider{} }, "The SASL mechanism configuration is invalid. 
Possible values are `OAUTHBEARER`, `PLAIN`, `SCRAM-SHA-256`, `SCRAM-SHA-512` and `GSSAPI`", }, { "SASL.Mechanism.OAUTHBEARER - Missing token provider", func(cfg *Config) { cfg.Net.SASL.Enable = true cfg.Net.SASL.Mechanism = SASLTypeOAuth cfg.Net.SASL.TokenProvider = nil }, "An AccessTokenProvider instance must be provided to Net.SASL.TokenProvider", }, { "SASL.Mechanism SCRAM-SHA-256 - Missing SCRAM client", func(cfg *Config) { cfg.Net.SASL.Enable = true cfg.Net.SASL.Mechanism = SASLTypeSCRAMSHA256 cfg.Net.SASL.SCRAMClientGeneratorFunc = nil cfg.Net.SASL.User = "user" cfg.Net.SASL.Password = "strong_password" }, "A SCRAMClientGeneratorFunc function must be provided to Net.SASL.SCRAMClientGeneratorFunc", }, { "SASL.Mechanism SCRAM-SHA-512 - Missing SCRAM client", func(cfg *Config) { cfg.Net.SASL.Enable = true cfg.Net.SASL.Mechanism = SASLTypeSCRAMSHA512 cfg.Net.SASL.SCRAMClientGeneratorFunc = nil cfg.Net.SASL.User = "user" cfg.Net.SASL.Password = "strong_password" }, "A SCRAMClientGeneratorFunc function must be provided to Net.SASL.SCRAMClientGeneratorFunc", }, { "SASL.Mechanism GSSAPI (Kerberos) - Using User/Password, Missing password field", func(cfg *Config) { cfg.Net.SASL.Enable = true cfg.Net.SASL.Mechanism = SASLTypeGSSAPI cfg.Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH cfg.Net.SASL.GSSAPI.Username = "sarama" cfg.Net.SASL.GSSAPI.ServiceName = "kafka" cfg.Net.SASL.GSSAPI.Realm = "kafka" cfg.Net.SASL.GSSAPI.KerberosConfigPath = "/etc/krb5.conf" }, "Net.SASL.GSSAPI.Password must not be empty when GSS-API " + "mechanism is used and Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH", }, { "SASL.Mechanism GSSAPI (Kerberos) - Using User/Password, Missing KeyTabPath field", func(cfg *Config) { cfg.Net.SASL.Enable = true cfg.Net.SASL.Mechanism = SASLTypeGSSAPI cfg.Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH cfg.Net.SASL.GSSAPI.Username = "sarama" cfg.Net.SASL.GSSAPI.ServiceName = "kafka" cfg.Net.SASL.GSSAPI.Realm = "kafka" cfg.Net.SASL.GSSAPI.KerberosConfigPath = "/etc/krb5.conf" }, "Net.SASL.GSSAPI.KeyTabPath must not be empty when GSS-API mechanism is used" + " and Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH", }, { "SASL.Mechanism GSSAPI (Kerberos) - Missing username", func(cfg *Config) { cfg.Net.SASL.Enable = true cfg.Net.SASL.Mechanism = SASLTypeGSSAPI cfg.Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH cfg.Net.SASL.GSSAPI.Password = "sarama" cfg.Net.SASL.GSSAPI.ServiceName = "kafka" cfg.Net.SASL.GSSAPI.Realm = "kafka" cfg.Net.SASL.GSSAPI.KerberosConfigPath = "/etc/krb5.conf" }, "Net.SASL.GSSAPI.Username must not be empty when GSS-API mechanism is used", }, { "SASL.Mechanism GSSAPI (Kerberos) - Missing ServiceName", func(cfg *Config) { cfg.Net.SASL.Enable = true cfg.Net.SASL.Mechanism = SASLTypeGSSAPI cfg.Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH cfg.Net.SASL.GSSAPI.Username = "sarama" cfg.Net.SASL.GSSAPI.Password = "sarama" cfg.Net.SASL.GSSAPI.Realm = "kafka" cfg.Net.SASL.GSSAPI.KerberosConfigPath = "/etc/krb5.conf" }, "Net.SASL.GSSAPI.ServiceName must not be empty when GSS-API mechanism is used", }, { "SASL.Mechanism GSSAPI (Kerberos) - Missing AuthType", func(cfg *Config) { cfg.Net.SASL.Enable = true cfg.Net.SASL.GSSAPI.ServiceName = "kafka" cfg.Net.SASL.Mechanism = SASLTypeGSSAPI cfg.Net.SASL.GSSAPI.Username = "sarama" cfg.Net.SASL.GSSAPI.Password = "sarama" cfg.Net.SASL.GSSAPI.Realm = "kafka" cfg.Net.SASL.GSSAPI.KerberosConfigPath = "/etc/krb5.conf" }, "Net.SASL.GSSAPI.AuthType is invalid. 
Possible values are KRB5_USER_AUTH, KRB5_KEYTAB_AUTH, and KRB5_CCACHE_AUTH", }, { "SASL.Mechanism GSSAPI (Kerberos) - Missing KerberosConfigPath", func(cfg *Config) { cfg.Net.SASL.Enable = true cfg.Net.SASL.GSSAPI.ServiceName = "kafka" cfg.Net.SASL.Mechanism = SASLTypeGSSAPI cfg.Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH cfg.Net.SASL.GSSAPI.Username = "sarama" cfg.Net.SASL.GSSAPI.Password = "sarama" cfg.Net.SASL.GSSAPI.Realm = "kafka" }, "Net.SASL.GSSAPI.KerberosConfigPath must not be empty when GSS-API mechanism is used", }, { "SASL.Mechanism GSSAPI (Kerberos) - Missing Realm", func(cfg *Config) { cfg.Net.SASL.Enable = true cfg.Net.SASL.GSSAPI.ServiceName = "kafka" cfg.Net.SASL.Mechanism = SASLTypeGSSAPI cfg.Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH cfg.Net.SASL.GSSAPI.Username = "sarama" cfg.Net.SASL.GSSAPI.Password = "sarama" cfg.Net.SASL.GSSAPI.KerberosConfigPath = "/etc/krb5.conf" }, "Net.SASL.GSSAPI.Realm must not be empty when GSS-API mechanism is used", }, { "SASL.Mechanism GSSAPI (Kerberos) - Using Credentials Cache, Missing CCachePath field", func(cfg *Config) { cfg.Net.SASL.Enable = true cfg.Net.SASL.GSSAPI.ServiceName = "kafka" cfg.Net.SASL.Mechanism = SASLTypeGSSAPI cfg.Net.SASL.GSSAPI.AuthType = KRB5_CCACHE_AUTH cfg.Net.SASL.GSSAPI.KerberosConfigPath = "/etc/krb5.conf" }, "Net.SASL.GSSAPI.CCachePath must not be empty when GSS-API mechanism is used" + " and Net.SASL.GSSAPI.AuthType = KRB5_CCACHE_AUTH", }, } for i, test := range tests { c := NewTestConfig() test.cfg(c) err := c.Validate() var target ConfigurationError if !errors.As(err, &target) || string(target) != test.err { t.Errorf("[%d]:[%s] Expected %s, Got %s\n", i, test.name, test.err, err) } } } func TestMetadataConfigValidates(t *testing.T) { tests := []struct { name string cfg func(*Config) // resorting to using a function as a param because of internal composite structs err string }{ { "Retry.Max", func(cfg *Config) { cfg.Metadata.Retry.Max = -1 }, "Metadata.Retry.Max must be >= 0", }, { "Retry.Backoff", func(cfg *Config) { cfg.Metadata.Retry.Backoff = -1 }, "Metadata.Retry.Backoff must be >= 0", }, { "RefreshFrequency", func(cfg *Config) { cfg.Metadata.RefreshFrequency = -1 }, "Metadata.RefreshFrequency must be >= 0", }, } for i, test := range tests { c := NewTestConfig() test.cfg(c) err := c.Validate() var target ConfigurationError if !errors.As(err, &target) || string(target) != test.err { t.Errorf("[%d]:[%s] Expected %s, Got %s\n", i, test.name, test.err, err) } } } func TestAdminConfigValidates(t *testing.T) { tests := []struct { name string cfg func(*Config) // resorting to using a function as a param because of internal composite structs err string }{ { "Timeout", func(cfg *Config) { cfg.Admin.Timeout = 0 }, "Admin.Timeout must be > 0", }, } for i, test := range tests { c := NewTestConfig() test.cfg(c) err := c.Validate() var target ConfigurationError if !errors.As(err, &target) || string(target) != test.err { t.Errorf("[%d]:[%s] Expected %s, Got %s\n", i, test.name, test.err, err) } } } func TestProducerConfigValidates(t *testing.T) { tests := []struct { name string cfg func(*Config) // resorting to using a function as a param because of internal composite structs err string }{ { "MaxMessageBytes", func(cfg *Config) { cfg.Producer.MaxMessageBytes = 0 }, "Producer.MaxMessageBytes must be > 0", }, { "RequiredAcks", func(cfg *Config) { cfg.Producer.RequiredAcks = -2 }, "Producer.RequiredAcks must be >= -1", }, { "Timeout", func(cfg *Config) { cfg.Producer.Timeout = 0 }, "Producer.Timeout must be > 0", }, { 
"Partitioner", func(cfg *Config) { cfg.Producer.Partitioner = nil }, "Producer.Partitioner must not be nil", }, { "Flush.Bytes", func(cfg *Config) { cfg.Producer.Flush.Bytes = -1 }, "Producer.Flush.Bytes must be >= 0", }, { "Flush.Messages", func(cfg *Config) { cfg.Producer.Flush.Messages = -1 }, "Producer.Flush.Messages must be >= 0", }, { "Flush.Frequency", func(cfg *Config) { cfg.Producer.Flush.Frequency = -1 }, "Producer.Flush.Frequency must be >= 0", }, { "Flush.MaxMessages", func(cfg *Config) { cfg.Producer.Flush.MaxMessages = -1 }, "Producer.Flush.MaxMessages must be >= 0", }, { "Flush.MaxMessages with Producer.Flush.Messages", func(cfg *Config) { cfg.Producer.Flush.MaxMessages = 1 cfg.Producer.Flush.Messages = 2 }, "Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set", }, { "Flush.Retry.Max", func(cfg *Config) { cfg.Producer.Retry.Max = -1 }, "Producer.Retry.Max must be >= 0", }, { "Flush.Retry.Backoff", func(cfg *Config) { cfg.Producer.Retry.Backoff = -1 }, "Producer.Retry.Backoff must be >= 0", }, { "Idempotent Version", func(cfg *Config) { cfg.Producer.Idempotent = true cfg.Version = V0_10_0_0 }, "Idempotent producer requires Version >= V0_11_0_0", }, { "Idempotent with Producer.Retry.Max", func(cfg *Config) { cfg.Version = V0_11_0_0 cfg.Producer.Idempotent = true cfg.Producer.Retry.Max = 0 }, "Idempotent producer requires Producer.Retry.Max >= 1", }, { "Idempotent with Producer.RequiredAcks", func(cfg *Config) { cfg.Version = V0_11_0_0 cfg.Producer.Idempotent = true }, "Idempotent producer requires Producer.RequiredAcks to be WaitForAll", }, { "Idempotent with Net.MaxOpenRequests", func(cfg *Config) { cfg.Version = V0_11_0_0 cfg.Producer.Idempotent = true cfg.Producer.RequiredAcks = WaitForAll }, "Idempotent producer requires Net.MaxOpenRequests to be 1", }, } for i, test := range tests { c := NewTestConfig() test.cfg(c) err := c.Validate() var target ConfigurationError if !errors.As(err, &target) || string(target) != test.err { t.Errorf("[%d]:[%s] Expected %s, Got %s\n", i, test.name, test.err, err) } } } func TestConsumerConfigValidates(t *testing.T) { tests := []struct { name string cfg func(*Config) err string }{ { "ReadCommitted Version", func(cfg *Config) { cfg.Version = V0_10_0_0 cfg.Consumer.IsolationLevel = ReadCommitted }, "ReadCommitted requires Version >= V0_11_0_0", }, { "Incorrect isolation level", func(cfg *Config) { cfg.Version = V0_11_0_0 cfg.Consumer.IsolationLevel = IsolationLevel(42) }, "Consumer.IsolationLevel must be ReadUncommitted or ReadCommitted", }, } for i, test := range tests { c := NewTestConfig() test.cfg(c) err := c.Validate() var target ConfigurationError if !errors.As(err, &target) || string(target) != test.err { t.Errorf("[%d]:[%s] Expected %s, Got %s\n", i, test.name, test.err, err) } } } func TestLZ4ConfigValidation(t *testing.T) { config := NewTestConfig() config.Producer.Compression = CompressionLZ4 err := config.Validate() var target ConfigurationError if !errors.As(err, &target) || string(target) != "lz4 compression requires Version >= V0_10_0_0" { t.Error("Expected invalid lz4/kafka version error, got ", err) } config.Version = V0_10_0_0 if err := config.Validate(); err != nil { t.Error("Expected lz4 to work, got ", err) } } func TestZstdConfigValidation(t *testing.T) { config := NewTestConfig() config.Producer.Compression = CompressionZSTD err := config.Validate() var target ConfigurationError if !errors.As(err, &target) || string(target) != "zstd compression requires Version >= V2_1_0_0" { t.Error("Expected invalid 
zstd/kafka version error, got ", err) } config.Version = V2_1_0_0 if err := config.Validate(); err != nil { t.Error("Expected zstd to work, got ", err) } } func TestValidGroupInstanceId(t *testing.T) { tests := []struct { grouptInstanceId string shouldHaveErr bool }{ {"groupInstanceId1", false}, {"", true}, {".", true}, {"..", true}, {strings.Repeat("a", 250), true}, {"group_InstanceId.1", false}, {"group-InstanceId1", false}, {"group#InstanceId1", true}, } for _, testcase := range tests { err := validateGroupInstanceId(testcase.grouptInstanceId) if !testcase.shouldHaveErr { if err != nil { t.Errorf("Expected validGroupInstanceId %s to pass, got error %v", testcase.grouptInstanceId, err) } } else { if err == nil { t.Errorf("Expected validGroupInstanceId %s to be error, got nil", testcase.grouptInstanceId) } var target ConfigurationError if !errors.As(err, &target) { t.Errorf("Excepted err to be ConfigurationError, got %v", err) } } } } func TestGroupInstanceIdAndVersionValidation(t *testing.T) { config := NewTestConfig() config.Consumer.Group.InstanceId = "groupInstanceId1" if err := config.Validate(); !strings.Contains(err.Error(), "Consumer.Group.InstanceId need Version >= 2.3") { t.Error("Expected invalid group instance error, got ", err) } config.Version = V2_3_0_0 if err := config.Validate(); err != nil { t.Error("Expected group instance to work, got ", err) } } func TestConsumerGroupStrategyCompatibility(t *testing.T) { config := NewTestConfig() config.Consumer.Group.Rebalance.Strategy = NewBalanceStrategySticky() if err := config.Validate(); err != nil { t.Error("Expected passing config validation, got ", err) } } // This example shows how to integrate with an existing registry as well as publishing metrics // on the standard output func ExampleConfig_metrics() { // Our application registry appMetricRegistry := metrics.NewRegistry() appGauge := metrics.GetOrRegisterGauge("m1", appMetricRegistry) appGauge.Update(1) config := NewTestConfig() // Use a prefix registry instead of the default local one config.MetricRegistry = metrics.NewPrefixedChildRegistry(appMetricRegistry, "sarama.") // Simulate a metric created by sarama without starting a broker saramaGauge := metrics.GetOrRegisterGauge("m2", config.MetricRegistry) saramaGauge.Update(2) metrics.WriteOnce(appMetricRegistry, os.Stdout) // Output: // gauge m1 // value: 1 // gauge sarama.m2 // value: 2 } golang-github-ibm-sarama-1.43.2/consumer.go000066400000000000000000001073621461256741300205430ustar00rootroot00000000000000package sarama import ( "errors" "fmt" "math" "sync" "sync/atomic" "time" "github.com/rcrowley/go-metrics" ) // ConsumerMessage encapsulates a Kafka message returned by the consumer. type ConsumerMessage struct { Headers []*RecordHeader // only set if kafka is version 0.11+ Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp Key, Value []byte Topic string Partition int32 Offset int64 } // ConsumerError is what is provided to the user when an error occurs. // It wraps an error and includes the topic and partition. type ConsumerError struct { Topic string Partition int32 Err error } func (ce ConsumerError) Error() string { return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err) } func (ce ConsumerError) Unwrap() error { return ce.Err } // ConsumerErrors is a type that wraps a batch of errors and implements the Error interface. 
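// As a hedged example (an addition, not upstream prose), caller code might unpack
// such a batch after closing a PartitionConsumer; the variable names are arbitrary:
//
//	if err := partitionConsumer.Close(); err != nil {
//		var cerrs sarama.ConsumerErrors
//		if errors.As(err, &cerrs) {
//			for _, ce := range cerrs {
//				log.Printf("consume error on %s/%d: %v", ce.Topic, ce.Partition, ce.Err)
//			}
//		}
//	}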
// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors // when stopping. type ConsumerErrors []*ConsumerError func (ce ConsumerErrors) Error() string { return fmt.Sprintf("kafka: %d errors while consuming", len(ce)) } // Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close() // on a consumer to avoid leaks, it will not be garbage-collected automatically when it passes out of // scope. type Consumer interface { // Topics returns the set of available topics as retrieved from the cluster // metadata. This method is the same as Client.Topics(), and is provided for // convenience. Topics() ([]string, error) // Partitions returns the sorted list of all partition IDs for the given topic. // This method is the same as Client.Partitions(), and is provided for convenience. Partitions(topic string) ([]int32, error) // ConsumePartition creates a PartitionConsumer on the given topic/partition with // the given offset. It will return an error if this Consumer is already consuming // on the given topic/partition. Offset can be a literal offset, or OffsetNewest // or OffsetOldest ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) // HighWaterMarks returns the current high water marks for each topic and partition. // Consistency between partitions is not guaranteed since high water marks are updated separately. HighWaterMarks() map[string]map[int32]int64 // Close shuts down the consumer. It must be called after all child // PartitionConsumers have already been closed. Close() error // Pause suspends fetching from the requested partitions. Future calls to the broker will not return any // records from these partitions until they have been resumed using Resume()/ResumeAll(). // Note that this method does not affect partition subscription. // In particular, it does not cause a group rebalance when automatic assignment is used. Pause(topicPartitions map[string][]int32) // Resume resumes specified partitions which have been paused with Pause()/PauseAll(). // New calls to the broker will return records from these partitions if there are any to be fetched. Resume(topicPartitions map[string][]int32) // PauseAll suspends fetching from all partitions. Future calls to the broker will not return any // records from these partitions until they have been resumed using Resume()/ResumeAll(). // Note that this method does not affect partition subscription. // In particular, it does not cause a group rebalance when automatic assignment is used. PauseAll() // ResumeAll resumes all partitions which have been paused with Pause()/PauseAll(). // New calls to the broker will return records from these partitions if there are any to be fetched. ResumeAll() } // max time to wait for more partition subscriptions const partitionConsumersBatchTimeout = 100 * time.Millisecond type consumer struct { conf *Config children map[string]map[int32]*partitionConsumer brokerConsumers map[*Broker]*brokerConsumer client Client metricRegistry metrics.Registry lock sync.Mutex } // NewConsumer creates a new consumer using the given broker addresses and configuration. func NewConsumer(addrs []string, config *Config) (Consumer, error) { client, err := NewClient(addrs, config) if err != nil { return nil, err } return newConsumer(client) } // NewConsumerFromClient creates a new consumer using the given client. It is still // necessary to call Close() on the underlying client when shutting down this consumer. 
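// A minimal, hedged usage sketch (added here; the broker address is a placeholder):
//
//	cfg := sarama.NewConfig()
//	client, err := sarama.NewClient([]string{"localhost:9092"}, cfg) // placeholder address
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer client.Close() // runs last: the caller still owns the client
//	consumer, err := sarama.NewConsumerFromClient(client)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer consumer.Close() // runs first (LIFO), before the client is closed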
func NewConsumerFromClient(client Client) (Consumer, error) { // For clients passed in by the client, ensure we don't // call Close() on it. cli := &nopCloserClient{client} return newConsumer(cli) } func newConsumer(client Client) (Consumer, error) { // Check that we are not dealing with a closed Client before processing any other arguments if client.Closed() { return nil, ErrClosedClient } c := &consumer{ client: client, conf: client.Config(), children: make(map[string]map[int32]*partitionConsumer), brokerConsumers: make(map[*Broker]*brokerConsumer), metricRegistry: newCleanupRegistry(client.Config().MetricRegistry), } return c, nil } func (c *consumer) Close() error { c.metricRegistry.UnregisterAll() return c.client.Close() } func (c *consumer) Topics() ([]string, error) { return c.client.Topics() } func (c *consumer) Partitions(topic string) ([]int32, error) { return c.client.Partitions(topic) } func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) { child := &partitionConsumer{ consumer: c, conf: c.conf, topic: topic, partition: partition, messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize), errors: make(chan *ConsumerError, c.conf.ChannelBufferSize), feeder: make(chan *FetchResponse, 1), leaderEpoch: invalidLeaderEpoch, preferredReadReplica: invalidPreferredReplicaID, trigger: make(chan none, 1), dying: make(chan none), fetchSize: c.conf.Consumer.Fetch.Default, } if err := child.chooseStartingOffset(offset); err != nil { return nil, err } leader, epoch, err := c.client.LeaderAndEpoch(child.topic, child.partition) if err != nil { return nil, err } if err := c.addChild(child); err != nil { return nil, err } go withRecover(child.dispatcher) go withRecover(child.responseFeeder) child.leaderEpoch = epoch child.broker = c.refBrokerConsumer(leader) child.broker.input <- child return child, nil } func (c *consumer) HighWaterMarks() map[string]map[int32]int64 { c.lock.Lock() defer c.lock.Unlock() hwms := make(map[string]map[int32]int64) for topic, p := range c.children { hwm := make(map[int32]int64, len(p)) for partition, pc := range p { hwm[partition] = pc.HighWaterMarkOffset() } hwms[topic] = hwm } return hwms } func (c *consumer) addChild(child *partitionConsumer) error { c.lock.Lock() defer c.lock.Unlock() topicChildren := c.children[child.topic] if topicChildren == nil { topicChildren = make(map[int32]*partitionConsumer) c.children[child.topic] = topicChildren } if topicChildren[child.partition] != nil { return ConfigurationError("That topic/partition is already being consumed") } topicChildren[child.partition] = child return nil } func (c *consumer) removeChild(child *partitionConsumer) { c.lock.Lock() defer c.lock.Unlock() delete(c.children[child.topic], child.partition) } func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer { c.lock.Lock() defer c.lock.Unlock() bc := c.brokerConsumers[broker] if bc == nil { bc = c.newBrokerConsumer(broker) c.brokerConsumers[broker] = bc } bc.refs++ return bc } func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) { c.lock.Lock() defer c.lock.Unlock() brokerWorker.refs-- if brokerWorker.refs == 0 { close(brokerWorker.input) if c.brokerConsumers[brokerWorker.broker] == brokerWorker { delete(c.brokerConsumers, brokerWorker.broker) } } } func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) { c.lock.Lock() defer c.lock.Unlock() delete(c.brokerConsumers, brokerWorker.broker) } // Pause implements Consumer. 
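// A hedged sketch (added) of pausing and later resuming fetching for specific
// partitions; the topic name and partition IDs are placeholders:
//
//	consumer.Pause(map[string][]int32{"orders": {0, 1}})
//	// ... apply backpressure, drain in-flight messages, etc. ...
//	consumer.Resume(map[string][]int32{"orders": {0, 1}})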
func (c *consumer) Pause(topicPartitions map[string][]int32) { c.lock.Lock() defer c.lock.Unlock() for topic, partitions := range topicPartitions { for _, partition := range partitions { if topicConsumers, ok := c.children[topic]; ok { if partitionConsumer, ok := topicConsumers[partition]; ok { partitionConsumer.Pause() } } } } } // Resume implements Consumer. func (c *consumer) Resume(topicPartitions map[string][]int32) { c.lock.Lock() defer c.lock.Unlock() for topic, partitions := range topicPartitions { for _, partition := range partitions { if topicConsumers, ok := c.children[topic]; ok { if partitionConsumer, ok := topicConsumers[partition]; ok { partitionConsumer.Resume() } } } } } // PauseAll implements Consumer. func (c *consumer) PauseAll() { c.lock.Lock() defer c.lock.Unlock() for _, partitions := range c.children { for _, partitionConsumer := range partitions { partitionConsumer.Pause() } } } // ResumeAll implements Consumer. func (c *consumer) ResumeAll() { c.lock.Lock() defer c.lock.Unlock() for _, partitions := range c.children { for _, partitionConsumer := range partitions { partitionConsumer.Resume() } } } // PartitionConsumer // PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or // AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out // of scope. // // The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range // loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported // as out of range by the brokers. In this case you should decide what you want to do (try a different offset, // notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying. // By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set // your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement // or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches. // // To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of // consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process // AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call // Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will // also drain the Messages channel, harvest all errors & return them once cleanup has completed. type PartitionConsumer interface { // AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you // should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this // function, or Close before a consumer object passes out of scope, as it will otherwise leak memory. You must call // this before calling Close on the underlying client. AsyncClose() // Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain // the Messages channel, harvest any errors & return them to the caller. 
Note that if you are continuing to service // the Messages channel when this function is called, you will be competing with Close for messages; consider // calling AsyncClose, instead. It is required to call this function (or AsyncClose) before a consumer object passes // out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client. Close() error // Messages returns the read channel for the messages that are returned by // the broker. Messages() <-chan *ConsumerMessage // Errors returns a read channel of errors that occurred during consuming, if // enabled. By default, errors are logged and not returned over this channel. // If you want to implement any custom error handling, set your config's // Consumer.Return.Errors setting to true, and read from this channel. Errors() <-chan *ConsumerError // HighWaterMarkOffset returns the high water mark offset of the partition, // i.e. the offset that will be used for the next message that will be produced. // You can use this to determine how far behind the processing is. HighWaterMarkOffset() int64 // Pause suspends fetching from this partition. Future calls to the broker will not return // any records from these partition until it have been resumed using Resume(). // Note that this method does not affect partition subscription. // In particular, it does not cause a group rebalance when automatic assignment is used. Pause() // Resume resumes this partition which have been paused with Pause(). // New calls to the broker will return records from these partitions if there are any to be fetched. // If the partition was not previously paused, this method is a no-op. Resume() // IsPaused indicates if this partition consumer is paused or not IsPaused() bool } type partitionConsumer struct { highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG consumer *consumer conf *Config broker *brokerConsumer messages chan *ConsumerMessage errors chan *ConsumerError feeder chan *FetchResponse leaderEpoch int32 preferredReadReplica int32 trigger, dying chan none closeOnce sync.Once topic string partition int32 responseResult error fetchSize int32 offset int64 retries int32 paused int32 } var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing func (child *partitionConsumer) sendError(err error) { cErr := &ConsumerError{ Topic: child.topic, Partition: child.partition, Err: err, } if child.conf.Consumer.Return.Errors { child.errors <- cErr } else { Logger.Println(cErr) } } func (child *partitionConsumer) computeBackoff() time.Duration { if child.conf.Consumer.Retry.BackoffFunc != nil { retries := atomic.AddInt32(&child.retries, 1) return child.conf.Consumer.Retry.BackoffFunc(int(retries)) } return child.conf.Consumer.Retry.Backoff } func (child *partitionConsumer) dispatcher() { for range child.trigger { select { case <-child.dying: close(child.trigger) case <-time.After(child.computeBackoff()): if child.broker != nil { child.consumer.unrefBrokerConsumer(child.broker) child.broker = nil } if err := child.dispatch(); err != nil { child.sendError(err) child.trigger <- none{} } } } if child.broker != nil { child.consumer.unrefBrokerConsumer(child.broker) } child.consumer.removeChild(child) close(child.feeder) } func (child *partitionConsumer) preferredBroker() (*Broker, int32, error) { if child.preferredReadReplica >= 0 { broker, err := child.consumer.client.Broker(child.preferredReadReplica) if err == nil { return broker, 
child.leaderEpoch, nil } Logger.Printf( "consumer/%s/%d failed to find active broker for preferred read replica %d - will fallback to leader", child.topic, child.partition, child.preferredReadReplica) // if we couldn't find it, discard the replica preference and trigger a // metadata refresh whilst falling back to consuming from the leader again child.preferredReadReplica = invalidPreferredReplicaID _ = child.consumer.client.RefreshMetadata(child.topic) } // if preferred replica cannot be found fallback to leader return child.consumer.client.LeaderAndEpoch(child.topic, child.partition) } func (child *partitionConsumer) dispatch() error { if err := child.consumer.client.RefreshMetadata(child.topic); err != nil { return err } broker, epoch, err := child.preferredBroker() if err != nil { return err } child.leaderEpoch = epoch child.broker = child.consumer.refBrokerConsumer(broker) child.broker.input <- child return nil } func (child *partitionConsumer) chooseStartingOffset(offset int64) error { newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest) if err != nil { return err } child.highWaterMarkOffset = newestOffset oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest) if err != nil { return err } switch { case offset == OffsetNewest: child.offset = newestOffset case offset == OffsetOldest: child.offset = oldestOffset case offset >= oldestOffset && offset <= newestOffset: child.offset = offset default: return ErrOffsetOutOfRange } return nil } func (child *partitionConsumer) Messages() <-chan *ConsumerMessage { return child.messages } func (child *partitionConsumer) Errors() <-chan *ConsumerError { return child.errors } func (child *partitionConsumer) AsyncClose() { // this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes // the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and // 'errors' channel (alternatively, if the child is already at the dispatcher for some reason, that will // also just close itself) child.closeOnce.Do(func() { close(child.dying) }) } func (child *partitionConsumer) Close() error { child.AsyncClose() var consumerErrors ConsumerErrors for err := range child.errors { consumerErrors = append(consumerErrors, err) } if len(consumerErrors) > 0 { return consumerErrors } return nil } func (child *partitionConsumer) HighWaterMarkOffset() int64 { return atomic.LoadInt64(&child.highWaterMarkOffset) } func (child *partitionConsumer) responseFeeder() { var msgs []*ConsumerMessage expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime) firstAttempt := true feederLoop: for response := range child.feeder { msgs, child.responseResult = child.parseResponse(response) if child.responseResult == nil { atomic.StoreInt32(&child.retries, 0) } for i, msg := range msgs { child.interceptors(msg) messageSelect: select { case <-child.dying: child.broker.acks.Done() continue feederLoop case child.messages <- msg: firstAttempt = true case <-expiryTicker.C: if !firstAttempt { child.responseResult = errTimedOut child.broker.acks.Done() remainingLoop: for _, msg = range msgs[i:] { child.interceptors(msg) select { case child.messages <- msg: case <-child.dying: break remainingLoop } } child.broker.input <- child continue feederLoop } else { // current message has not been sent, return to select // statement firstAttempt = false goto messageSelect } } } child.broker.acks.Done() } expiryTicker.Stop() 
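HighWaterMarkOffset, together with the offset of the message just received, gives a rough lag figure for the partition. An illustrative helper, assuming an already-open PartitionConsumer (not part of this file):

package example

import (
	"log"

	"github.com/IBM/sarama"
)

// logLag prints a rough consumer-lag figure for each message. The high
// water mark is the offset of the *next* message that will be produced,
// hence the -1 when comparing it with the offset just consumed.
func logLag(pc sarama.PartitionConsumer) {
	for msg := range pc.Messages() {
		lag := pc.HighWaterMarkOffset() - msg.Offset - 1
		log.Printf("%s/%d offset=%d lag=%d", msg.Topic, msg.Partition, msg.Offset, lag)
	}
}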
close(child.messages) close(child.errors) } func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMessage, error) { var messages []*ConsumerMessage for _, msgBlock := range msgSet.Messages { for _, msg := range msgBlock.Messages() { offset := msg.Offset timestamp := msg.Msg.Timestamp if msg.Msg.Version >= 1 { baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset offset += baseOffset if msg.Msg.LogAppendTime { timestamp = msgBlock.Msg.Timestamp } } if offset < child.offset { continue } messages = append(messages, &ConsumerMessage{ Topic: child.topic, Partition: child.partition, Key: msg.Msg.Key, Value: msg.Msg.Value, Offset: offset, Timestamp: timestamp, BlockTimestamp: msgBlock.Msg.Timestamp, }) child.offset = offset + 1 } } if len(messages) == 0 { child.offset++ } return messages, nil } func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) { messages := make([]*ConsumerMessage, 0, len(batch.Records)) for _, rec := range batch.Records { offset := batch.FirstOffset + rec.OffsetDelta if offset < child.offset { continue } timestamp := batch.FirstTimestamp.Add(rec.TimestampDelta) if batch.LogAppendTime { timestamp = batch.MaxTimestamp } messages = append(messages, &ConsumerMessage{ Topic: child.topic, Partition: child.partition, Key: rec.Key, Value: rec.Value, Offset: offset, Timestamp: timestamp, Headers: rec.Headers, }) child.offset = offset + 1 } if len(messages) == 0 { child.offset++ } return messages, nil } func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) { var consumerBatchSizeMetric metrics.Histogram if child.consumer != nil && child.consumer.metricRegistry != nil { consumerBatchSizeMetric = getOrRegisterHistogram("consumer-batch-size", child.consumer.metricRegistry) } // If request was throttled and empty we log and return without error if response.ThrottleTime != time.Duration(0) && len(response.Blocks) == 0 { Logger.Printf( "consumer/broker/%d FetchResponse throttled %v\n", child.broker.broker.ID(), response.ThrottleTime) return nil, nil } block := response.GetBlock(child.topic, child.partition) if block == nil { return nil, ErrIncompleteResponse } if !errors.Is(block.Err, ErrNoError) { return nil, block.Err } nRecs, err := block.numRecords() if err != nil { return nil, err } if consumerBatchSizeMetric != nil { consumerBatchSizeMetric.Update(int64(nRecs)) } if block.PreferredReadReplica != invalidPreferredReplicaID { child.preferredReadReplica = block.PreferredReadReplica } if nRecs == 0 { partialTrailingMessage, err := block.isPartial() if err != nil { return nil, err } // We got no messages. If we got a trailing one then we need to ask for more data. // Otherwise we just poll again and wait for one to be produced... 
if partialTrailingMessage { if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max { // we can't ask for more data, we've hit the configured limit child.sendError(ErrMessageTooLarge) child.offset++ // skip this one so we can keep processing future messages } else { child.fetchSize *= 2 // check int32 overflow if child.fetchSize < 0 { child.fetchSize = math.MaxInt32 } if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max { child.fetchSize = child.conf.Consumer.Fetch.Max } } } else if block.LastRecordsBatchOffset != nil && *block.LastRecordsBatchOffset < block.HighWaterMarkOffset { // check last record offset to avoid stuck if high watermark was not reached Logger.Printf("consumer/broker/%d received batch with zero records but high watermark was not reached, topic %s, partition %d, offset %d\n", child.broker.broker.ID(), child.topic, child.partition, *block.LastRecordsBatchOffset) child.offset = *block.LastRecordsBatchOffset + 1 } return nil, nil } // we got messages, reset our fetch size in case it was increased for a previous request child.fetchSize = child.conf.Consumer.Fetch.Default atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset) // abortedProducerIDs contains producerID which message should be ignored as uncommitted // - producerID are added when the partitionConsumer iterate over the offset at which an aborted transaction begins (abortedTransaction.FirstOffset) // - producerID are removed when partitionConsumer iterate over an aborted controlRecord, meaning the aborted transaction for this producer is over abortedProducerIDs := make(map[int64]struct{}, len(block.AbortedTransactions)) abortedTransactions := block.getAbortedTransactions() var messages []*ConsumerMessage for _, records := range block.RecordsSet { switch records.recordsType { case legacyRecords: messageSetMessages, err := child.parseMessages(records.MsgSet) if err != nil { return nil, err } messages = append(messages, messageSetMessages...) case defaultRecords: // Consume remaining abortedTransaction up to last offset of current batch for _, txn := range abortedTransactions { if txn.FirstOffset > records.RecordBatch.LastOffset() { break } abortedProducerIDs[txn.ProducerID] = struct{}{} // Pop abortedTransactions so that we never add it again abortedTransactions = abortedTransactions[1:] } recordBatchMessages, err := child.parseRecords(records.RecordBatch) if err != nil { return nil, err } // Parse and commit offset but do not expose messages that are: // - control records // - part of an aborted transaction when set to `ReadCommitted` // control record isControl, err := records.isControl() if err != nil { // I don't know why there is this continue in case of error to begin with // Safe bet is to ignore control messages if ReadUncommitted // and block on them in case of error and ReadCommitted if child.conf.Consumer.IsolationLevel == ReadCommitted { return nil, err } continue } if isControl { controlRecord, err := records.getControlRecord() if err != nil { return nil, err } if controlRecord.Type == ControlRecordAbort { delete(abortedProducerIDs, records.RecordBatch.ProducerID) } continue } // filter aborted transactions if child.conf.Consumer.IsolationLevel == ReadCommitted { _, isAborted := abortedProducerIDs[records.RecordBatch.ProducerID] if records.RecordBatch.IsTransactional && isAborted { continue } } messages = append(messages, recordBatchMessages...) 
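Both the fetch-size doubling capped by Fetch.Max and the filtering of aborted transactions are driven purely by configuration. A hedged sketch with illustrative values (not recommendations):

package example

import "github.com/IBM/sarama"

// newFetchTunedConfig returns a Config whose fetch sizing and isolation
// level exercise the code paths above: Fetch.Default is the starting
// per-partition fetch size, Fetch.Max caps the doubling that happens when
// a message is too large, and ReadCommitted makes the consumer drop
// messages from aborted transactions.
func newFetchTunedConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_1_0_0        // transactional reads need >= 0.11; pick something recent
	cfg.Consumer.Fetch.Default = 1 << 20 // 1 MiB starting fetch size
	cfg.Consumer.Fetch.Max = 16 << 20    // stop doubling at 16 MiB
	cfg.Consumer.IsolationLevel = sarama.ReadCommitted
	return cfg
}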
default: return nil, fmt.Errorf("unknown records type: %v", records.recordsType) } } return messages, nil } func (child *partitionConsumer) interceptors(msg *ConsumerMessage) { for _, interceptor := range child.conf.Consumer.Interceptors { msg.safelyApplyInterceptor(interceptor) } } // Pause implements PartitionConsumer. func (child *partitionConsumer) Pause() { atomic.StoreInt32(&child.paused, 1) } // Resume implements PartitionConsumer. func (child *partitionConsumer) Resume() { atomic.StoreInt32(&child.paused, 0) } // IsPaused implements PartitionConsumer. func (child *partitionConsumer) IsPaused() bool { return atomic.LoadInt32(&child.paused) == 1 } type brokerConsumer struct { consumer *consumer broker *Broker input chan *partitionConsumer newSubscriptions chan []*partitionConsumer subscriptions map[*partitionConsumer]none acks sync.WaitGroup refs int } func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer { bc := &brokerConsumer{ consumer: c, broker: broker, input: make(chan *partitionConsumer), newSubscriptions: make(chan []*partitionConsumer), subscriptions: make(map[*partitionConsumer]none), refs: 0, } go withRecover(bc.subscriptionManager) go withRecover(bc.subscriptionConsumer) return bc } // The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer // goroutine is in the middle of a network request) and batches it up. The main worker goroutine picks // up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give // it nil if no new subscriptions are available. func (bc *brokerConsumer) subscriptionManager() { defer close(bc.newSubscriptions) for { var partitionConsumers []*partitionConsumer // Check for any partition consumer asking to subscribe if there aren't // any, trigger the network request (to fetch Kafka messages) by sending "nil" to the // newSubscriptions channel select { case pc, ok := <-bc.input: if !ok { return } partitionConsumers = append(partitionConsumers, pc) case bc.newSubscriptions <- nil: continue } // drain input of any further incoming subscriptions timer := time.NewTimer(partitionConsumersBatchTimeout) for batchComplete := false; !batchComplete; { select { case pc := <-bc.input: partitionConsumers = append(partitionConsumers, pc) case <-timer.C: batchComplete = true } } timer.Stop() Logger.Printf( "consumer/broker/%d accumulated %d new subscriptions\n", bc.broker.ID(), len(partitionConsumers)) bc.newSubscriptions <- partitionConsumers } } // subscriptionConsumer ensures we will get nil right away if no new subscriptions is available // this is the main loop that fetches Kafka messages func (bc *brokerConsumer) subscriptionConsumer() { for newSubscriptions := range bc.newSubscriptions { bc.updateSubscriptions(newSubscriptions) if len(bc.subscriptions) == 0 { // We're about to be shut down or we're about to receive more subscriptions. // Take a small nap to avoid burning the CPU. 
time.Sleep(partitionConsumersBatchTimeout) continue } response, err := bc.fetchNewMessages() if err != nil { Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err) bc.abort(err) return } // if there isn't response, it means that not fetch was made // so we don't need to handle any response if response == nil { time.Sleep(partitionConsumersBatchTimeout) continue } bc.acks.Add(len(bc.subscriptions)) for child := range bc.subscriptions { if _, ok := response.Blocks[child.topic]; !ok { bc.acks.Done() continue } if _, ok := response.Blocks[child.topic][child.partition]; !ok { bc.acks.Done() continue } child.feeder <- response } bc.acks.Wait() bc.handleResponses() } } func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) { for _, child := range newSubscriptions { bc.subscriptions[child] = none{} Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition) } for child := range bc.subscriptions { select { case <-child.dying: Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition) close(child.trigger) delete(bc.subscriptions, child) default: // no-op } } } // handleResponses handles the response codes left for us by our subscriptions, and abandons ones that have been closed func (bc *brokerConsumer) handleResponses() { for child := range bc.subscriptions { result := child.responseResult child.responseResult = nil if result == nil { if preferredBroker, _, err := child.preferredBroker(); err == nil { if bc.broker.ID() != preferredBroker.ID() { // not an error but needs redispatching to consume from preferred replica Logger.Printf( "consumer/broker/%d abandoned in favor of preferred replica broker/%d\n", bc.broker.ID(), preferredBroker.ID()) child.trigger <- none{} delete(bc.subscriptions, child) } } continue } // Discard any replica preference. 
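The preferred-replica redispatch above only happens when the broker nominates a replica, which requires the client to advertise its rack (KIP-392) and the cluster to run a broker version with a replica selector configured. A configuration sketch; the rack name is a placeholder:

package example

import "github.com/IBM/sarama"

// newRackAwareConfig asks brokers that support KIP-392 to point this
// consumer at the closest replica. RackID is matched against the brokers'
// broker.rack setting.
func newRackAwareConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_3_0_0 // FetchRequest v11 carries the RackID
	cfg.RackID = "us-east-1a"     // placeholder: must match broker.rack
	return cfg
}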
child.preferredReadReplica = invalidPreferredReplicaID if errors.Is(result, errTimedOut) { Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n", bc.broker.ID(), child.topic, child.partition) delete(bc.subscriptions, child) } else if errors.Is(result, ErrOffsetOutOfRange) { // there's no point in retrying this it will just fail the same way again // shut it down and force the user to choose what to do child.sendError(result) Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result) close(child.trigger) delete(bc.subscriptions, child) } else if errors.Is(result, ErrUnknownTopicOrPartition) || errors.Is(result, ErrNotLeaderForPartition) || errors.Is(result, ErrLeaderNotAvailable) || errors.Is(result, ErrReplicaNotAvailable) || errors.Is(result, ErrFencedLeaderEpoch) || errors.Is(result, ErrUnknownLeaderEpoch) { // not an error, but does need redispatching Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n", bc.broker.ID(), child.topic, child.partition, result) child.trigger <- none{} delete(bc.subscriptions, child) } else { // dunno, tell the user and try redispatching child.sendError(result) Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n", bc.broker.ID(), child.topic, child.partition, result) child.trigger <- none{} delete(bc.subscriptions, child) } } } func (bc *brokerConsumer) abort(err error) { bc.consumer.abandonBrokerConsumer(bc) _ = bc.broker.Close() // we don't care about the error this might return, we already have one for child := range bc.subscriptions { child.sendError(err) child.trigger <- none{} } for newSubscriptions := range bc.newSubscriptions { if len(newSubscriptions) == 0 { // Take a small nap to avoid burning the CPU. time.Sleep(partitionConsumersBatchTimeout) continue } for _, child := range newSubscriptions { child.sendError(err) child.trigger <- none{} } } } // fetchResponse can be nil if no fetch is made, it can occur when // all partitions are paused func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) { request := &FetchRequest{ MinBytes: bc.consumer.conf.Consumer.Fetch.Min, MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond), } // Version 1 is the same as version 0. if bc.consumer.conf.Version.IsAtLeast(V0_9_0_0) { request.Version = 1 } // Starting in Version 2, the requestor must be able to handle Kafka Log // Message format version 1. if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) { request.Version = 2 } // Version 3 adds MaxBytes. Starting in version 3, the partition ordering in // the request is now relevant. Partitions will be processed in the order // they appear in the request. if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) { request.Version = 3 request.MaxBytes = MaxResponseSize } // Version 4 adds IsolationLevel. Starting in version 4, the reqestor must be // able to handle Kafka log message format version 2. // Version 5 adds LogStartOffset to indicate the earliest available offset of // partition data that can be consumed. if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) { request.Version = 5 request.Isolation = bc.consumer.conf.Consumer.IsolationLevel } // Version 6 is the same as version 5. if bc.consumer.conf.Version.IsAtLeast(V1_0_0_0) { request.Version = 6 } // Version 7 adds incremental fetch request support. if bc.consumer.conf.Version.IsAtLeast(V1_1_0_0) { request.Version = 7 // We do not currently implement KIP-227 FetchSessions. 
Setting the id to 0 // and the epoch to -1 tells the broker not to generate as session ID we're going // to just ignore anyway. request.SessionID = 0 request.SessionEpoch = -1 } // Version 8 is the same as version 7. if bc.consumer.conf.Version.IsAtLeast(V2_0_0_0) { request.Version = 8 } // Version 9 adds CurrentLeaderEpoch, as described in KIP-320. // Version 10 indicates that we can use the ZStd compression algorithm, as // described in KIP-110. if bc.consumer.conf.Version.IsAtLeast(V2_1_0_0) { request.Version = 10 } // Version 11 adds RackID for KIP-392 fetch from closest replica if bc.consumer.conf.Version.IsAtLeast(V2_3_0_0) { request.Version = 11 request.RackID = bc.consumer.conf.RackID } for child := range bc.subscriptions { if !child.IsPaused() { request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize, child.leaderEpoch) } } // avoid to fetch when there is no block if len(request.blocks) == 0 { return nil, nil } return bc.broker.Fetch(request) } golang-github-ibm-sarama-1.43.2/consumer_group.go000066400000000000000000001040221461256741300217450ustar00rootroot00000000000000package sarama import ( "context" "errors" "fmt" "sort" "sync" "time" "github.com/rcrowley/go-metrics" ) // ErrClosedConsumerGroup is the error returned when a method is called on a consumer group that has been closed. var ErrClosedConsumerGroup = errors.New("kafka: tried to use a consumer group that was closed") // ConsumerGroup is responsible for dividing up processing of topics and partitions // over a collection of processes (the members of the consumer group). type ConsumerGroup interface { // Consume joins a cluster of consumers for a given list of topics and // starts a blocking ConsumerGroupSession through the ConsumerGroupHandler. // // The life-cycle of a session is represented by the following steps: // // 1. The consumers join the group (as explained in https://kafka.apache.org/documentation/#intro_consumers) // and is assigned their "fair share" of partitions, aka 'claims'. // 2. Before processing starts, the handler's Setup() hook is called to notify the user // of the claims and allow any necessary preparation or alteration of state. // 3. For each of the assigned claims the handler's ConsumeClaim() function is then called // in a separate goroutine which requires it to be thread-safe. Any state must be carefully protected // from concurrent reads/writes. // 4. The session will persist until one of the ConsumeClaim() functions exits. This can be either when the // parent context is canceled or when a server-side rebalance cycle is initiated. // 5. Once all the ConsumeClaim() loops have exited, the handler's Cleanup() hook is called // to allow the user to perform any final tasks before a rebalance. // 6. Finally, marked offsets are committed one last time before claims are released. // // Please note, that once a rebalance is triggered, sessions must be completed within // Config.Consumer.Group.Rebalance.Timeout. This means that ConsumeClaim() functions must exit // as quickly as possible to allow time for Cleanup() and the final offset commit. If the timeout // is exceeded, the consumer will be removed from the group by Kafka, which will cause offset // commit failures. // This method should be called inside an infinite loop, when a // server-side rebalance happens, the consumer session will need to be // recreated to get the new claims. 
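The timing requirements described above map onto three Config fields. A sketch with commonly used (but purely illustrative) values:

package example

import (
	"time"

	"github.com/IBM/sarama"
)

// newGroupTimingConfig shows the knobs the documentation above refers to:
// the session timeout after which the broker evicts a silent member, the
// heartbeat interval (conventionally about a third of the session
// timeout), and the rebalance timeout within which ConsumeClaim and
// Cleanup must finish.
func newGroupTimingConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_1_0_0
	cfg.Consumer.Group.Session.Timeout = 10 * time.Second
	cfg.Consumer.Group.Heartbeat.Interval = 3 * time.Second
	cfg.Consumer.Group.Rebalance.Timeout = 60 * time.Second
	return cfg
}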
Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error // Errors returns a read channel of errors that occurred during the consumer life-cycle. // By default, errors are logged and not returned over this channel. // If you want to implement any custom error handling, set your config's // Consumer.Return.Errors setting to true, and read from this channel. Errors() <-chan error // Close stops the ConsumerGroup and detaches any running sessions. It is required to call // this function before the object passes out of scope, as it will otherwise leak memory. Close() error // Pause suspends fetching from the requested partitions. Future calls to the broker will not return any // records from these partitions until they have been resumed using Resume()/ResumeAll(). // Note that this method does not affect partition subscription. // In particular, it does not cause a group rebalance when automatic assignment is used. Pause(partitions map[string][]int32) // Resume resumes specified partitions which have been paused with Pause()/PauseAll(). // New calls to the broker will return records from these partitions if there are any to be fetched. Resume(partitions map[string][]int32) // Pause suspends fetching from all partitions. Future calls to the broker will not return any // records from these partitions until they have been resumed using Resume()/ResumeAll(). // Note that this method does not affect partition subscription. // In particular, it does not cause a group rebalance when automatic assignment is used. PauseAll() // Resume resumes all partitions which have been paused with Pause()/PauseAll(). // New calls to the broker will return records from these partitions if there are any to be fetched. ResumeAll() } type consumerGroup struct { client Client config *Config consumer Consumer groupID string groupInstanceId *string memberID string errors chan error lock sync.Mutex errorsLock sync.RWMutex closed chan none closeOnce sync.Once userData []byte metricRegistry metrics.Registry } // NewConsumerGroup creates a new consumer group the given broker addresses and configuration. func NewConsumerGroup(addrs []string, groupID string, config *Config) (ConsumerGroup, error) { client, err := NewClient(addrs, config) if err != nil { return nil, err } c, err := newConsumerGroup(groupID, client) if err != nil { _ = client.Close() } return c, err } // NewConsumerGroupFromClient creates a new consumer group using the given client. It is still // necessary to call Close() on the underlying client when shutting down this consumer. // PLEASE NOTE: consumer groups can only re-use but not share clients. func NewConsumerGroupFromClient(groupID string, client Client) (ConsumerGroup, error) { if client == nil { return nil, ConfigurationError("client must not be nil") } // For clients passed in by the client, ensure we don't // call Close() on it. 
cli := &nopCloserClient{client} return newConsumerGroup(groupID, cli) } func newConsumerGroup(groupID string, client Client) (ConsumerGroup, error) { config := client.Config() if !config.Version.IsAtLeast(V0_10_2_0) { return nil, ConfigurationError("consumer groups require Version to be >= V0_10_2_0") } consumer, err := newConsumer(client) if err != nil { return nil, err } cg := &consumerGroup{ client: client, consumer: consumer, config: config, groupID: groupID, errors: make(chan error, config.ChannelBufferSize), closed: make(chan none), userData: config.Consumer.Group.Member.UserData, metricRegistry: newCleanupRegistry(config.MetricRegistry), } if config.Consumer.Group.InstanceId != "" && config.Version.IsAtLeast(V2_3_0_0) { cg.groupInstanceId = &config.Consumer.Group.InstanceId } return cg, nil } // Errors implements ConsumerGroup. func (c *consumerGroup) Errors() <-chan error { return c.errors } // Close implements ConsumerGroup. func (c *consumerGroup) Close() (err error) { c.closeOnce.Do(func() { close(c.closed) // leave group if e := c.leave(); e != nil { err = e } go func() { c.errorsLock.Lock() defer c.errorsLock.Unlock() close(c.errors) }() // drain errors for e := range c.errors { err = e } if e := c.client.Close(); e != nil { err = e } c.metricRegistry.UnregisterAll() }) return } // Consume implements ConsumerGroup. func (c *consumerGroup) Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error { // Ensure group is not closed select { case <-c.closed: return ErrClosedConsumerGroup default: } c.lock.Lock() defer c.lock.Unlock() // Quick exit when no topics are provided if len(topics) == 0 { return fmt.Errorf("no topics provided") } // Refresh metadata for requested topics if err := c.client.RefreshMetadata(topics...); err != nil { return err } // Init session sess, err := c.newSession(ctx, topics, handler, c.config.Consumer.Group.Rebalance.Retry.Max) if errors.Is(err, ErrClosedClient) { return ErrClosedConsumerGroup } else if err != nil { return err } // Wait for session exit signal or Close() call select { case <-c.closed: case <-sess.ctx.Done(): } // Gracefully release session claims return sess.release(true) } // Pause implements ConsumerGroup. func (c *consumerGroup) Pause(partitions map[string][]int32) { c.consumer.Pause(partitions) } // Resume implements ConsumerGroup. func (c *consumerGroup) Resume(partitions map[string][]int32) { c.consumer.Resume(partitions) } // PauseAll implements ConsumerGroup. func (c *consumerGroup) PauseAll() { c.consumer.PauseAll() } // ResumeAll implements ConsumerGroup. 
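Because NewConsumerGroupFromClient wraps the supplied client in a no-op closer, the caller keeps ownership and must close both objects, group first. A usage sketch with placeholder broker address and group ID:

package example

import (
	"log"

	"github.com/IBM/sarama"
)

// newGroupFromSharedClient builds a consumer group on top of a client the
// caller already owns. Close must be called on both: the group first,
// then the client (defers run in LIFO order below).
func newGroupFromSharedClient() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_1_0_0 // consumer groups require >= V0_10_2_0

	client, err := sarama.NewClient([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatalln(err)
	}
	defer func() { _ = client.Close() }()

	group, err := sarama.NewConsumerGroupFromClient("my-group", client)
	if err != nil {
		log.Fatalln(err)
	}
	defer func() { _ = group.Close() }()

	// ... call group.Consume in a loop here ...
}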
func (c *consumerGroup) ResumeAll() { c.consumer.ResumeAll() } func (c *consumerGroup) retryNewSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int, refreshCoordinator bool) (*consumerGroupSession, error) { select { case <-ctx.Done(): return nil, ctx.Err() case <-c.closed: return nil, ErrClosedConsumerGroup case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff): } if refreshCoordinator { err := c.client.RefreshCoordinator(c.groupID) if err != nil { if retries <= 0 { return nil, err } return c.retryNewSession(ctx, topics, handler, retries-1, true) } } return c.newSession(ctx, topics, handler, retries-1) } func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int) (*consumerGroupSession, error) { if ctx.Err() != nil { return nil, ctx.Err() } coordinator, err := c.client.Coordinator(c.groupID) if err != nil { if retries <= 0 { return nil, err } return c.retryNewSession(ctx, topics, handler, retries, true) } var ( metricRegistry = c.metricRegistry consumerGroupJoinTotal metrics.Counter consumerGroupJoinFailed metrics.Counter consumerGroupSyncTotal metrics.Counter consumerGroupSyncFailed metrics.Counter ) if metricRegistry != nil { consumerGroupJoinTotal = metrics.GetOrRegisterCounter(fmt.Sprintf("consumer-group-join-total-%s", c.groupID), metricRegistry) consumerGroupJoinFailed = metrics.GetOrRegisterCounter(fmt.Sprintf("consumer-group-join-failed-%s", c.groupID), metricRegistry) consumerGroupSyncTotal = metrics.GetOrRegisterCounter(fmt.Sprintf("consumer-group-sync-total-%s", c.groupID), metricRegistry) consumerGroupSyncFailed = metrics.GetOrRegisterCounter(fmt.Sprintf("consumer-group-sync-failed-%s", c.groupID), metricRegistry) } // Join consumer group join, err := c.joinGroupRequest(coordinator, topics) if consumerGroupJoinTotal != nil { consumerGroupJoinTotal.Inc(1) } if err != nil { _ = coordinator.Close() if consumerGroupJoinFailed != nil { consumerGroupJoinFailed.Inc(1) } return nil, err } if !errors.Is(join.Err, ErrNoError) { if consumerGroupJoinFailed != nil { consumerGroupJoinFailed.Inc(1) } } switch join.Err { case ErrNoError: c.memberID = join.MemberId case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately c.memberID = "" return c.newSession(ctx, topics, handler, retries) case ErrNotCoordinatorForConsumer, ErrRebalanceInProgress, ErrOffsetsLoadInProgress: // retry after backoff if retries <= 0 { return nil, join.Err } return c.retryNewSession(ctx, topics, handler, retries, true) case ErrMemberIdRequired: // from JoinGroupRequest v4 onwards (due to KIP-394) if the client starts // with an empty member id, it needs to get the assigned id from the // response and send another join request with that id to actually join the // group c.memberID = join.MemberId return c.newSession(ctx, topics, handler, retries) case ErrFencedInstancedId: if c.groupInstanceId != nil { Logger.Printf("JoinGroup failed: group instance id %s has been fenced\n", *c.groupInstanceId) } return nil, join.Err default: return nil, join.Err } var strategy BalanceStrategy var ok bool if strategy = c.config.Consumer.Group.Rebalance.Strategy; strategy == nil { strategy, ok = c.findStrategy(join.GroupProtocol, c.config.Consumer.Group.Rebalance.GroupStrategies) if !ok { // this case shouldn't happen in practice, since the leader will choose the protocol // that all the members support return nil, fmt.Errorf("unable to find selected strategy: %s", join.GroupProtocol) } } // Prepare 
distribution plan if we joined as the leader var plan BalanceStrategyPlan var members map[string]ConsumerGroupMemberMetadata var allSubscribedTopicPartitions map[string][]int32 var allSubscribedTopics []string if join.LeaderId == join.MemberId { members, err = join.GetMembers() if err != nil { return nil, err } allSubscribedTopicPartitions, allSubscribedTopics, plan, err = c.balance(strategy, members) if err != nil { return nil, err } } // Sync consumer group syncGroupResponse, err := c.syncGroupRequest(coordinator, members, plan, join.GenerationId, strategy) if consumerGroupSyncTotal != nil { consumerGroupSyncTotal.Inc(1) } if err != nil { _ = coordinator.Close() if consumerGroupSyncFailed != nil { consumerGroupSyncFailed.Inc(1) } return nil, err } if !errors.Is(syncGroupResponse.Err, ErrNoError) { if consumerGroupSyncFailed != nil { consumerGroupSyncFailed.Inc(1) } } switch syncGroupResponse.Err { case ErrNoError: case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately c.memberID = "" return c.newSession(ctx, topics, handler, retries) case ErrNotCoordinatorForConsumer, ErrRebalanceInProgress, ErrOffsetsLoadInProgress: // retry after backoff if retries <= 0 { return nil, syncGroupResponse.Err } return c.retryNewSession(ctx, topics, handler, retries, true) case ErrFencedInstancedId: if c.groupInstanceId != nil { Logger.Printf("JoinGroup failed: group instance id %s has been fenced\n", *c.groupInstanceId) } return nil, syncGroupResponse.Err default: return nil, syncGroupResponse.Err } // Retrieve and sort claims var claims map[string][]int32 if len(syncGroupResponse.MemberAssignment) > 0 { members, err := syncGroupResponse.GetMemberAssignment() if err != nil { return nil, err } claims = members.Topics // in the case of stateful balance strategies, hold on to the returned // assignment metadata, otherwise, reset the statically defined consumer // group metadata if members.UserData != nil { c.userData = members.UserData } else { c.userData = c.config.Consumer.Group.Member.UserData } for _, partitions := range claims { sort.Sort(int32Slice(partitions)) } } session, err := newConsumerGroupSession(ctx, c, claims, join.MemberId, join.GenerationId, handler) if err != nil { return nil, err } // only the leader needs to check whether there are newly-added partitions in order to trigger a rebalance if join.LeaderId == join.MemberId { go c.loopCheckPartitionNumbers(allSubscribedTopicPartitions, allSubscribedTopics, session) } return session, err } func (c *consumerGroup) joinGroupRequest(coordinator *Broker, topics []string) (*JoinGroupResponse, error) { req := &JoinGroupRequest{ GroupId: c.groupID, MemberId: c.memberID, SessionTimeout: int32(c.config.Consumer.Group.Session.Timeout / time.Millisecond), ProtocolType: "consumer", } if c.config.Version.IsAtLeast(V0_10_1_0) { req.Version = 1 req.RebalanceTimeout = int32(c.config.Consumer.Group.Rebalance.Timeout / time.Millisecond) } if c.config.Version.IsAtLeast(V0_11_0_0) { req.Version = 2 } if c.config.Version.IsAtLeast(V0_11_0_0) { req.Version = 2 } if c.config.Version.IsAtLeast(V2_0_0_0) { req.Version = 3 } // from JoinGroupRequest v4 onwards (due to KIP-394) the client will actually // send two JoinGroupRequests, once with the empty member id, and then again // with the assigned id from the first response. This is handled via the // ErrMemberIdRequired case. 
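Which balance strategy the leader ends up selecting (and which protocols are advertised in the JoinGroup request below) is controlled by Consumer.Group.Rebalance.GroupStrategies. A configuration sketch, assuming the constructor-style strategy helpers:

package example

import "github.com/IBM/sarama"

// newStrategyConfig advertises two balance strategies to the group
// coordinator, which picks a protocol that every member supports. Sticky
// keeps assignments stable across rebalances; round-robin is the fallback
// for members that do not support it.
func newStrategyConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_1_0_0
	cfg.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{
		sarama.NewBalanceStrategySticky(),
		sarama.NewBalanceStrategyRoundRobin(),
	}
	return cfg
}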
if c.config.Version.IsAtLeast(V2_2_0_0) { req.Version = 4 } if c.config.Version.IsAtLeast(V2_3_0_0) { req.Version = 5 req.GroupInstanceId = c.groupInstanceId } meta := &ConsumerGroupMemberMetadata{ Topics: topics, UserData: c.userData, } var strategy BalanceStrategy if strategy = c.config.Consumer.Group.Rebalance.Strategy; strategy != nil { if err := req.AddGroupProtocolMetadata(strategy.Name(), meta); err != nil { return nil, err } } else { for _, strategy = range c.config.Consumer.Group.Rebalance.GroupStrategies { if err := req.AddGroupProtocolMetadata(strategy.Name(), meta); err != nil { return nil, err } } } return coordinator.JoinGroup(req) } // findStrategy returns the BalanceStrategy with the specified protocolName // from the slice provided. func (c *consumerGroup) findStrategy(name string, groupStrategies []BalanceStrategy) (BalanceStrategy, bool) { for _, strategy := range groupStrategies { if strategy.Name() == name { return strategy, true } } return nil, false } func (c *consumerGroup) syncGroupRequest( coordinator *Broker, members map[string]ConsumerGroupMemberMetadata, plan BalanceStrategyPlan, generationID int32, strategy BalanceStrategy, ) (*SyncGroupResponse, error) { req := &SyncGroupRequest{ GroupId: c.groupID, MemberId: c.memberID, GenerationId: generationID, } // Versions 1 and 2 are the same as version 0. if c.config.Version.IsAtLeast(V0_11_0_0) { req.Version = 1 } if c.config.Version.IsAtLeast(V2_0_0_0) { req.Version = 2 } // Starting from version 3, we add a new field called groupInstanceId to indicate member identity across restarts. if c.config.Version.IsAtLeast(V2_3_0_0) { req.Version = 3 req.GroupInstanceId = c.groupInstanceId } for memberID, topics := range plan { assignment := &ConsumerGroupMemberAssignment{Topics: topics} userDataBytes, err := strategy.AssignmentData(memberID, topics, generationID) if err != nil { return nil, err } assignment.UserData = userDataBytes if err := req.AddGroupAssignmentMember(memberID, assignment); err != nil { return nil, err } delete(members, memberID) } // add empty assignments for any remaining members for memberID := range members { if err := req.AddGroupAssignmentMember(memberID, &ConsumerGroupMemberAssignment{}); err != nil { return nil, err } } return coordinator.SyncGroup(req) } func (c *consumerGroup) heartbeatRequest(coordinator *Broker, memberID string, generationID int32) (*HeartbeatResponse, error) { req := &HeartbeatRequest{ GroupId: c.groupID, MemberId: memberID, GenerationId: generationID, } // Version 1 and version 2 are the same as version 0. if c.config.Version.IsAtLeast(V0_11_0_0) { req.Version = 1 } if c.config.Version.IsAtLeast(V2_0_0_0) { req.Version = 2 } // Starting from version 3, we add a new field called groupInstanceId to indicate member identity across restarts. 
if c.config.Version.IsAtLeast(V2_3_0_0) { req.Version = 3 req.GroupInstanceId = c.groupInstanceId } return coordinator.Heartbeat(req) } func (c *consumerGroup) balance(strategy BalanceStrategy, members map[string]ConsumerGroupMemberMetadata) (map[string][]int32, []string, BalanceStrategyPlan, error) { topicPartitions := make(map[string][]int32) for _, meta := range members { for _, topic := range meta.Topics { topicPartitions[topic] = nil } } allSubscribedTopics := make([]string, 0, len(topicPartitions)) for topic := range topicPartitions { allSubscribedTopics = append(allSubscribedTopics, topic) } // refresh metadata for all the subscribed topics in the consumer group // to avoid using stale metadata to assigning partitions err := c.client.RefreshMetadata(allSubscribedTopics...) if err != nil { return nil, nil, nil, err } for topic := range topicPartitions { partitions, err := c.client.Partitions(topic) if err != nil { return nil, nil, nil, err } topicPartitions[topic] = partitions } plan, err := strategy.Plan(members, topicPartitions) return topicPartitions, allSubscribedTopics, plan, err } // Leaves the cluster, called by Close. func (c *consumerGroup) leave() error { c.lock.Lock() defer c.lock.Unlock() if c.memberID == "" { return nil } coordinator, err := c.client.Coordinator(c.groupID) if err != nil { return err } // as per KIP-345 if groupInstanceId is set, i.e. static membership is in action, then do not leave group when consumer closed, just clear memberID if c.groupInstanceId != nil { c.memberID = "" return nil } req := &LeaveGroupRequest{ GroupId: c.groupID, MemberId: c.memberID, } if c.config.Version.IsAtLeast(V0_11_0_0) { req.Version = 1 } if c.config.Version.IsAtLeast(V2_0_0_0) { req.Version = 2 } if c.config.Version.IsAtLeast(V2_4_0_0) { req.Version = 3 req.Members = append(req.Members, MemberIdentity{ MemberId: c.memberID, }) } resp, err := coordinator.LeaveGroup(req) if err != nil { _ = coordinator.Close() return err } // clear the memberID c.memberID = "" switch resp.Err { case ErrRebalanceInProgress, ErrUnknownMemberId, ErrNoError: return nil default: return resp.Err } } func (c *consumerGroup) handleError(err error, topic string, partition int32) { var consumerError *ConsumerError if ok := errors.As(err, &consumerError); !ok && topic != "" && partition > -1 { err = &ConsumerError{ Topic: topic, Partition: partition, Err: err, } } if !c.config.Consumer.Return.Errors { Logger.Println(err) return } c.errorsLock.RLock() defer c.errorsLock.RUnlock() select { case <-c.closed: // consumer is closed return default: } select { case c.errors <- err: default: // no error listener } } func (c *consumerGroup) loopCheckPartitionNumbers(allSubscribedTopicPartitions map[string][]int32, topics []string, session *consumerGroupSession) { if c.config.Metadata.RefreshFrequency == time.Duration(0) { return } defer session.cancel() oldTopicToPartitionNum := make(map[string]int, len(allSubscribedTopicPartitions)) for topic, partitions := range allSubscribedTopicPartitions { oldTopicToPartitionNum[topic] = len(partitions) } pause := time.NewTicker(c.config.Metadata.RefreshFrequency) defer pause.Stop() for { if newTopicToPartitionNum, err := c.topicToPartitionNumbers(topics); err != nil { return } else { for topic, num := range oldTopicToPartitionNum { if newTopicToPartitionNum[topic] != num { Logger.Printf( "consumergroup/%s loop check partition number goroutine find partitions in topics %s changed from %d to %d\n", c.groupID, topics, num, newTopicToPartitionNum[topic]) return // trigger the 
end of the session on exit } } } select { case <-pause.C: case <-session.ctx.Done(): Logger.Printf( "consumergroup/%s loop check partition number goroutine will exit, topics %s\n", c.groupID, topics) // if session closed by other, should be exited return case <-c.closed: return } } } func (c *consumerGroup) topicToPartitionNumbers(topics []string) (map[string]int, error) { topicToPartitionNum := make(map[string]int, len(topics)) for _, topic := range topics { if partitionNum, err := c.client.Partitions(topic); err != nil { Logger.Printf( "consumergroup/%s topic %s get partition number failed due to '%v'\n", c.groupID, topic, err) return nil, err } else { topicToPartitionNum[topic] = len(partitionNum) } } return topicToPartitionNum, nil } // -------------------------------------------------------------------- // ConsumerGroupSession represents a consumer group member session. type ConsumerGroupSession interface { // Claims returns information about the claimed partitions by topic. Claims() map[string][]int32 // MemberID returns the cluster member ID. MemberID() string // GenerationID returns the current generation ID. GenerationID() int32 // MarkOffset marks the provided offset, alongside a metadata string // that represents the state of the partition consumer at that point in time. The // metadata string can be used by another consumer to restore that state, so it // can resume consumption. // // To follow upstream conventions, you are expected to mark the offset of the // next message to read, not the last message read. Thus, when calling `MarkOffset` // you should typically add one to the offset of the last consumed message. // // Note: calling MarkOffset does not necessarily commit the offset to the backend // store immediately for efficiency reasons, and it may never be committed if // your application crashes. This means that you may end up processing the same // message twice, and your processing should ideally be idempotent. MarkOffset(topic string, partition int32, offset int64, metadata string) // Commit the offset to the backend // // Note: calling Commit performs a blocking synchronous operation. Commit() // ResetOffset resets to the provided offset, alongside a metadata string that // represents the state of the partition consumer at that point in time. Reset // acts as a counterpart to MarkOffset, the difference being that it allows to // reset an offset to an earlier or smaller value, where MarkOffset only // allows incrementing the offset. cf MarkOffset for more details. ResetOffset(topic string, partition int32, offset int64, metadata string) // MarkMessage marks a message as consumed. MarkMessage(msg *ConsumerMessage, metadata string) // Context returns the session context. 
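When automatic offset commits are disabled (cfg.Consumer.Offsets.AutoCommit.Enable = false), MarkOffset/MarkMessage only stage offsets and Commit flushes them synchronously, as the comments above describe. A hedged sketch of batched manual commits (the batch size and helper name are illustrative):

package example

import "github.com/IBM/sarama"

// commitEvery marks and synchronously commits offsets in batches of n.
// It assumes automatic offset commits were disabled in the Config.
func commitEvery(n int, sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) {
	count := 0
	for msg := range claim.Messages() {
		sess.MarkMessage(msg, "") // marks the *next* offset to read (msg.Offset+1)
		count++
		if count%n == 0 {
			sess.Commit() // blocking, synchronous flush of all marked offsets
		}
	}
}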
Context() context.Context } type consumerGroupSession struct { parent *consumerGroup memberID string generationID int32 handler ConsumerGroupHandler claims map[string][]int32 offsets *offsetManager ctx context.Context cancel func() waitGroup sync.WaitGroup releaseOnce sync.Once hbDying, hbDead chan none } func newConsumerGroupSession(ctx context.Context, parent *consumerGroup, claims map[string][]int32, memberID string, generationID int32, handler ConsumerGroupHandler) (*consumerGroupSession, error) { // init context ctx, cancel := context.WithCancel(ctx) // init offset manager offsets, err := newOffsetManagerFromClient(parent.groupID, memberID, generationID, parent.client, cancel) if err != nil { return nil, err } // init session sess := &consumerGroupSession{ parent: parent, memberID: memberID, generationID: generationID, handler: handler, offsets: offsets, claims: claims, ctx: ctx, cancel: cancel, hbDying: make(chan none), hbDead: make(chan none), } // start heartbeat loop go sess.heartbeatLoop() // create a POM for each claim for topic, partitions := range claims { for _, partition := range partitions { pom, err := offsets.ManagePartition(topic, partition) if err != nil { _ = sess.release(false) return nil, err } // handle POM errors go func(topic string, partition int32) { for err := range pom.Errors() { sess.parent.handleError(err, topic, partition) } }(topic, partition) } } // perform setup if err := handler.Setup(sess); err != nil { _ = sess.release(true) return nil, err } // start consuming for topic, partitions := range claims { for _, partition := range partitions { sess.waitGroup.Add(1) go func(topic string, partition int32) { defer sess.waitGroup.Done() // cancel the as session as soon as the first // goroutine exits defer sess.cancel() // consume a single topic/partition, blocking sess.consume(topic, partition) }(topic, partition) } } return sess, nil } func (s *consumerGroupSession) Claims() map[string][]int32 { return s.claims } func (s *consumerGroupSession) MemberID() string { return s.memberID } func (s *consumerGroupSession) GenerationID() int32 { return s.generationID } func (s *consumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) { if pom := s.offsets.findPOM(topic, partition); pom != nil { pom.MarkOffset(offset, metadata) } } func (s *consumerGroupSession) Commit() { s.offsets.Commit() } func (s *consumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) { if pom := s.offsets.findPOM(topic, partition); pom != nil { pom.ResetOffset(offset, metadata) } } func (s *consumerGroupSession) MarkMessage(msg *ConsumerMessage, metadata string) { s.MarkOffset(msg.Topic, msg.Partition, msg.Offset+1, metadata) } func (s *consumerGroupSession) Context() context.Context { return s.ctx } func (s *consumerGroupSession) consume(topic string, partition int32) { // quick exit if rebalance is due select { case <-s.ctx.Done(): return case <-s.parent.closed: return default: } // get next offset offset := s.parent.config.Consumer.Offsets.Initial if pom := s.offsets.findPOM(topic, partition); pom != nil { offset, _ = pom.NextOffset() } // create new claim claim, err := newConsumerGroupClaim(s, topic, partition, offset) if err != nil { s.parent.handleError(err, topic, partition) return } // handle errors go func() { for err := range claim.Errors() { s.parent.handleError(err, topic, partition) } }() // trigger close when session is done go func() { select { case <-s.ctx.Done(): case <-s.parent.closed: } 
claim.AsyncClose() }() // start processing if err := s.handler.ConsumeClaim(s, claim); err != nil { s.parent.handleError(err, topic, partition) } // ensure consumer is closed & drained claim.AsyncClose() for _, err := range claim.waitClosed() { s.parent.handleError(err, topic, partition) } } func (s *consumerGroupSession) release(withCleanup bool) (err error) { // signal release, stop heartbeat s.cancel() // wait for consumers to exit s.waitGroup.Wait() // perform release s.releaseOnce.Do(func() { if withCleanup { if e := s.handler.Cleanup(s); e != nil { s.parent.handleError(e, "", -1) err = e } } if e := s.offsets.Close(); e != nil { err = e } close(s.hbDying) <-s.hbDead }) Logger.Printf( "consumergroup/session/%s/%d released\n", s.MemberID(), s.GenerationID()) return } func (s *consumerGroupSession) heartbeatLoop() { defer close(s.hbDead) defer s.cancel() // trigger the end of the session on exit defer func() { Logger.Printf( "consumergroup/session/%s/%d heartbeat loop stopped\n", s.MemberID(), s.GenerationID()) }() pause := time.NewTicker(s.parent.config.Consumer.Group.Heartbeat.Interval) defer pause.Stop() retryBackoff := time.NewTimer(s.parent.config.Metadata.Retry.Backoff) defer retryBackoff.Stop() retries := s.parent.config.Metadata.Retry.Max for { coordinator, err := s.parent.client.Coordinator(s.parent.groupID) if err != nil { if retries <= 0 { s.parent.handleError(err, "", -1) return } retryBackoff.Reset(s.parent.config.Metadata.Retry.Backoff) select { case <-s.hbDying: return case <-retryBackoff.C: retries-- } continue } resp, err := s.parent.heartbeatRequest(coordinator, s.memberID, s.generationID) if err != nil { _ = coordinator.Close() if retries <= 0 { s.parent.handleError(err, "", -1) return } retries-- continue } switch resp.Err { case ErrNoError: retries = s.parent.config.Metadata.Retry.Max case ErrRebalanceInProgress: retries = s.parent.config.Metadata.Retry.Max s.cancel() case ErrUnknownMemberId, ErrIllegalGeneration: return case ErrFencedInstancedId: if s.parent.groupInstanceId != nil { Logger.Printf("JoinGroup failed: group instance id %s has been fenced\n", *s.parent.groupInstanceId) } s.parent.handleError(resp.Err, "", -1) return default: s.parent.handleError(resp.Err, "", -1) return } select { case <-pause.C: case <-s.hbDying: return } } } // -------------------------------------------------------------------- // ConsumerGroupHandler instances are used to handle individual topic/partition claims. // It also provides hooks for your consumer group session life-cycle and allow you to // trigger logic before or after the consume loop(s). // // PLEASE NOTE that handlers are likely be called from several goroutines concurrently, // ensure that all state is safely protected against race conditions. type ConsumerGroupHandler interface { // Setup is run at the beginning of a new session, before ConsumeClaim. Setup(ConsumerGroupSession) error // Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited // but before the offsets are committed for the very last time. Cleanup(ConsumerGroupSession) error // ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages(). // Once the Messages() channel is closed, the Handler must finish its processing // loop and exit. ConsumeClaim(ConsumerGroupSession, ConsumerGroupClaim) error } // ConsumerGroupClaim processes Kafka messages from a given topic and partition within a consumer group. type ConsumerGroupClaim interface { // Topic returns the consumed topic name. 
Topic() string // Partition returns the consumed partition. Partition() int32 // InitialOffset returns the initial offset that was used as a starting point for this claim. InitialOffset() int64 // HighWaterMarkOffset returns the high watermark offset of the partition, // i.e. the offset that will be used for the next message that will be produced. // You can use this to determine how far behind the processing is. HighWaterMarkOffset() int64 // Messages returns the read channel for the messages that are returned by // the broker. The messages channel will be closed when a new rebalance cycle // is due. You must finish processing and mark offsets within // Config.Consumer.Group.Session.Timeout before the topic/partition is eventually // re-assigned to another group member. Messages() <-chan *ConsumerMessage } type consumerGroupClaim struct { topic string partition int32 offset int64 PartitionConsumer } func newConsumerGroupClaim(sess *consumerGroupSession, topic string, partition int32, offset int64) (*consumerGroupClaim, error) { pcm, err := sess.parent.consumer.ConsumePartition(topic, partition, offset) if errors.Is(err, ErrOffsetOutOfRange) && sess.parent.config.Consumer.Group.ResetInvalidOffsets { offset = sess.parent.config.Consumer.Offsets.Initial pcm, err = sess.parent.consumer.ConsumePartition(topic, partition, offset) } if err != nil { return nil, err } go func() { for err := range pcm.Errors() { sess.parent.handleError(err, topic, partition) } }() return &consumerGroupClaim{ topic: topic, partition: partition, offset: offset, PartitionConsumer: pcm, }, nil } func (c *consumerGroupClaim) Topic() string { return c.topic } func (c *consumerGroupClaim) Partition() int32 { return c.partition } func (c *consumerGroupClaim) InitialOffset() int64 { return c.offset } // Drains messages and errors, ensures the claim is fully closed. func (c *consumerGroupClaim) waitClosed() (errs ConsumerErrors) { go func() { for range c.Messages() { } }() for err := range c.Errors() { errs = append(errs, err) } return } golang-github-ibm-sarama-1.43.2/consumer_group_example_test.go000066400000000000000000000025371461256741300245270ustar00rootroot00000000000000package sarama import ( "context" "fmt" ) type exampleConsumerGroupHandler struct{} func (exampleConsumerGroupHandler) Setup(_ ConsumerGroupSession) error { return nil } func (exampleConsumerGroupHandler) Cleanup(_ ConsumerGroupSession) error { return nil } func (h exampleConsumerGroupHandler) ConsumeClaim(sess ConsumerGroupSession, claim ConsumerGroupClaim) error { for msg := range claim.Messages() { fmt.Printf("Message topic:%q partition:%d offset:%d\n", msg.Topic, msg.Partition, msg.Offset) sess.MarkMessage(msg, "") } return nil } func ExampleConsumerGroup() { config := NewTestConfig() config.Version = V2_0_0_0 // specify appropriate version config.Consumer.Return.Errors = true group, err := NewConsumerGroup([]string{"localhost:9092"}, "my-group", config) if err != nil { panic(err) } defer func() { _ = group.Close() }() // Track errors go func() { for err := range group.Errors() { fmt.Println("ERROR", err) } }() // Iterate over consumer sessions. 
ctx := context.Background() for { topics := []string{"my-topic"} handler := exampleConsumerGroupHandler{} // `Consume` should be called inside an infinite loop, when a // server-side rebalance happens, the consumer session will need to be // recreated to get the new claims err := group.Consume(ctx, topics, handler) if err != nil { panic(err) } } } golang-github-ibm-sarama-1.43.2/consumer_group_members.go000066400000000000000000000074201461256741300234630ustar00rootroot00000000000000package sarama import "errors" // ConsumerGroupMemberMetadata holds the metadata for consumer group // https://github.com/apache/kafka/blob/trunk/clients/src/main/resources/common/message/ConsumerProtocolSubscription.json type ConsumerGroupMemberMetadata struct { Version int16 Topics []string UserData []byte OwnedPartitions []*OwnedPartition GenerationID int32 RackID *string } func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error { pe.putInt16(m.Version) if err := pe.putStringArray(m.Topics); err != nil { return err } if err := pe.putBytes(m.UserData); err != nil { return err } if m.Version >= 1 { if err := pe.putArrayLength(len(m.OwnedPartitions)); err != nil { return err } for _, op := range m.OwnedPartitions { if err := op.encode(pe); err != nil { return err } } } if m.Version >= 2 { pe.putInt32(m.GenerationID) } if m.Version >= 3 { if err := pe.putNullableString(m.RackID); err != nil { return err } } return nil } func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) { if m.Version, err = pd.getInt16(); err != nil { return } if m.Topics, err = pd.getStringArray(); err != nil { return } if m.UserData, err = pd.getBytes(); err != nil { return } if m.Version >= 1 { n, err := pd.getArrayLength() if err != nil { // permit missing data here in case of misbehaving 3rd party // clients who incorrectly marked the member metadata as V1 in // their JoinGroup request if errors.Is(err, ErrInsufficientData) { return nil } return err } if n > 0 { m.OwnedPartitions = make([]*OwnedPartition, n) for i := 0; i < n; i++ { m.OwnedPartitions[i] = &OwnedPartition{} if err := m.OwnedPartitions[i].decode(pd); err != nil { return err } } } } if m.Version >= 2 { if m.GenerationID, err = pd.getInt32(); err != nil { return err } } if m.Version >= 3 { if m.RackID, err = pd.getNullableString(); err != nil { return err } } return nil } type OwnedPartition struct { Topic string Partitions []int32 } func (m *OwnedPartition) encode(pe packetEncoder) error { if err := pe.putString(m.Topic); err != nil { return err } if err := pe.putInt32Array(m.Partitions); err != nil { return err } return nil } func (m *OwnedPartition) decode(pd packetDecoder) (err error) { if m.Topic, err = pd.getString(); err != nil { return err } if m.Partitions, err = pd.getInt32Array(); err != nil { return err } return nil } // ConsumerGroupMemberAssignment holds the member assignment for a consume group // https://github.com/apache/kafka/blob/trunk/clients/src/main/resources/common/message/ConsumerProtocolAssignment.json type ConsumerGroupMemberAssignment struct { Version int16 Topics map[string][]int32 UserData []byte } func (m *ConsumerGroupMemberAssignment) encode(pe packetEncoder) error { pe.putInt16(m.Version) if err := pe.putArrayLength(len(m.Topics)); err != nil { return err } for topic, partitions := range m.Topics { if err := pe.putString(topic); err != nil { return err } if err := pe.putInt32Array(partitions); err != nil { return err } } if err := pe.putBytes(m.UserData); err != nil { return err } return nil } func (m 
*ConsumerGroupMemberAssignment) decode(pd packetDecoder) (err error) { if m.Version, err = pd.getInt16(); err != nil { return } var topicLen int if topicLen, err = pd.getArrayLength(); err != nil { return } m.Topics = make(map[string][]int32, topicLen) for i := 0; i < topicLen; i++ { var topic string if topic, err = pd.getString(); err != nil { return } if m.Topics[topic], err = pd.getInt32Array(); err != nil { return } } if m.UserData, err = pd.getBytes(); err != nil { return } return nil } golang-github-ibm-sarama-1.43.2/consumer_group_members_test.go000066400000000000000000000066631461256741300245320ustar00rootroot00000000000000package sarama import ( "bytes" "reflect" "testing" ) var ( groupMemberMetadataV0 = []byte{ 0, 0, // Version 0, 0, 0, 2, // Topic array length 0, 3, 'o', 'n', 'e', // Topic one 0, 3, 't', 'w', 'o', // Topic two 0, 0, 0, 3, 0x01, 0x02, 0x03, // Userdata } groupMemberAssignmentV0 = []byte{ 0, 0, // Version 0, 0, 0, 1, // Topic array length 0, 3, 'o', 'n', 'e', // Topic one 0, 0, 0, 3, // Topic one, partition array length 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 4, // 0, 2, 4 0, 0, 0, 3, 0x01, 0x02, 0x03, // Userdata } // notably it looks like the old 3rdparty bsm/sarama-cluster incorrectly // set V1 in the member metadata when it sent the JoinGroup request so // we need to cope with that one being too short groupMemberMetadataV1Bad = []byte{ 0, 1, // Version 0, 0, 0, 2, // Topic array length 0, 3, 'o', 'n', 'e', // Topic one 0, 3, 't', 'w', 'o', // Topic two 0, 0, 0, 3, 0x01, 0x02, 0x03, // Userdata } groupMemberMetadataV1 = []byte{ 0, 1, // Version 0, 0, 0, 2, // Topic array length 0, 3, 'o', 'n', 'e', // Topic one 0, 3, 't', 'w', 'o', // Topic two 0, 0, 0, 3, 0x01, 0x02, 0x03, // Userdata 0, 0, 0, 0, // OwnedPartitions KIP-429 } groupMemberMetadataV3NilOwned = []byte{ 0, 3, // Version 0, 0, 0, 1, // Topic array length 0, 3, 'o', 'n', 'e', // Topic one 0, 0, 0, 3, 0x01, 0x02, 0x03, // Userdata 0, 0, 0, 0, // OwnedPartitions KIP-429 0, 0, 0, 64, // GenerationID 0, 4, 'r', 'a', 'c', 'k', // RackID } ) func TestConsumerGroupMemberMetadata(t *testing.T) { meta := &ConsumerGroupMemberMetadata{ Version: 0, Topics: []string{"one", "two"}, UserData: []byte{0x01, 0x02, 0x03}, } buf, err := encode(meta, nil) if err != nil { t.Error("Failed to encode data", err) } else if !bytes.Equal(groupMemberMetadataV0, buf) { t.Errorf("Encoded data does not match expectation\nexpected: %v\nactual: %v", groupMemberMetadataV0, buf) } meta2 := new(ConsumerGroupMemberMetadata) err = decode(buf, meta2, nil) if err != nil { t.Error("Failed to decode data", err) } else if !reflect.DeepEqual(meta, meta2) { t.Errorf("Encoded data does not match expectation\nexpected: %v\nactual: %v", meta, meta2) } } func TestConsumerGroupMemberMetadataV1Decode(t *testing.T) { meta := new(ConsumerGroupMemberMetadata) if err := decode(groupMemberMetadataV1, meta, nil); err != nil { t.Error("Failed to decode V1 data", err) } if err := decode(groupMemberMetadataV1Bad, meta, nil); err != nil { t.Error("Failed to decode V1 'bad' data", err) } } func TestConsumerGroupMemberMetadataV3Decode(t *testing.T) { meta := new(ConsumerGroupMemberMetadata) if err := decode(groupMemberMetadataV3NilOwned, meta, nil); err != nil { t.Error("Failed to decode V3 data", err) } } func TestConsumerGroupMemberAssignment(t *testing.T) { amt := &ConsumerGroupMemberAssignment{ Version: 0, Topics: map[string][]int32{ "one": {0, 2, 4}, }, UserData: []byte{0x01, 0x02, 0x03}, } buf, err := encode(amt, nil) if err != nil { t.Error("Failed to encode 
data", err) } else if !bytes.Equal(groupMemberAssignmentV0, buf) { t.Errorf("Encoded data does not match expectation\nexpected: %v\nactual: %v", groupMemberAssignmentV0, buf) } amt2 := new(ConsumerGroupMemberAssignment) err = decode(buf, amt2, nil) if err != nil { t.Error("Failed to decode data", err) } else if !reflect.DeepEqual(amt, amt2) { t.Errorf("Encoded data does not match expectation\nexpected: %v\nactual: %v", amt, amt2) } } golang-github-ibm-sarama-1.43.2/consumer_group_test.go000066400000000000000000000153401461256741300230100ustar00rootroot00000000000000package sarama import ( "context" "errors" "sync" "testing" "time" assert "github.com/stretchr/testify/require" ) type handler struct { *testing.T cancel context.CancelFunc } func (h *handler) Setup(s ConsumerGroupSession) error { return nil } func (h *handler) Cleanup(s ConsumerGroupSession) error { return nil } func (h *handler) ConsumeClaim(sess ConsumerGroupSession, claim ConsumerGroupClaim) error { for msg := range claim.Messages() { sess.MarkMessage(msg, "") h.Logf("consumed msg %v", msg) h.cancel() break } return nil } func TestNewConsumerGroupFromClient(t *testing.T) { t.Run("should not permit nil client", func(t *testing.T) { group, err := NewConsumerGroupFromClient("group", nil) assert.Nil(t, group) assert.Error(t, err) }) } // TestConsumerGroupNewSessionDuringOffsetLoad ensures that the consumer group // will retry Join and Sync group operations, if it receives a temporary // OffsetsLoadInProgress error response, in the same way as it would for a // RebalanceInProgress. func TestConsumerGroupNewSessionDuringOffsetLoad(t *testing.T) { config := NewTestConfig() config.ClientID = t.Name() config.Version = V2_0_0_0 config.Consumer.Return.Errors = true config.Consumer.Group.Rebalance.Retry.Max = 2 config.Consumer.Group.Rebalance.Retry.Backoff = 0 config.Consumer.Offsets.AutoCommit.Enable = false broker0 := NewMockBroker(t, 0) defer broker0.Close() broker0.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetBroker(broker0.Addr(), broker0.BrokerID()). SetLeader("my-topic", 0, broker0.BrokerID()), "OffsetRequest": NewMockOffsetResponse(t). SetOffset("my-topic", 0, OffsetOldest, 0). SetOffset("my-topic", 0, OffsetNewest, 1), "FindCoordinatorRequest": NewMockFindCoordinatorResponse(t). SetCoordinator(CoordinatorGroup, "my-group", broker0), "HeartbeatRequest": NewMockHeartbeatResponse(t), "JoinGroupRequest": NewMockSequence( NewMockJoinGroupResponse(t).SetError(ErrOffsetsLoadInProgress), NewMockJoinGroupResponse(t).SetGroupProtocol(RangeBalanceStrategyName), ), "SyncGroupRequest": NewMockSequence( NewMockSyncGroupResponse(t).SetError(ErrOffsetsLoadInProgress), NewMockSyncGroupResponse(t).SetMemberAssignment( &ConsumerGroupMemberAssignment{ Version: 0, Topics: map[string][]int32{ "my-topic": {0}, }, }), ), "OffsetFetchRequest": NewMockOffsetFetchResponse(t).SetOffset( "my-group", "my-topic", 0, 0, "", ErrNoError, ).SetError(ErrNoError), "FetchRequest": NewMockSequence( NewMockFetchResponse(t, 1). SetMessage("my-topic", 0, 0, StringEncoder("foo")). 
SetMessage("my-topic", 0, 1, StringEncoder("bar")), NewMockFetchResponse(t, 1), ), }) group, err := NewConsumerGroup([]string{broker0.Addr()}, "my-group", config) if err != nil { t.Fatal(err) } defer func() { _ = group.Close() }() ctx, cancel := context.WithCancel(context.Background()) h := &handler{t, cancel} var wg sync.WaitGroup wg.Add(1) go func() { topics := []string{"my-topic"} if err := group.Consume(ctx, topics, h); err != nil { t.Error(err) } wg.Done() }() wg.Wait() } func TestConsume_RaceTest(t *testing.T) { const ( groupID = "test-group" topic = "test-topic" offsetStart = int64(1234) ) cfg := NewTestConfig() cfg.Version = V2_8_1_0 cfg.Consumer.Return.Errors = true cfg.Metadata.Full = true seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() handlerMap := map[string]MockResponse{ "ApiVersionsRequest": NewMockApiVersionsResponse(t), "MetadataRequest": NewMockMetadataResponse(t). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()). SetError("mismatched-topic", ErrUnknownTopicOrPartition), "OffsetRequest": NewMockOffsetResponse(t). SetOffset(topic, 0, -1, offsetStart), "OffsetFetchRequest": NewMockOffsetFetchResponse(t). SetOffset(groupID, topic, 0, offsetStart, "", ErrNoError), "FindCoordinatorRequest": NewMockFindCoordinatorResponse(t). SetCoordinator(CoordinatorGroup, groupID, seedBroker), "JoinGroupRequest": NewMockJoinGroupResponse(t), "SyncGroupRequest": NewMockSyncGroupResponse(t).SetMemberAssignment( &ConsumerGroupMemberAssignment{ Version: 1, Topics: map[string][]int32{topic: {0}}, // map "test-topic" to partition 0 UserData: []byte{0x01}, }, ), "HeartbeatRequest": NewMockHeartbeatResponse(t), } seedBroker.SetHandlerByMap(handlerMap) cancelCtx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second)) retryWait := 10 * time.Millisecond var err error clientRetries := 0 outerFor: for { _, err = NewConsumerGroup([]string{seedBroker.Addr()}, groupID, cfg) if err == nil { break } if retryWait < time.Minute { retryWait *= 2 } clientRetries++ timer := time.NewTimer(retryWait) select { case <-cancelCtx.Done(): err = cancelCtx.Err() timer.Stop() break outerFor case <-timer.C: } timer.Stop() } if err == nil { t.Fatalf("should not proceed to Consume") } if clientRetries <= 1 { t.Errorf("clientRetries = %v; want > 1", clientRetries) } if err != nil && !errors.Is(err, context.DeadlineExceeded) { t.Fatal(err) } cancel() } // TestConsumerGroupSessionDoesNotRetryForever ensures that an error fetching // the coordinator decrements the retry attempts and doesn't end up retrying // forever func TestConsumerGroupSessionDoesNotRetryForever(t *testing.T) { config := NewTestConfig() config.ClientID = t.Name() config.Version = V2_0_0_0 config.Consumer.Return.Errors = true config.Consumer.Group.Rebalance.Retry.Max = 1 config.Consumer.Group.Rebalance.Retry.Backoff = 0 broker0 := NewMockBroker(t, 0) defer broker0.Close() broker0.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetBroker(broker0.Addr(), broker0.BrokerID()). SetLeader("my-topic", 0, broker0.BrokerID()), "FindCoordinatorRequest": NewMockFindCoordinatorResponse(t). 
SetError(CoordinatorGroup, "my-group", ErrGroupAuthorizationFailed), }) group, err := NewConsumerGroup([]string{broker0.Addr()}, "my-group", config) if err != nil { t.Fatal(err) } defer func() { _ = group.Close() }() ctx, cancel := context.WithCancel(context.Background()) h := &handler{t, cancel} var wg sync.WaitGroup wg.Add(1) go func() { topics := []string{"my-topic"} err := group.Consume(ctx, topics, h) assert.Error(t, err) wg.Done() }() wg.Wait() } func TestConsumerShouldNotRetrySessionIfContextCancelled(t *testing.T) { c := &consumerGroup{ config: NewTestConfig(), } ctx, cancel := context.WithCancel(context.Background()) cancel() _, err := c.newSession(ctx, nil, nil, 1024) assert.Equal(t, context.Canceled, err) _, err = c.retryNewSession(ctx, nil, nil, 1024, true) assert.Equal(t, context.Canceled, err) } golang-github-ibm-sarama-1.43.2/consumer_metadata_request.go000066400000000000000000000021321461256741300241400ustar00rootroot00000000000000package sarama // ConsumerMetadataRequest is used for metadata requests type ConsumerMetadataRequest struct { Version int16 ConsumerGroup string } func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error { tmp := new(FindCoordinatorRequest) tmp.CoordinatorKey = r.ConsumerGroup tmp.CoordinatorType = CoordinatorGroup tmp.Version = r.Version return tmp.encode(pe) } func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) { tmp := new(FindCoordinatorRequest) if err := tmp.decode(pd, version); err != nil { return err } r.ConsumerGroup = tmp.CoordinatorKey return nil } func (r *ConsumerMetadataRequest) key() int16 { return 10 } func (r *ConsumerMetadataRequest) version() int16 { return r.Version } func (r *ConsumerMetadataRequest) headerVersion() int16 { return 1 } func (r *ConsumerMetadataRequest) isValidVersion() bool { return r.Version >= 0 && r.Version <= 2 } func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion { switch r.Version { case 2: return V2_0_0_0 case 1: return V0_11_0_0 default: return V0_8_2_0 } } golang-github-ibm-sarama-1.43.2/consumer_metadata_request_test.go000066400000000000000000000011711461256741300252010ustar00rootroot00000000000000package sarama import ( "testing" ) var ( consumerMetadataRequestEmpty = []byte{ 0x00, 0x00, } consumerMetadataRequestString = []byte{ 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r', } ) func TestConsumerMetadataRequest(t *testing.T) { request := new(ConsumerMetadataRequest) testEncodable(t, "empty string", request, consumerMetadataRequestEmpty) testVersionDecodable(t, "empty string", request, consumerMetadataRequestEmpty, 0) request.ConsumerGroup = "foobar" testEncodable(t, "with string", request, consumerMetadataRequestString) testVersionDecodable(t, "with string", request, consumerMetadataRequestString, 0) } golang-github-ibm-sarama-1.43.2/consumer_metadata_response.go000066400000000000000000000040201461256741300243040ustar00rootroot00000000000000package sarama import ( "net" "strconv" ) // ConsumerMetadataResponse holds the response for a consumer group meta data requests type ConsumerMetadataResponse struct { Version int16 Err KError Coordinator *Broker CoordinatorID int32 // deprecated: use Coordinator.ID() CoordinatorHost string // deprecated: use Coordinator.Addr() CoordinatorPort int32 // deprecated: use Coordinator.Addr() } func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) { tmp := new(FindCoordinatorResponse) if err := tmp.decode(pd, version); err != nil { return err } r.Err = tmp.Err r.Coordinator = 
tmp.Coordinator if tmp.Coordinator == nil { return nil } // this can all go away in 2.0, but we have to fill in deprecated fields to maintain // backwards compatibility host, portstr, err := net.SplitHostPort(r.Coordinator.Addr()) if err != nil { return err } port, err := strconv.ParseInt(portstr, 10, 32) if err != nil { return err } r.CoordinatorID = r.Coordinator.ID() r.CoordinatorHost = host r.CoordinatorPort = int32(port) return nil } func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error { if r.Coordinator == nil { r.Coordinator = new(Broker) r.Coordinator.id = r.CoordinatorID r.Coordinator.addr = net.JoinHostPort(r.CoordinatorHost, strconv.Itoa(int(r.CoordinatorPort))) } tmp := &FindCoordinatorResponse{ Version: r.Version, Err: r.Err, Coordinator: r.Coordinator, } if err := tmp.encode(pe); err != nil { return err } return nil } func (r *ConsumerMetadataResponse) key() int16 { return 10 } func (r *ConsumerMetadataResponse) version() int16 { return r.Version } func (r *ConsumerMetadataResponse) headerVersion() int16 { return 0 } func (r *ConsumerMetadataResponse) isValidVersion() bool { return r.Version >= 0 && r.Version <= 2 } func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion { switch r.Version { case 2: return V2_0_0_0 case 1: return V0_11_0_0 default: return V0_8_2_0 } } golang-github-ibm-sarama-1.43.2/consumer_metadata_response_test.go000066400000000000000000000022471461256741300253540ustar00rootroot00000000000000package sarama import ( "errors" "testing" ) var ( consumerMetadataResponseError = []byte{ 0x00, 0x0E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, } consumerMetadataResponseSuccess = []byte{ 0x00, 0x00, 0x00, 0x00, 0x00, 0xAB, 0x00, 0x03, 'f', 'o', 'o', 0x00, 0x00, 0xCC, 0xDD, } ) func TestConsumerMetadataResponseError(t *testing.T) { response := &ConsumerMetadataResponse{Err: ErrOffsetsLoadInProgress} testEncodable(t, "", response, consumerMetadataResponseError) decodedResp := &ConsumerMetadataResponse{} if err := versionedDecode(consumerMetadataResponseError, decodedResp, 0, nil); err != nil { t.Error("could not decode: ", err) } if !errors.Is(decodedResp.Err, ErrOffsetsLoadInProgress) { t.Errorf("got %s, want %s", decodedResp.Err, ErrOffsetsLoadInProgress) } } func TestConsumerMetadataResponseSuccess(t *testing.T) { broker := NewBroker("foo:52445") broker.id = 0xAB response := ConsumerMetadataResponse{ Coordinator: broker, CoordinatorID: 0xAB, CoordinatorHost: "foo", CoordinatorPort: 0xCCDD, Err: ErrNoError, } testResponse(t, "success", &response, consumerMetadataResponseSuccess) } golang-github-ibm-sarama-1.43.2/consumer_test.go000066400000000000000000001766441461256741300216130ustar00rootroot00000000000000package sarama import ( "bytes" "errors" "log" "os" "os/signal" "reflect" "strconv" "sync/atomic" "testing" "time" ) var ( testMsg = StringEncoder("Foo") testKey = StringEncoder("Bar") ) // If a particular offset is provided then messages are consumed starting from // that offset. 
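// Editorial sketch (not upstream code; broker address, topic name and offset are
// assumed illustrative values): consuming from an explicit starting offset with
// the plain consumer looks roughly like
//
//	consumer, err := NewConsumer([]string{"localhost:9092"}, NewConfig())
//	if err != nil {
//		// handle error
//	}
//	pc, err := consumer.ConsumePartition("my_topic", 0, 1234)
//	if err != nil {
//		// handle error
//	}
//	for msg := range pc.Messages() {
//		// msg.Offset starts at 1234 and increases from there
//	}
//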
func TestConsumerOffsetManual(t *testing.T) { // Given broker0 := NewMockBroker(t, 0) manualOffset := int64(1234) offsetNewest := int64(2345) offsetNewestAfterFetchRequest := int64(3456) mockFetchResponse := NewMockFetchResponse(t, 1) // skipped because parseRecords(): offset < child.offset mockFetchResponse.SetMessage("my_topic", 0, manualOffset-1, testMsg) for i := int64(0); i < 10; i++ { mockFetchResponse.SetMessage("my_topic", 0, i+manualOffset, testMsg) } mockFetchResponse.SetHighWaterMark("my_topic", 0, offsetNewestAfterFetchRequest) broker0.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetBroker(broker0.Addr(), broker0.BrokerID()). SetLeader("my_topic", 0, broker0.BrokerID()), "OffsetRequest": NewMockOffsetResponse(t). SetOffset("my_topic", 0, OffsetOldest, 0). SetOffset("my_topic", 0, OffsetNewest, offsetNewest), "FetchRequest": mockFetchResponse, }) // When master, err := NewConsumer([]string{broker0.Addr()}, NewTestConfig()) if err != nil { t.Fatal(err) } consumer, err := master.ConsumePartition("my_topic", 0, manualOffset) if err != nil { t.Fatal(err) } // Then if hwmo := consumer.HighWaterMarkOffset(); hwmo != offsetNewest { t.Errorf("Expected high water mark offset %d, found %d", offsetNewest, hwmo) } for i := int64(0); i < 10; i++ { select { case message := <-consumer.Messages(): assertMessageOffset(t, message, i+manualOffset) case err := <-consumer.Errors(): t.Error(err) } } if hwmo := consumer.HighWaterMarkOffset(); hwmo != offsetNewestAfterFetchRequest { t.Errorf("Expected high water mark offset %d, found %d", offsetNewestAfterFetchRequest, hwmo) } safeClose(t, consumer) safeClose(t, master) broker0.Close() } // If a message is given a key, it can be correctly collected while consuming. func TestConsumerMessageWithKey(t *testing.T) { // Given broker0 := NewMockBroker(t, 0) manualOffset := int64(1234) offsetNewest := int64(2345) offsetNewestAfterFetchRequest := int64(3456) mockFetchResponse := NewMockFetchResponse(t, 1) // skipped because parseRecords(): offset < child.offset mockFetchResponse.SetMessageWithKey("my_topic", 0, manualOffset-1, testKey, testMsg) for i := int64(0); i < 10; i++ { mockFetchResponse.SetMessageWithKey("my_topic", 0, i+manualOffset, testKey, testMsg) } mockFetchResponse.SetHighWaterMark("my_topic", 0, offsetNewestAfterFetchRequest) broker0.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetBroker(broker0.Addr(), broker0.BrokerID()). SetLeader("my_topic", 0, broker0.BrokerID()), "OffsetRequest": NewMockOffsetResponse(t). SetOffset("my_topic", 0, OffsetOldest, 0). 
SetOffset("my_topic", 0, OffsetNewest, offsetNewest), "FetchRequest": mockFetchResponse, }) // When master, err := NewConsumer([]string{broker0.Addr()}, NewTestConfig()) if err != nil { t.Fatal(err) } consumer, err := master.ConsumePartition("my_topic", 0, manualOffset) if err != nil { t.Fatal(err) } // Then if hwmo := consumer.HighWaterMarkOffset(); hwmo != offsetNewest { t.Errorf("Expected high water mark offset %d, found %d", offsetNewest, hwmo) } for i := int64(0); i < 10; i++ { select { case message := <-consumer.Messages(): assertMessageOffset(t, message, i+manualOffset) assertMessageKey(t, message, testKey) assertMessageValue(t, message, testMsg) case err := <-consumer.Errors(): t.Error(err) } } if hwmo := consumer.HighWaterMarkOffset(); hwmo != offsetNewestAfterFetchRequest { t.Errorf("Expected high water mark offset %d, found %d", offsetNewestAfterFetchRequest, hwmo) } safeClose(t, consumer) safeClose(t, master) broker0.Close() } func TestPauseResumeConsumption(t *testing.T) { // Given broker0 := NewMockBroker(t, 0) const newestOffsetBroker = 1233 const maxOffsetBroker = newestOffsetBroker + 10 offsetBroker := newestOffsetBroker offsetClient := offsetBroker mockFetchResponse := NewMockFetchResponse(t, 1) mockFetchResponse.SetMessage("my_topic", 0, int64(newestOffsetBroker), testMsg) offsetBroker++ brokerResponses := map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetBroker(broker0.Addr(), broker0.BrokerID()). SetLeader("my_topic", 0, broker0.BrokerID()), "OffsetRequest": NewMockOffsetResponse(t). SetOffset("my_topic", 0, OffsetOldest, 0). SetOffset("my_topic", 0, OffsetNewest, int64(newestOffsetBroker)), "FetchRequest": mockFetchResponse, } broker0.SetHandlerByMap(brokerResponses) // When master, err := NewConsumer([]string{broker0.Addr()}, NewTestConfig()) if err != nil { t.Fatal(err) } consumer, err := master.ConsumePartition("my_topic", 0, OffsetNewest) if err != nil { t.Fatal(err) } // pause the consumption consumer.Pause() // set more msgs on broker for ; offsetBroker < maxOffsetBroker; offsetBroker++ { mockFetchResponse = mockFetchResponse.SetMessage("my_topic", 0, int64(offsetBroker), testMsg) } brokerResponses["FetchRequest"] = mockFetchResponse broker0.SetHandlerByMap(brokerResponses) keepConsuming := true for keepConsuming { select { case message := <-consumer.Messages(): // only the first msg is expected to be consumed offsetClient++ assertMessageOffset(t, message, int64(newestOffsetBroker)) case err := <-consumer.Errors(): t.Fatal(err) case <-time.After(time.Second): // is expected to timedout once the consumption is pauses keepConsuming = false } } // lets resume the consumption in order to consume the new msgs consumer.Resume() for offsetClient < maxOffsetBroker { select { case message := <-consumer.Messages(): assertMessageOffset(t, message, int64(offsetClient)) offsetClient += 1 case err := <-consumer.Errors(): t.Fatal("Error: ", err) case <-time.After(time.Second * 10): t.Fatal("consumer timed out . Offset: ", offsetClient) } } safeClose(t, consumer) safeClose(t, master) broker0.Close() } // If `OffsetNewest` is passed as the initial offset then the first consumed // message indeed corresponds to the offset that broker claims to be the // newest in its metadata response. func TestConsumerOffsetNewest(t *testing.T) { // Given offsetNewest := int64(10) offsetNewestAfterFetchRequest := int64(50) broker0 := NewMockBroker(t, 0) broker0.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). 
SetBroker(broker0.Addr(), broker0.BrokerID()). SetLeader("my_topic", 0, broker0.BrokerID()), "OffsetRequest": NewMockOffsetResponse(t). SetOffset("my_topic", 0, OffsetNewest, offsetNewest). SetOffset("my_topic", 0, OffsetOldest, 7), "FetchRequest": NewMockFetchResponse(t, 1). SetMessage("my_topic", 0, 9, testMsg). // skipped because parseRecords(): offset < child.offset SetMessage("my_topic", 0, 10, testMsg). SetMessage("my_topic", 0, 11, testMsg). SetHighWaterMark("my_topic", 0, offsetNewestAfterFetchRequest), }) master, err := NewConsumer([]string{broker0.Addr()}, NewTestConfig()) if err != nil { t.Fatal(err) } // When consumer, err := master.ConsumePartition("my_topic", 0, OffsetNewest) if err != nil { t.Fatal(err) } // Then if hwmo := consumer.HighWaterMarkOffset(); hwmo != offsetNewest { t.Errorf("Expected high water mark offset %d, found %d", offsetNewest, hwmo) } assertMessageOffset(t, <-consumer.Messages(), 10) if hwmo := consumer.HighWaterMarkOffset(); hwmo != offsetNewestAfterFetchRequest { t.Errorf("Expected high water mark offset %d, found %d", offsetNewestAfterFetchRequest, hwmo) } safeClose(t, consumer) safeClose(t, master) broker0.Close() } // If `OffsetOldest` is passed as the initial offset then the first consumed // message is indeed the first available in the partition. func TestConsumerOffsetOldest(t *testing.T) { // Given offsetNewest := int64(10) broker0 := NewMockBroker(t, 0) broker0.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetBroker(broker0.Addr(), broker0.BrokerID()). SetLeader("my_topic", 0, broker0.BrokerID()), "OffsetRequest": NewMockOffsetResponse(t). SetOffset("my_topic", 0, OffsetNewest, offsetNewest). SetOffset("my_topic", 0, OffsetOldest, 7), "FetchRequest": NewMockFetchResponse(t, 1). // skipped because parseRecords(): offset < child.offset SetMessage("my_topic", 0, 6, testMsg). // these will get to the Messages() channel SetMessage("my_topic", 0, 7, testMsg). SetMessage("my_topic", 0, 8, testMsg). SetMessage("my_topic", 0, 9, testMsg). SetHighWaterMark("my_topic", 0, offsetNewest), }) master, err := NewConsumer([]string{broker0.Addr()}, NewTestConfig()) if err != nil { t.Fatal(err) } // When consumer, err := master.ConsumePartition("my_topic", 0, OffsetOldest) if err != nil { t.Fatal(err) } // Then if hwmo := consumer.HighWaterMarkOffset(); hwmo != offsetNewest { t.Errorf("Expected high water mark offset %d, found %d", offsetNewest, hwmo) } assertMessageOffset(t, <-consumer.Messages(), int64(7)) if hwmo := consumer.HighWaterMarkOffset(); hwmo != offsetNewest { t.Errorf("Expected high water mark offset %d, found %d", offsetNewest, hwmo) } safeClose(t, consumer) safeClose(t, master) broker0.Close() } // It is possible to close a partition consumer and create the same anew. func TestConsumerRecreate(t *testing.T) { // Given broker0 := NewMockBroker(t, 0) broker0.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetBroker(broker0.Addr(), broker0.BrokerID()). SetLeader("my_topic", 0, broker0.BrokerID()), "OffsetRequest": NewMockOffsetResponse(t). SetOffset("my_topic", 0, OffsetOldest, 0). SetOffset("my_topic", 0, OffsetNewest, 1000), "FetchRequest": NewMockFetchResponse(t, 1). 
SetMessage("my_topic", 0, 10, testMsg), }) c, err := NewConsumer([]string{broker0.Addr()}, NewTestConfig()) if err != nil { t.Fatal(err) } pc, err := c.ConsumePartition("my_topic", 0, 10) if err != nil { t.Fatal(err) } assertMessageOffset(t, <-pc.Messages(), 10) // When safeClose(t, pc) pc, err = c.ConsumePartition("my_topic", 0, 10) if err != nil { t.Fatal(err) } // Then assertMessageOffset(t, <-pc.Messages(), 10) safeClose(t, pc) safeClose(t, c) broker0.Close() } // An attempt to consume the same partition twice should fail. func TestConsumerDuplicate(t *testing.T) { // Given broker0 := NewMockBroker(t, 0) broker0.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetBroker(broker0.Addr(), broker0.BrokerID()). SetLeader("my_topic", 0, broker0.BrokerID()), "OffsetRequest": NewMockOffsetResponse(t). SetOffset("my_topic", 0, OffsetOldest, 0). SetOffset("my_topic", 0, OffsetNewest, 1000), "FetchRequest": NewMockFetchResponse(t, 1), }) config := NewTestConfig() config.ChannelBufferSize = 0 c, err := NewConsumer([]string{broker0.Addr()}, config) if err != nil { t.Fatal(err) } pc1, err := c.ConsumePartition("my_topic", 0, 0) if err != nil { t.Fatal(err) } // When pc2, err := c.ConsumePartition("my_topic", 0, 0) // Then var target ConfigurationError ok := errors.As(err, &target) if pc2 != nil || !ok || string(target) != "That topic/partition is already being consumed" { t.Fatal("A partition cannot be consumed twice at the same time") } safeClose(t, pc1) safeClose(t, c) broker0.Close() } func runConsumerLeaderRefreshErrorTestWithConfig(t *testing.T, config *Config) { // Given broker0 := NewMockBroker(t, 100) // Stage 1: my_topic/0 served by broker0 Logger.Printf(" STAGE 1") broker0.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetBroker(broker0.Addr(), broker0.BrokerID()). SetLeader("my_topic", 0, broker0.BrokerID()), "OffsetRequest": NewMockOffsetResponse(t). SetOffset("my_topic", 0, OffsetOldest, 123). SetOffset("my_topic", 0, OffsetNewest, 1000), "FetchRequest": NewMockFetchResponse(t, 1). SetMessage("my_topic", 0, 123, testMsg), }) c, err := NewConsumer([]string{broker0.Addr()}, config) if err != nil { t.Fatal(err) } pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest) if err != nil { t.Fatal(err) } assertMessageOffset(t, <-pc.Messages(), 123) // Stage 2: broker0 says that it is no longer the leader for my_topic/0, // but the requests to retrieve metadata fail with network timeout. Logger.Printf(" STAGE 2") fetchResponse2 := &FetchResponse{} fetchResponse2.AddError("my_topic", 0, ErrNotLeaderForPartition) broker0.SetHandlerByMap(map[string]MockResponse{ "FetchRequest": NewMockWrapper(fetchResponse2), }) if consErr := <-pc.Errors(); !errors.Is(consErr.Err, ErrOutOfBrokers) { t.Errorf("Unexpected error: %v", consErr.Err) } // Stage 3: finally the metadata returned by broker0 tells that broker1 is // a new leader for my_topic/0. Consumption resumes. Logger.Printf(" STAGE 3") broker1 := NewMockBroker(t, 101) broker1.SetHandlerByMap(map[string]MockResponse{ "FetchRequest": NewMockFetchResponse(t, 1). SetMessage("my_topic", 0, 124, testMsg), }) broker0.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetBroker(broker0.Addr(), broker0.BrokerID()). SetBroker(broker1.Addr(), broker1.BrokerID()). 
SetLeader("my_topic", 0, broker1.BrokerID()), }) assertMessageOffset(t, <-pc.Messages(), 124) safeClose(t, pc) safeClose(t, c) broker1.Close() broker0.Close() } // If consumer fails to refresh metadata it keeps retrying with frequency // specified by `Config.Consumer.Retry.Backoff`. func TestConsumerLeaderRefreshError(t *testing.T) { config := NewTestConfig() config.Net.ReadTimeout = 100 * time.Millisecond config.Consumer.Retry.Backoff = 200 * time.Millisecond config.Consumer.Return.Errors = true config.Metadata.Retry.Max = 0 runConsumerLeaderRefreshErrorTestWithConfig(t, config) } func TestConsumerLeaderRefreshErrorWithBackoffFunc(t *testing.T) { var calls int32 = 0 config := NewTestConfig() config.Net.ReadTimeout = 100 * time.Millisecond config.Consumer.Retry.BackoffFunc = func(retries int) time.Duration { atomic.AddInt32(&calls, 1) return 200 * time.Millisecond } config.Consumer.Return.Errors = true config.Metadata.Retry.Max = 0 runConsumerLeaderRefreshErrorTestWithConfig(t, config) // we expect at least one call to our backoff function if calls == 0 { t.Fail() } } func TestConsumerInvalidTopic(t *testing.T) { // Given broker0 := NewMockBroker(t, 100) broker0.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetBroker(broker0.Addr(), broker0.BrokerID()), }) c, err := NewConsumer([]string{broker0.Addr()}, NewTestConfig()) if err != nil { t.Fatal(err) } // When pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest) // Then if pc != nil || !errors.Is(err, ErrUnknownTopicOrPartition) { t.Errorf("Should fail with, err=%v", err) } safeClose(t, c) broker0.Close() } // Nothing bad happens if a partition consumer that has no leader assigned at // the moment is closed. func TestConsumerClosePartitionWithoutLeader(t *testing.T) { // Given broker0 := NewMockBroker(t, 100) broker0.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetBroker(broker0.Addr(), broker0.BrokerID()). SetLeader("my_topic", 0, broker0.BrokerID()), "OffsetRequest": NewMockOffsetResponse(t). SetOffset("my_topic", 0, OffsetOldest, 123). SetOffset("my_topic", 0, OffsetNewest, 1000), "FetchRequest": NewMockFetchResponse(t, 1). SetMessage("my_topic", 0, 123, testMsg), }) config := NewTestConfig() config.Net.ReadTimeout = 100 * time.Millisecond config.Consumer.Retry.Backoff = 100 * time.Millisecond config.Consumer.Return.Errors = true config.Metadata.Retry.Max = 0 c, err := NewConsumer([]string{broker0.Addr()}, config) if err != nil { t.Fatal(err) } pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest) if err != nil { t.Fatal(err) } assertMessageOffset(t, <-pc.Messages(), 123) // broker0 says that it is no longer the leader for my_topic/0, but the // requests to retrieve metadata fail with network timeout. fetchResponse2 := &FetchResponse{} fetchResponse2.AddError("my_topic", 0, ErrNotLeaderForPartition) broker0.SetHandlerByMap(map[string]MockResponse{ "FetchRequest": NewMockWrapper(fetchResponse2), }) // When if consErr := <-pc.Errors(); !errors.Is(consErr.Err, ErrOutOfBrokers) { t.Errorf("Unexpected error: %v", consErr.Err) } // Then: the partition consumer can be closed without any problem. safeClose(t, pc) safeClose(t, c) broker0.Close() } // If the initial offset passed on partition consumer creation is out of the // actual offset range for the partition, then the partition consumer stops // immediately closing its output channels. 
func TestConsumerShutsDownOutOfRange(t *testing.T) { // Given broker0 := NewMockBroker(t, 0) fetchResponse := new(FetchResponse) fetchResponse.AddError("my_topic", 0, ErrOffsetOutOfRange) broker0.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetBroker(broker0.Addr(), broker0.BrokerID()). SetLeader("my_topic", 0, broker0.BrokerID()), "OffsetRequest": NewMockOffsetResponse(t). SetOffset("my_topic", 0, OffsetNewest, 1234). SetOffset("my_topic", 0, OffsetOldest, 7), "FetchRequest": NewMockWrapper(fetchResponse), }) master, err := NewConsumer([]string{broker0.Addr()}, NewTestConfig()) if err != nil { t.Fatal(err) } // When consumer, err := master.ConsumePartition("my_topic", 0, 101) if err != nil { t.Fatal(err) } // Then: consumer should shut down closing its messages and errors channels. if _, ok := <-consumer.Messages(); ok { t.Error("Expected the consumer to shut down") } safeClose(t, consumer) safeClose(t, master) broker0.Close() } // If a fetch response contains messages with offsets that are smaller then // requested, then such messages are ignored. func TestConsumerExtraOffsets(t *testing.T) { // Given legacyFetchResponse := &FetchResponse{} legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 1) legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 2) legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 3) legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 4) newFetchResponse := &FetchResponse{Version: 5} newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 1) newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 2) newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 3) newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 4) newFetchResponse.SetLastOffsetDelta("my_topic", 0, 4) newFetchResponse.SetLastStableOffset("my_topic", 0, 4) for _, fetchResponse1 := range []*FetchResponse{legacyFetchResponse, newFetchResponse} { cfg := NewTestConfig() cfg.Consumer.Return.Errors = true if fetchResponse1.Version >= 5 { cfg.Version = V0_11_0_0 } broker0 := NewMockBroker(t, 0) fetchResponse2 := &FetchResponse{} fetchResponse2.Version = fetchResponse1.Version fetchResponse2.AddError("my_topic", 0, ErrNoError) broker0.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetBroker(broker0.Addr(), broker0.BrokerID()). SetLeader("my_topic", 0, broker0.BrokerID()), "OffsetRequest": NewMockOffsetResponse(t). SetOffset("my_topic", 0, OffsetNewest, 1234). SetOffset("my_topic", 0, OffsetOldest, 0), "FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2), }) master, err := NewConsumer([]string{broker0.Addr()}, cfg) if err != nil { t.Fatal(err) } // When consumer, err := master.ConsumePartition("my_topic", 0, 3) if err != nil { t.Fatal(err) } // Then: messages with offsets 1 and 2 are not returned even though they // are present in the response. select { case msg := <-consumer.Messages(): assertMessageOffset(t, msg, 3) case err := <-consumer.Errors(): t.Fatal(err) } select { case msg := <-consumer.Messages(): assertMessageOffset(t, msg, 4) case err := <-consumer.Errors(): t.Fatal(err) } safeClose(t, consumer) safeClose(t, master) broker0.Close() } } // In some situations broker may return a block containing only // messages older then requested, even though there would be // more messages if higher offset was requested. 
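// Editorial note (hedged): in that case the partition consumer fetches again
// rather than surfacing the stale block, so callers simply keep ranging over
// Messages() and should rely on msg.Offset instead of counting deliveries:
//
//	for msg := range pc.Messages() {
//		process(msg) // msg.Offset may be well beyond the originally requested offset
//	}
//
// (process and pc are placeholders for the caller's own handler and PartitionConsumer.)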
func TestConsumerReceivingFetchResponseWithTooOldRecords(t *testing.T) { // Given fetchResponse1 := &FetchResponse{Version: 5} fetchResponse1.AddRecord("my_topic", 0, nil, testMsg, 1) fetchResponse2 := &FetchResponse{Version: 5} fetchResponse2.AddRecord("my_topic", 0, nil, testMsg, 1000000) cfg := NewTestConfig() cfg.Consumer.Return.Errors = true cfg.Version = V0_11_0_0 broker0 := NewMockBroker(t, 0) broker0.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetBroker(broker0.Addr(), broker0.BrokerID()). SetLeader("my_topic", 0, broker0.BrokerID()), "OffsetRequest": NewMockOffsetResponse(t). SetOffset("my_topic", 0, OffsetNewest, 1234). SetOffset("my_topic", 0, OffsetOldest, 0), "FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2), }) master, err := NewConsumer([]string{broker0.Addr()}, cfg) if err != nil { t.Fatal(err) } // When consumer, err := master.ConsumePartition("my_topic", 0, 2) if err != nil { t.Fatal(err) } select { case msg := <-consumer.Messages(): assertMessageOffset(t, msg, 1000000) case err := <-consumer.Errors(): t.Fatal(err) } safeClose(t, consumer) safeClose(t, master) broker0.Close() } func TestConsumeMessageWithNewerFetchAPIVersion(t *testing.T) { // Given fetchResponse1 := &FetchResponse{Version: 5} fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 1) fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 2) cfg := NewTestConfig() cfg.Version = V0_11_0_0 broker0 := NewMockBroker(t, 0) fetchResponse2 := &FetchResponse{} fetchResponse2.Version = 4 fetchResponse2.AddError("my_topic", 0, ErrNoError) broker0.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetBroker(broker0.Addr(), broker0.BrokerID()). SetLeader("my_topic", 0, broker0.BrokerID()), "OffsetRequest": NewMockOffsetResponse(t). SetOffset("my_topic", 0, OffsetNewest, 1234). SetOffset("my_topic", 0, OffsetOldest, 0), "FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2), }) master, err := NewConsumer([]string{broker0.Addr()}, cfg) if err != nil { t.Fatal(err) } // When consumer, err := master.ConsumePartition("my_topic", 0, 1) if err != nil { t.Fatal(err) } assertMessageOffset(t, <-consumer.Messages(), 1) assertMessageOffset(t, <-consumer.Messages(), 2) safeClose(t, consumer) safeClose(t, master) broker0.Close() } func TestConsumeMessageWithSessionIDs(t *testing.T) { // Given fetchResponse1 := &FetchResponse{Version: 7} fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 1) fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 2) cfg := NewTestConfig() cfg.Version = V1_1_0_0 broker0 := NewMockBroker(t, 0) fetchResponse2 := &FetchResponse{} fetchResponse2.Version = 7 fetchResponse2.AddError("my_topic", 0, ErrNoError) broker0.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetBroker(broker0.Addr(), broker0.BrokerID()). SetLeader("my_topic", 0, broker0.BrokerID()), "OffsetRequest": NewMockOffsetResponse(t). SetOffset("my_topic", 0, OffsetNewest, 1234). 
SetOffset("my_topic", 0, OffsetOldest, 0), "FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2), }) master, err := NewConsumer([]string{broker0.Addr()}, cfg) if err != nil { t.Fatal(err) } // When consumer, err := master.ConsumePartition("my_topic", 0, 1) if err != nil { t.Fatal(err) } assertMessageOffset(t, <-consumer.Messages(), 1) assertMessageOffset(t, <-consumer.Messages(), 2) safeClose(t, consumer) safeClose(t, master) broker0.Close() fetchReq := broker0.History()[3].Request.(*FetchRequest) if fetchReq.SessionID != 0 || fetchReq.SessionEpoch != -1 { t.Error("Expected session ID to be zero & Epoch to be -1") } } func TestConsumeMessagesFromReadReplica(t *testing.T) { // Given fetchResponse1 := &FetchResponse{Version: 11} fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 1) fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 2) block1 := fetchResponse1.GetBlock("my_topic", 0) block1.PreferredReadReplica = -1 fetchResponse2 := &FetchResponse{Version: 11} // Create a block with no records. block2 := fetchResponse1.getOrCreateBlock("my_topic", 0) block2.PreferredReadReplica = 1 fetchResponse3 := &FetchResponse{Version: 11} fetchResponse3.AddMessage("my_topic", 0, nil, testMsg, 3) fetchResponse3.AddMessage("my_topic", 0, nil, testMsg, 4) block3 := fetchResponse3.GetBlock("my_topic", 0) block3.PreferredReadReplica = -1 fetchResponse4 := &FetchResponse{Version: 11} fetchResponse4.AddMessage("my_topic", 0, nil, testMsg, 5) fetchResponse4.AddMessage("my_topic", 0, nil, testMsg, 6) block4 := fetchResponse4.GetBlock("my_topic", 0) block4.PreferredReadReplica = -1 cfg := NewTestConfig() cfg.Version = V2_3_0_0 cfg.RackID = "consumer_rack" leader := NewMockBroker(t, 0) broker0 := NewMockBroker(t, 1) leader.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetBroker(broker0.Addr(), broker0.BrokerID()). SetBroker(leader.Addr(), leader.BrokerID()). SetLeader("my_topic", 0, leader.BrokerID()), "OffsetRequest": NewMockOffsetResponse(t). SetOffset("my_topic", 0, OffsetNewest, 1234). SetOffset("my_topic", 0, OffsetOldest, 0), "FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2), }) broker0.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetBroker(broker0.Addr(), broker0.BrokerID()). SetBroker(leader.Addr(), leader.BrokerID()). SetLeader("my_topic", 0, leader.BrokerID()), "OffsetRequest": NewMockOffsetResponse(t). SetOffset("my_topic", 0, OffsetNewest, 1234). SetOffset("my_topic", 0, OffsetOldest, 0), "FetchRequest": NewMockSequence(fetchResponse3, fetchResponse4), }) master, err := NewConsumer([]string{broker0.Addr()}, cfg) if err != nil { t.Fatal(err) } // When consumer, err := master.ConsumePartition("my_topic", 0, 1) if err != nil { t.Fatal(err) } assertMessageOffset(t, <-consumer.Messages(), 1) assertMessageOffset(t, <-consumer.Messages(), 2) assertMessageOffset(t, <-consumer.Messages(), 3) assertMessageOffset(t, <-consumer.Messages(), 4) safeClose(t, consumer) safeClose(t, master) broker0.Close() leader.Close() } func TestConsumeMessagesFromReadReplicaLeaderFallback(t *testing.T) { // Given fetchResponse1 := &FetchResponse{Version: 11} fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 1) fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 2) block1 := fetchResponse1.GetBlock("my_topic", 0) block1.PreferredReadReplica = 5 // Does not exist. 
fetchResponse2 := &FetchResponse{Version: 11} fetchResponse2.AddMessage("my_topic", 0, nil, testMsg, 3) fetchResponse2.AddMessage("my_topic", 0, nil, testMsg, 4) block2 := fetchResponse2.GetBlock("my_topic", 0) block2.PreferredReadReplica = -1 cfg := NewTestConfig() cfg.Version = V2_3_0_0 cfg.RackID = "consumer_rack" leader := NewMockBroker(t, 0) leader.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetBroker(leader.Addr(), leader.BrokerID()). SetLeader("my_topic", 0, leader.BrokerID()), "OffsetRequest": NewMockOffsetResponse(t). SetOffset("my_topic", 0, OffsetNewest, 1234). SetOffset("my_topic", 0, OffsetOldest, 0), "FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2), }) master, err := NewConsumer([]string{leader.Addr()}, cfg) if err != nil { t.Fatal(err) } // When consumer, err := master.ConsumePartition("my_topic", 0, 1) if err != nil { t.Fatal(err) } assertMessageOffset(t, <-consumer.Messages(), 1) assertMessageOffset(t, <-consumer.Messages(), 2) assertMessageOffset(t, <-consumer.Messages(), 3) assertMessageOffset(t, <-consumer.Messages(), 4) safeClose(t, consumer) safeClose(t, master) leader.Close() } func TestConsumeMessagesFromReadReplicaErrorReplicaNotAvailable(t *testing.T) { // Given fetchResponse1 := &FetchResponse{Version: 11} block1 := fetchResponse1.getOrCreateBlock("my_topic", 0) block1.PreferredReadReplica = 1 fetchResponse2 := &FetchResponse{Version: 11} fetchResponse2.AddMessage("my_topic", 0, nil, testMsg, 1) fetchResponse2.AddMessage("my_topic", 0, nil, testMsg, 2) block2 := fetchResponse2.GetBlock("my_topic", 0) block2.PreferredReadReplica = -1 fetchResponse3 := &FetchResponse{Version: 11} fetchResponse3.AddError("my_topic", 0, ErrReplicaNotAvailable) fetchResponse4 := &FetchResponse{Version: 11} fetchResponse4.AddMessage("my_topic", 0, nil, testMsg, 3) fetchResponse4.AddMessage("my_topic", 0, nil, testMsg, 4) cfg := NewTestConfig() cfg.Version = V2_3_0_0 cfg.RackID = "consumer_rack" leader := NewMockBroker(t, 0) broker0 := NewMockBroker(t, 1) leader.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetBroker(broker0.Addr(), broker0.BrokerID()). SetBroker(leader.Addr(), leader.BrokerID()). SetLeader("my_topic", 0, leader.BrokerID()), "OffsetRequest": NewMockOffsetResponse(t). SetOffset("my_topic", 0, OffsetNewest, 1234). SetOffset("my_topic", 0, OffsetOldest, 0), "FetchRequest": NewMockSequence(fetchResponse1, fetchResponse4), }) broker0.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetBroker(broker0.Addr(), broker0.BrokerID()). SetBroker(leader.Addr(), leader.BrokerID()). SetLeader("my_topic", 0, leader.BrokerID()), "OffsetRequest": NewMockOffsetResponse(t). SetOffset("my_topic", 0, OffsetNewest, 1234). 
SetOffset("my_topic", 0, OffsetOldest, 0), "FetchRequest": NewMockSequence(fetchResponse2, fetchResponse3), }) master, err := NewConsumer([]string{broker0.Addr()}, cfg) if err != nil { t.Fatal(err) } // When consumer, err := master.ConsumePartition("my_topic", 0, 1) if err != nil { t.Fatal(err) } assertMessageOffset(t, <-consumer.Messages(), 1) assertMessageOffset(t, <-consumer.Messages(), 2) assertMessageOffset(t, <-consumer.Messages(), 3) assertMessageOffset(t, <-consumer.Messages(), 4) safeClose(t, consumer) safeClose(t, master) broker0.Close() leader.Close() } func TestConsumeMessagesFromReadReplicaErrorUnknown(t *testing.T) { // Given fetchResponse1 := &FetchResponse{Version: 11} block1 := fetchResponse1.getOrCreateBlock("my_topic", 0) block1.PreferredReadReplica = 1 fetchResponse2 := &FetchResponse{Version: 11} fetchResponse2.AddMessage("my_topic", 0, nil, testMsg, 1) fetchResponse2.AddMessage("my_topic", 0, nil, testMsg, 2) block2 := fetchResponse2.GetBlock("my_topic", 0) block2.PreferredReadReplica = -1 fetchResponse3 := &FetchResponse{Version: 11} fetchResponse3.AddError("my_topic", 0, ErrUnknown) fetchResponse4 := &FetchResponse{Version: 11} fetchResponse4.AddMessage("my_topic", 0, nil, testMsg, 3) fetchResponse4.AddMessage("my_topic", 0, nil, testMsg, 4) cfg := NewTestConfig() cfg.Version = V2_3_0_0 cfg.RackID = "consumer_rack" leader := NewMockBroker(t, 0) broker0 := NewMockBroker(t, 1) leader.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetBroker(broker0.Addr(), broker0.BrokerID()). SetBroker(leader.Addr(), leader.BrokerID()). SetLeader("my_topic", 0, leader.BrokerID()), "OffsetRequest": NewMockOffsetResponse(t). SetOffset("my_topic", 0, OffsetNewest, 1234). SetOffset("my_topic", 0, OffsetOldest, 0), "FetchRequest": NewMockSequence(fetchResponse1, fetchResponse4), }) broker0.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetBroker(broker0.Addr(), broker0.BrokerID()). SetBroker(leader.Addr(), leader.BrokerID()). SetLeader("my_topic", 0, leader.BrokerID()), "OffsetRequest": NewMockOffsetResponse(t). SetOffset("my_topic", 0, OffsetNewest, 1234). SetOffset("my_topic", 0, OffsetOldest, 0), "FetchRequest": NewMockSequence(fetchResponse2, fetchResponse3), }) master, err := NewConsumer([]string{broker0.Addr()}, cfg) if err != nil { t.Fatal(err) } // When consumer, err := master.ConsumePartition("my_topic", 0, 1) if err != nil { t.Fatal(err) } assertMessageOffset(t, <-consumer.Messages(), 1) assertMessageOffset(t, <-consumer.Messages(), 2) assertMessageOffset(t, <-consumer.Messages(), 3) assertMessageOffset(t, <-consumer.Messages(), 4) safeClose(t, consumer) safeClose(t, master) broker0.Close() leader.Close() } // TestConsumeMessagesTrackLeader ensures that in the event that leadership of // a topicPartition changes and no preferredReadReplica is specified, the // consumer connects back to the new leader to resume consumption and doesn't // continue consuming from the follower. // // See https://github.com/IBM/sarama/issues/1927 func TestConsumeMessagesTrackLeader(t *testing.T) { cfg := NewTestConfig() cfg.ClientID = t.Name() cfg.Metadata.RefreshFrequency = time.Millisecond * 50 cfg.Consumer.Retry.Backoff = 0 cfg.Net.MaxOpenRequests = 1 cfg.Version = V2_1_0_0 leader1 := NewMockBroker(t, 1) leader2 := NewMockBroker(t, 2) mockMetadataResponse1 := NewMockMetadataResponse(t). SetBroker(leader1.Addr(), leader1.BrokerID()). SetBroker(leader2.Addr(), leader2.BrokerID()). 
SetLeader("my_topic", 0, leader1.BrokerID()) mockMetadataResponse2 := NewMockMetadataResponse(t). SetBroker(leader1.Addr(), leader1.BrokerID()). SetBroker(leader2.Addr(), leader2.BrokerID()). SetLeader("my_topic", 0, leader2.BrokerID()) mockMetadataResponse3 := NewMockMetadataResponse(t). SetBroker(leader1.Addr(), leader1.BrokerID()). SetBroker(leader2.Addr(), leader2.BrokerID()). SetLeader("my_topic", 0, leader1.BrokerID()) leader1.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": mockMetadataResponse1, "OffsetRequest": NewMockOffsetResponse(t). SetOffset("my_topic", 0, OffsetNewest, 1234). SetOffset("my_topic", 0, OffsetOldest, 0), "FetchRequest": NewMockFetchResponse(t, 1). SetMessage("my_topic", 0, 1, testMsg). SetMessage("my_topic", 0, 2, testMsg), }) leader2.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": mockMetadataResponse1, }) client, err := NewClient([]string{leader1.Addr()}, cfg) if err != nil { t.Fatal(err) } consumer, err := NewConsumerFromClient(client) if err != nil { t.Fatal(err) } pConsumer, err := consumer.ConsumePartition("my_topic", 0, 1) if err != nil { t.Fatal(err) } assertMessageOffset(t, <-pConsumer.Messages(), 1) assertMessageOffset(t, <-pConsumer.Messages(), 2) fetchEmptyResponse := &FetchResponse{Version: 10} fetchEmptyResponse.AddError("my_topic", 0, ErrNoError) leader1.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": mockMetadataResponse2, "FetchRequest": NewMockWrapper(fetchEmptyResponse), }) leader2.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": mockMetadataResponse2, "FetchRequest": NewMockFetchResponse(t, 1). SetMessage("my_topic", 0, 3, testMsg). SetMessage("my_topic", 0, 4, testMsg), }) // wait for client to be aware that leadership has changed for { b, _ := client.Leader("my_topic", 0) if b.ID() == int32(2) { break } time.Sleep(time.Millisecond * 50) } assertMessageOffset(t, <-pConsumer.Messages(), 3) assertMessageOffset(t, <-pConsumer.Messages(), 4) leader1.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": mockMetadataResponse3, "FetchRequest": NewMockFetchResponse(t, 1). SetMessage("my_topic", 0, 5, testMsg). SetMessage("my_topic", 0, 6, testMsg), }) leader2.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": mockMetadataResponse3, "FetchRequest": NewMockWrapper(fetchEmptyResponse), }) // wait for client to be aware that leadership has changed back again for { b, _ := client.Leader("my_topic", 0) if b.ID() == int32(1) { break } time.Sleep(time.Millisecond * 50) } assertMessageOffset(t, <-pConsumer.Messages(), 5) assertMessageOffset(t, <-pConsumer.Messages(), 6) safeClose(t, pConsumer) safeClose(t, consumer) safeClose(t, client) leader1.Close() leader2.Close() } // It is fine if offsets of fetched messages are not sequential (although // strictly increasing!). 
func TestConsumerNonSequentialOffsets(t *testing.T) { // Given legacyFetchResponse := &FetchResponse{} legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 5) legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 7) legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 11) newFetchResponse := &FetchResponse{Version: 5} newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 5) newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 7) newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 11) newFetchResponse.SetLastOffsetDelta("my_topic", 0, 11) newFetchResponse.SetLastStableOffset("my_topic", 0, 11) for _, fetchResponse1 := range []*FetchResponse{legacyFetchResponse, newFetchResponse} { cfg := NewTestConfig() if fetchResponse1.Version >= 4 { cfg.Version = V0_11_0_0 } broker0 := NewMockBroker(t, 0) fetchResponse2 := &FetchResponse{Version: fetchResponse1.Version} fetchResponse2.AddError("my_topic", 0, ErrNoError) broker0.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetBroker(broker0.Addr(), broker0.BrokerID()). SetLeader("my_topic", 0, broker0.BrokerID()), "OffsetRequest": NewMockOffsetResponse(t). SetOffset("my_topic", 0, OffsetNewest, 1234). SetOffset("my_topic", 0, OffsetOldest, 0), "FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2), }) master, err := NewConsumer([]string{broker0.Addr()}, cfg) if err != nil { t.Fatal(err) } // When consumer, err := master.ConsumePartition("my_topic", 0, 3) if err != nil { t.Fatal(err) } // Then: messages with offsets 1 and 2 are not returned even though they // are present in the response. assertMessageOffset(t, <-consumer.Messages(), 5) assertMessageOffset(t, <-consumer.Messages(), 7) assertMessageOffset(t, <-consumer.Messages(), 11) safeClose(t, consumer) safeClose(t, master) broker0.Close() } } // If leadership for a partition is changing then consumer resolves the new // leader and switches to it. func TestConsumerRebalancingMultiplePartitions(t *testing.T) { // initial setup seedBroker := NewMockBroker(t, 10) leader0 := NewMockBroker(t, 0) leader1 := NewMockBroker(t, 1) seedBroker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetBroker(leader0.Addr(), leader0.BrokerID()). SetBroker(leader1.Addr(), leader1.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()). SetLeader("my_topic", 0, leader0.BrokerID()). SetLeader("my_topic", 1, leader1.BrokerID()), }) mockOffsetResponse1 := NewMockOffsetResponse(t). SetOffset("my_topic", 0, OffsetOldest, 0). SetOffset("my_topic", 0, OffsetNewest, 1000). SetOffset("my_topic", 1, OffsetOldest, 0). 
SetOffset("my_topic", 1, OffsetNewest, 1000) leader0.SetHandlerByMap(map[string]MockResponse{ "OffsetRequest": mockOffsetResponse1, "FetchRequest": NewMockFetchResponse(t, 1), }) leader1.SetHandlerByMap(map[string]MockResponse{ "OffsetRequest": mockOffsetResponse1, "FetchRequest": NewMockFetchResponse(t, 1), }) // launch test goroutines config := NewTestConfig() config.ClientID = t.Name() config.Consumer.Retry.Backoff = 50 master, err := NewConsumer([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } consumers := map[int32]PartitionConsumer{} checkMessage := func(partition int32, offset int) { c := consumers[partition] message := <-c.Messages() t.Logf("Received message my_topic-%d offset=%d", partition, message.Offset) if message.Offset != int64(offset) { t.Error("Incorrect message offset!", offset, partition, message.Offset) } if message.Partition != partition { t.Error("Incorrect message partition!") } } for i := int32(0); i < 2; i++ { consumer, err := master.ConsumePartition("my_topic", i, 0) if err != nil { t.Fatal(err) } go func(c PartitionConsumer) { for err := range c.Errors() { t.Error(err) } }(consumer) consumers[i] = consumer } time.Sleep(50 * time.Millisecond) t.Log(` STAGE 1: * my_topic/0 -> leader0 will serve 4 messages * my_topic/1 -> leader1 will serve 0 messages`) mockFetchResponse := NewMockFetchResponse(t, 1) for i := 0; i < 4; i++ { mockFetchResponse.SetMessage("my_topic", 0, int64(i), testMsg) } leader0.SetHandlerByMap(map[string]MockResponse{ "FetchRequest": mockFetchResponse, }) for i := 0; i < 4; i++ { checkMessage(0, i) } time.Sleep(50 * time.Millisecond) t.Log(` STAGE 2: * my_topic/0 -> leader0 will return NotLeaderForPartition seedBroker will give leader1 as serving my_topic/0 now * my_topic/1 -> leader1 will serve 0 messages`) // leader0 says no longer leader of partition 0 fetchResponse := new(FetchResponse) fetchResponse.AddError("my_topic", 0, ErrNotLeaderForPartition) metadataResponse := NewMockMetadataResponse(t). SetLeader("my_topic", 0, leader1.BrokerID()). SetLeader("my_topic", 1, leader1.BrokerID()). SetBroker(leader0.Addr(), leader0.BrokerID()). SetBroker(leader1.Addr(), leader1.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()) leader0.SetHandlerByMap(map[string]MockResponse{ "FetchRequest": NewMockWrapper(fetchResponse), "MetadataRequest": metadataResponse, }) leader1.SetHandlerByMap(map[string]MockResponse{ "FetchRequest": NewMockFetchResponse(t, 1), "MetadataRequest": metadataResponse, }) time.Sleep(50 * time.Millisecond) t.Log(` STAGE 3: * my_topic/0 -> leader1 will serve 3 messages * my_topic/1 -> leader1 will serve 8 messages`) // leader1 provides 3 message on partition 0, and 8 messages on partition 1 mockFetchResponse2 := NewMockFetchResponse(t, 11) for i := 4; i < 7; i++ { mockFetchResponse2.SetMessage("my_topic", 0, int64(i), testMsg) } for i := 0; i < 8; i++ { mockFetchResponse2.SetMessage("my_topic", 1, int64(i), testMsg) } leader1.SetHandlerByMap(map[string]MockResponse{ "FetchRequest": mockFetchResponse2, "MetadataRequest": metadataResponse, }) for i := 0; i < 8; i++ { checkMessage(1, i) } for i := 4; i < 7; i++ { checkMessage(0, i) } time.Sleep(50 * time.Millisecond) t.Log(` STAGE 4: * my_topic/0 -> leader1 will serve 3 messages * my_topic/1 -> leader1 will return NotLeaderForPartition seedBroker will give leader0 as serving my_topic/1 now`) metadataResponse2 := NewMockMetadataResponse(t). SetLeader("my_topic", 0, leader1.BrokerID()). SetLeader("my_topic", 1, leader0.BrokerID()). 
SetBroker(leader0.Addr(), leader0.BrokerID()). SetBroker(leader1.Addr(), leader1.BrokerID()). SetBroker(seedBroker.Addr(), seedBroker.BrokerID()) leader0.SetHandlerByMap(map[string]MockResponse{ "FetchRequest": NewMockFetchResponse(t, 1), }) leader1.SetHandlerByMap(map[string]MockResponse{ "FetchRequest": NewMockFetchResponse(t, 1), }) // leader1 provides three more messages on partition0, says no longer leader of partition1 mockFetchResponse3 := NewMockFetchResponse(t, 3). SetMessage("my_topic", 0, int64(7), testMsg). SetMessage("my_topic", 0, int64(8), testMsg). SetMessage("my_topic", 0, int64(9), testMsg) fetchResponse4 := new(FetchResponse) fetchResponse4.AddError("my_topic", 0, ErrNoError) fetchResponse4.AddError("my_topic", 1, ErrNotLeaderForPartition) leader1.SetHandlerByMap(map[string]MockResponse{ "FetchRequest": NewMockSequence(mockFetchResponse3, fetchResponse4), "MetadataRequest": metadataResponse2, }) leader0.SetHandlerByMap(map[string]MockResponse{ "FetchRequest": NewMockFetchResponse(t, 1), "MetadataRequest": metadataResponse2, }) t.Log(` STAGE 5: * my_topic/0 -> leader1 will serve 0 messages * my_topic/1 -> leader0 will serve 2 messages`) // leader0 provides two messages on partition 1 mockFetchResponse4 := NewMockFetchResponse(t, 2) for i := 8; i < 10; i++ { mockFetchResponse4.SetMessage("my_topic", 1, int64(i), testMsg) } leader0.SetHandlerByMap(map[string]MockResponse{ "FetchRequest": mockFetchResponse4, "MetadataRequest": metadataResponse2, }) for i := 7; i < 10; i++ { checkMessage(0, i) } for i := 8; i < 10; i++ { checkMessage(1, i) } for _, pc := range consumers { safeClose(t, pc) } safeClose(t, master) leader1.Close() leader0.Close() seedBroker.Close() } // When two partitions have the same broker as the leader, if one partition // consumer channel buffer is full then that does not affect the ability to // read messages by the other consumer. func TestConsumerInterleavedClose(t *testing.T) { // Given broker0 := NewMockBroker(t, 0) broker0.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetBroker(broker0.Addr(), broker0.BrokerID()). SetLeader("my_topic", 0, broker0.BrokerID()). SetLeader("my_topic", 1, broker0.BrokerID()), "OffsetRequest": NewMockOffsetResponse(t). SetOffset("my_topic", 0, OffsetOldest, 1000). SetOffset("my_topic", 0, OffsetNewest, 1100). SetOffset("my_topic", 1, OffsetOldest, 2000). SetOffset("my_topic", 1, OffsetNewest, 2100), "FetchRequest": NewMockFetchResponse(t, 1). SetMessage("my_topic", 0, 1000, testMsg). SetMessage("my_topic", 0, 1001, testMsg). SetMessage("my_topic", 0, 1002, testMsg). SetMessage("my_topic", 1, 2000, testMsg), }) config := NewTestConfig() config.ChannelBufferSize = 0 master, err := NewConsumer([]string{broker0.Addr()}, config) if err != nil { t.Fatal(err) } c0, err := master.ConsumePartition("my_topic", 0, 1000) if err != nil { t.Fatal(err) } c1, err := master.ConsumePartition("my_topic", 1, 2000) if err != nil { t.Fatal(err) } // When/Then: we can read from partition 0 even if nobody reads from partition 1 assertMessageOffset(t, <-c0.Messages(), 1000) assertMessageOffset(t, <-c0.Messages(), 1001) assertMessageOffset(t, <-c0.Messages(), 1002) safeClose(t, c1) safeClose(t, c0) safeClose(t, master) broker0.Close() } func TestConsumerBounceWithReferenceOpen(t *testing.T) { broker0 := NewMockBroker(t, 0) broker0Addr := broker0.Addr() broker1 := NewMockBroker(t, 1) mockMetadataResponse := NewMockMetadataResponse(t). SetBroker(broker0.Addr(), broker0.BrokerID()). 
SetBroker(broker1.Addr(), broker1.BrokerID()). SetLeader("my_topic", 0, broker0.BrokerID()). SetLeader("my_topic", 1, broker1.BrokerID()) mockOffsetResponse := NewMockOffsetResponse(t). SetOffset("my_topic", 0, OffsetOldest, 1000). SetOffset("my_topic", 0, OffsetNewest, 1100). SetOffset("my_topic", 1, OffsetOldest, 2000). SetOffset("my_topic", 1, OffsetNewest, 2100) mockFetchResponse := NewMockFetchResponse(t, 1) for i := 0; i < 10; i++ { mockFetchResponse.SetMessage("my_topic", 0, int64(1000+i), testMsg) mockFetchResponse.SetMessage("my_topic", 1, int64(2000+i), testMsg) } broker0.SetHandlerByMap(map[string]MockResponse{ "OffsetRequest": mockOffsetResponse, "FetchRequest": mockFetchResponse, }) broker1.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": mockMetadataResponse, "OffsetRequest": mockOffsetResponse, "FetchRequest": mockFetchResponse, }) config := NewTestConfig() config.Consumer.Return.Errors = true config.Consumer.Retry.Backoff = 100 * time.Millisecond config.ChannelBufferSize = 1 master, err := NewConsumer([]string{broker1.Addr()}, config) if err != nil { t.Fatal(err) } c0, err := master.ConsumePartition("my_topic", 0, 1000) if err != nil { t.Fatal(err) } c1, err := master.ConsumePartition("my_topic", 1, 2000) if err != nil { t.Fatal(err) } // read messages from both partition to make sure that both brokers operate // normally. assertMessageOffset(t, <-c0.Messages(), 1000) assertMessageOffset(t, <-c1.Messages(), 2000) // Simulate broker shutdown. Note that metadata response does not change, // that is the leadership does not move to another broker. So partition // consumer will keep retrying to restore the connection with the broker. broker0.Close() // Make sure that while the partition/0 leader is down, consumer/partition/1 // is capable of pulling messages from broker1. for i := 1; i < 7; i++ { offset := (<-c1.Messages()).Offset if offset != int64(2000+i) { t.Errorf("Expected offset %d from consumer/partition/1", int64(2000+i)) } } // Bring broker0 back to service. broker0 = NewMockBrokerAddr(t, 0, broker0Addr) broker0.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": mockMetadataResponse, "FetchRequest": mockFetchResponse, }) // Read the rest of messages from both partitions. for i := 7; i < 10; i++ { assertMessageOffset(t, <-c1.Messages(), int64(2000+i)) } for i := 1; i < 10; i++ { assertMessageOffset(t, <-c0.Messages(), int64(1000+i)) } select { case <-c0.Errors(): default: t.Errorf("Partition consumer should have detected broker restart") } safeClose(t, c1) safeClose(t, c0) safeClose(t, master) broker0.Close() broker1.Close() } func TestConsumerOffsetOutOfRange(t *testing.T) { // Given broker0 := NewMockBroker(t, 2) broker0.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetBroker(broker0.Addr(), broker0.BrokerID()). SetLeader("my_topic", 0, broker0.BrokerID()), "OffsetRequest": NewMockOffsetResponse(t). SetOffset("my_topic", 0, OffsetNewest, 1234). 
SetOffset("my_topic", 0, OffsetOldest, 2345), }) master, err := NewConsumer([]string{broker0.Addr()}, NewTestConfig()) if err != nil { t.Fatal(err) } // When/Then if _, err := master.ConsumePartition("my_topic", 0, 0); !errors.Is(err, ErrOffsetOutOfRange) { t.Fatal("Should return ErrOffsetOutOfRange, got:", err) } if _, err := master.ConsumePartition("my_topic", 0, 3456); !errors.Is(err, ErrOffsetOutOfRange) { t.Fatal("Should return ErrOffsetOutOfRange, got:", err) } if _, err := master.ConsumePartition("my_topic", 0, -3); !errors.Is(err, ErrOffsetOutOfRange) { t.Fatal("Should return ErrOffsetOutOfRange, got:", err) } safeClose(t, master) broker0.Close() } func TestConsumerExpiryTicker(t *testing.T) { // Given broker0 := NewMockBroker(t, 0) fetchResponse1 := &FetchResponse{} for i := 1; i <= 8; i++ { fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, int64(i)) } broker0.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetBroker(broker0.Addr(), broker0.BrokerID()). SetLeader("my_topic", 0, broker0.BrokerID()), "OffsetRequest": NewMockOffsetResponse(t). SetOffset("my_topic", 0, OffsetNewest, 1234). SetOffset("my_topic", 0, OffsetOldest, 1), "FetchRequest": NewMockSequence(fetchResponse1), }) config := NewTestConfig() config.ChannelBufferSize = 0 config.Consumer.MaxProcessingTime = 10 * time.Millisecond master, err := NewConsumer([]string{broker0.Addr()}, config) if err != nil { t.Fatal(err) } // When consumer, err := master.ConsumePartition("my_topic", 0, 1) if err != nil { t.Fatal(err) } // Then: messages with offsets 1 through 8 are read for i := 1; i <= 8; i++ { assertMessageOffset(t, <-consumer.Messages(), int64(i)) time.Sleep(2 * time.Millisecond) } safeClose(t, consumer) safeClose(t, master) broker0.Close() } func TestConsumerTimestamps(t *testing.T) { now := time.Now().Truncate(time.Millisecond) type testMessage struct { key Encoder offset int64 timestamp time.Time } for _, d := range []struct { kversion KafkaVersion logAppendTime bool messages []testMessage expectedTimestamp []time.Time }{ {MinVersion, false, []testMessage{ {testMsg, 1, now}, {testMsg, 2, now}, }, []time.Time{{}, {}}}, {V0_9_0_0, false, []testMessage{ {testMsg, 1, now}, {testMsg, 2, now}, }, []time.Time{{}, {}}}, {V0_10_0_0, false, []testMessage{ {testMsg, 1, now}, {testMsg, 2, now}, }, []time.Time{{}, {}}}, {V0_10_2_1, false, []testMessage{ {testMsg, 1, now.Add(time.Second)}, {testMsg, 2, now.Add(2 * time.Second)}, }, []time.Time{now.Add(time.Second), now.Add(2 * time.Second)}}, {V0_10_2_1, true, []testMessage{ {testMsg, 1, now.Add(time.Second)}, {testMsg, 2, now.Add(2 * time.Second)}, }, []time.Time{now, now}}, {V0_11_0_0, false, []testMessage{ {testMsg, 1, now.Add(time.Second)}, {testMsg, 2, now.Add(2 * time.Second)}, }, []time.Time{now.Add(time.Second), now.Add(2 * time.Second)}}, {V0_11_0_0, true, []testMessage{ {testMsg, 1, now.Add(time.Second)}, {testMsg, 2, now.Add(2 * time.Second)}, }, []time.Time{now, now}}, } { var fr *FetchResponse cfg := NewTestConfig() cfg.Version = d.kversion switch { case d.kversion.IsAtLeast(V0_11_0_0): fr = &FetchResponse{Version: 5, LogAppendTime: d.logAppendTime, Timestamp: now} for _, m := range d.messages { fr.AddRecordWithTimestamp("my_topic", 0, m.key, testMsg, m.offset, m.timestamp) } fr.SetLastOffsetDelta("my_topic", 0, 2) fr.SetLastStableOffset("my_topic", 0, 2) case d.kversion.IsAtLeast(V0_10_1_0): fr = &FetchResponse{Version: 3, LogAppendTime: d.logAppendTime, Timestamp: now} for _, m := range d.messages { 
fr.AddMessageWithTimestamp("my_topic", 0, m.key, testMsg, m.offset, m.timestamp, 1) } default: var version int16 switch { case d.kversion.IsAtLeast(V0_10_0_0): version = 2 case d.kversion.IsAtLeast(V0_9_0_0): version = 1 } fr = &FetchResponse{Version: version} for _, m := range d.messages { fr.AddMessageWithTimestamp("my_topic", 0, m.key, testMsg, m.offset, m.timestamp, 0) } } broker0 := NewMockBroker(t, 0) broker0.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetBroker(broker0.Addr(), broker0.BrokerID()). SetLeader("my_topic", 0, broker0.BrokerID()), "OffsetRequest": NewMockOffsetResponse(t). SetOffset("my_topic", 0, OffsetNewest, 1234). SetOffset("my_topic", 0, OffsetOldest, 0), "FetchRequest": NewMockSequence(fr), }) master, err := NewConsumer([]string{broker0.Addr()}, cfg) if err != nil { t.Fatal(err) } consumer, err := master.ConsumePartition("my_topic", 0, 1) if err != nil { t.Fatal(err) } for i, ts := range d.expectedTimestamp { select { case msg := <-consumer.Messages(): assertMessageOffset(t, msg, int64(i)+1) if !msg.Timestamp.Equal(ts) { t.Errorf("Wrong timestamp (kversion:%v, logAppendTime:%v): got: %v, want: %v", d.kversion, d.logAppendTime, msg.Timestamp, ts) } case err := <-consumer.Errors(): t.Fatal(err) } } safeClose(t, consumer) safeClose(t, master) broker0.Close() } } // When set to ReadCommitted, no uncommitted message should be available in messages channel func TestExcludeUncommitted(t *testing.T) { // Given broker0 := NewMockBroker(t, 0) fetchResponse := &FetchResponse{ Version: 5, Blocks: map[string]map[int32]*FetchResponseBlock{"my_topic": {0: { AbortedTransactions: []*AbortedTransaction{{ProducerID: 7, FirstOffset: 1235}}, }}}, } fetchResponse.AddRecordBatch("my_topic", 0, nil, testMsg, 1234, 7, true) // committed msg fetchResponse.AddRecordBatch("my_topic", 0, nil, testMsg, 1235, 7, true) // uncommitted msg fetchResponse.AddRecordBatch("my_topic", 0, nil, testMsg, 1236, 7, true) // uncommitted msg fetchResponse.AddControlRecord("my_topic", 0, 1237, 7, ControlRecordAbort) // abort control record fetchResponse.AddRecordBatch("my_topic", 0, nil, testMsg, 1238, 7, true) // committed msg broker0.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetBroker(broker0.Addr(), broker0.BrokerID()). SetLeader("my_topic", 0, broker0.BrokerID()), "OffsetRequest": NewMockOffsetResponse(t). SetOffset("my_topic", 0, OffsetOldest, 0). SetOffset("my_topic", 0, OffsetNewest, 1237), "FetchRequest": NewMockWrapper(fetchResponse), }) cfg := NewTestConfig() cfg.Consumer.Return.Errors = true cfg.Version = V0_11_0_0 cfg.Consumer.IsolationLevel = ReadCommitted // When master, err := NewConsumer([]string{broker0.Addr()}, cfg) if err != nil { t.Fatal(err) } consumer, err := master.ConsumePartition("my_topic", 0, 1234) if err != nil { t.Fatal(err) } // Then: only the 2 committed messages are returned select { case message := <-consumer.Messages(): assertMessageOffset(t, message, int64(1234)) case err := <-consumer.Errors(): t.Error(err) } select { case message := <-consumer.Messages(): assertMessageOffset(t, message, int64(1238)) case err := <-consumer.Errors(): t.Error(err) } safeClose(t, consumer) safeClose(t, master) broker0.Close() } func assertMessageKey(t *testing.T, msg *ConsumerMessage, expectedKey Encoder) { t.Helper() wantKey, _ := expectedKey.Encode() if !bytes.Equal(msg.Key, wantKey) { t.Fatalf("Incorrect key for message. 
expected=%s, actual=%s", expectedKey, msg.Key) } } func assertMessageValue(t *testing.T, msg *ConsumerMessage, expectedValue Encoder) { t.Helper() wantValue, _ := expectedValue.Encode() if !bytes.Equal(msg.Value, wantValue) { t.Fatalf("Incorrect value for message. expected=%s, actual=%s", expectedValue, msg.Key) } } func assertMessageOffset(t *testing.T, msg *ConsumerMessage, expectedOffset int64) { t.Helper() if msg.Offset != expectedOffset { t.Fatalf("Incorrect message offset: expected=%d, actual=%d", expectedOffset, msg.Offset) } } // This example shows how to use the consumer to read messages // from a single partition. func ExampleConsumer() { consumer, err := NewConsumer([]string{"localhost:9092"}, NewTestConfig()) if err != nil { panic(err) } defer func() { if err := consumer.Close(); err != nil { log.Fatalln(err) } }() partitionConsumer, err := consumer.ConsumePartition("my_topic", 0, OffsetNewest) if err != nil { panic(err) } defer func() { if err := partitionConsumer.Close(); err != nil { log.Fatalln(err) } }() // Trap SIGINT to trigger a shutdown. signals := make(chan os.Signal, 1) signal.Notify(signals, os.Interrupt) consumed := 0 ConsumerLoop: for { select { case msg := <-partitionConsumer.Messages(): log.Printf("Consumed message offset %d\n", msg.Offset) consumed++ case <-signals: break ConsumerLoop } } log.Printf("Consumed: %d\n", consumed) } func Test_partitionConsumer_parseResponse(t *testing.T) { type args struct { response *FetchResponse } tests := []struct { name string args args want []*ConsumerMessage wantErr bool }{ { name: "empty but throttled FetchResponse is not considered an error", args: args{ response: &FetchResponse{ ThrottleTime: time.Millisecond, }, }, }, { name: "empty FetchResponse is considered an incomplete response by default", args: args{ response: &FetchResponse{}, }, wantErr: true, }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { child := &partitionConsumer{ broker: &brokerConsumer{ broker: &Broker{}, }, conf: &Config{}, } got, err := child.parseResponse(tt.args.response) if (err != nil) != tt.wantErr { t.Errorf("partitionConsumer.parseResponse() error = %v, wantErr %v", err, tt.wantErr) return } if !reflect.DeepEqual(got, tt.want) { t.Errorf("partitionConsumer.parseResponse() = %v, want %v", got, tt.want) } }) } } func Test_partitionConsumer_parseResponseEmptyBatch(t *testing.T) { lrbOffset := int64(5) block := &FetchResponseBlock{ HighWaterMarkOffset: 10, LastStableOffset: 10, LastRecordsBatchOffset: &lrbOffset, LogStartOffset: 0, } response := &FetchResponse{ Blocks: map[string]map[int32]*FetchResponseBlock{"my_topic": {0: block}}, Version: 2, } child := &partitionConsumer{ broker: &brokerConsumer{ broker: &Broker{}, }, conf: NewTestConfig(), topic: "my_topic", partition: 0, } got, err := child.parseResponse(response) if err != nil { t.Errorf("partitionConsumer.parseResponse() error = %v", err) return } if got != nil { t.Errorf("partitionConsumer.parseResponse() should be nil, got %v", got) } if child.offset != 6 { t.Errorf("child.offset should be LastRecordsBatchOffset + 1: %d, got %d", lrbOffset+1, child.offset) } } func testConsumerInterceptor( t *testing.T, interceptors []ConsumerInterceptor, expectationFn func(*testing.T, int, *ConsumerMessage), ) { // Given broker0 := NewMockBroker(t, 0) mockFetchResponse := NewMockFetchResponse(t, 1) for i := 0; i < 10; i++ { mockFetchResponse.SetMessage("my_topic", 0, int64(i), testMsg) } broker0.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": 
NewMockMetadataResponse(t). SetBroker(broker0.Addr(), broker0.BrokerID()). SetLeader("my_topic", 0, broker0.BrokerID()), "OffsetRequest": NewMockOffsetResponse(t). SetOffset("my_topic", 0, OffsetOldest, 0). SetOffset("my_topic", 0, OffsetNewest, 0), "FetchRequest": mockFetchResponse, }) config := NewTestConfig() config.Consumer.Interceptors = interceptors // When master, err := NewConsumer([]string{broker0.Addr()}, config) if err != nil { t.Fatal(err) } consumer, err := master.ConsumePartition("my_topic", 0, 0) if err != nil { t.Fatal(err) } for i := 0; i < 10; i++ { select { case msg := <-consumer.Messages(): expectationFn(t, i, msg) case err := <-consumer.Errors(): t.Error(err) } } safeClose(t, consumer) safeClose(t, master) broker0.Close() } func TestConsumerInterceptors(t *testing.T) { tests := []struct { name string interceptors []ConsumerInterceptor expectationFn func(*testing.T, int, *ConsumerMessage) }{ { name: "intercept messages", interceptors: []ConsumerInterceptor{&appendInterceptor{i: 0}}, expectationFn: func(t *testing.T, i int, msg *ConsumerMessage) { ev, _ := testMsg.Encode() expected := string(ev) + strconv.Itoa(i) v := string(msg.Value) if v != expected { t.Errorf("Interceptor should have incremented the value, got %s, expected %s", v, expected) } }, }, { name: "interceptor chain", interceptors: []ConsumerInterceptor{&appendInterceptor{i: 0}, &appendInterceptor{i: 1000}}, expectationFn: func(t *testing.T, i int, msg *ConsumerMessage) { ev, _ := testMsg.Encode() expected := string(ev) + strconv.Itoa(i) + strconv.Itoa(i+1000) v := string(msg.Value) if v != expected { t.Errorf("Interceptor should have incremented the value, got %s, expected %s", v, expected) } }, }, { name: "interceptor chain with one interceptor failing", interceptors: []ConsumerInterceptor{&appendInterceptor{i: -1}, &appendInterceptor{i: 1000}}, expectationFn: func(t *testing.T, i int, msg *ConsumerMessage) { ev, _ := testMsg.Encode() expected := string(ev) + strconv.Itoa(i+1000) v := string(msg.Value) if v != expected { t.Errorf("Interceptor should have not changed the value, got %s, expected %s", v, expected) } }, }, { name: "interceptor chain with all interceptors failing", interceptors: []ConsumerInterceptor{&appendInterceptor{i: -1}, &appendInterceptor{i: -1}}, expectationFn: func(t *testing.T, i int, msg *ConsumerMessage) { ev, _ := testMsg.Encode() expected := string(ev) v := string(msg.Value) if v != expected { t.Errorf("Interceptor should have incremented the value, got %s, expected %s", v, expected) } }, }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { testConsumerInterceptor(t, tt.interceptors, tt.expectationFn) }) } } func TestConsumerError(t *testing.T) { t.Parallel() err := ConsumerError{Err: ErrOutOfBrokers} if !errors.Is(err, ErrOutOfBrokers) { t.Error("unexpected errors.Is") } } golang-github-ibm-sarama-1.43.2/control_record.go000066400000000000000000000035201461256741300217150ustar00rootroot00000000000000package sarama // ControlRecordType ... type ControlRecordType int const ( // ControlRecordAbort is a control record for abort ControlRecordAbort ControlRecordType = iota // ControlRecordCommit is a control record for commit ControlRecordCommit // ControlRecordUnknown is a control record of unknown type ControlRecordUnknown ) // Control records are returned as a record by fetchRequest // However unlike "normal" records, they mean nothing application wise. // They only serve internal logic for supporting transactions. 
type ControlRecord struct { Version int16 CoordinatorEpoch int32 Type ControlRecordType } func (cr *ControlRecord) decode(key, value packetDecoder) error { var err error // There a version for the value part AND the key part. And I have no idea if they are supposed to match or not // Either way, all these version can only be 0 for now cr.Version, err = key.getInt16() if err != nil { return err } recordType, err := key.getInt16() if err != nil { return err } switch recordType { case 0: cr.Type = ControlRecordAbort case 1: cr.Type = ControlRecordCommit default: // from JAVA implementation: // UNKNOWN is used to indicate a control type which the client is not aware of and should be ignored cr.Type = ControlRecordUnknown } // we want to parse value only if we are decoding control record of known type if cr.Type != ControlRecordUnknown { cr.Version, err = value.getInt16() if err != nil { return err } cr.CoordinatorEpoch, err = value.getInt32() if err != nil { return err } } return nil } func (cr *ControlRecord) encode(key, value packetEncoder) { value.putInt16(cr.Version) value.putInt32(cr.CoordinatorEpoch) key.putInt16(cr.Version) switch cr.Type { case ControlRecordAbort: key.putInt16(0) case ControlRecordCommit: key.putInt16(1) } } golang-github-ibm-sarama-1.43.2/control_record_test.go000066400000000000000000000033031461256741300227530ustar00rootroot00000000000000package sarama import ( "testing" ) var ( abortTxCtrlRecKey = []byte{ 0, 0, // version 0, 0, // TX_ABORT = 0 } abortTxCtrlRecValue = []byte{ 0, 0, // version 0, 0, 0, 10, // coordinator epoch } commitTxCtrlRecKey = []byte{ 0, 0, // version 0, 1, // TX_COMMIT = 1 } commitTxCtrlRecValue = []byte{ 0, 0, // version 0, 0, 0, 15, // coordinator epoch } unknownCtrlRecKey = []byte{ 0, 0, // version 0, 128, // UNKNOWN = -1 } // empty value for unknown record unknownCtrlRecValue = []byte{} ) func testDecode(t *testing.T, tp string, key []byte, value []byte) ControlRecord { controlRecord := ControlRecord{} err := controlRecord.decode(&realDecoder{raw: key}, &realDecoder{raw: value}) if err != nil { t.Error("Decoding control record of type " + tp + " failed") return ControlRecord{} } return controlRecord } func assertRecordType(t *testing.T, r *ControlRecord, expected ControlRecordType) { if r.Type != expected { t.Errorf("control record type mismatch, expected: %v, have %v", expected, r.Type) } } func TestDecodingControlRecords(t *testing.T) { abortTx := testDecode(t, "abort transaction", abortTxCtrlRecKey, abortTxCtrlRecValue) assertRecordType(t, &abortTx, ControlRecordAbort) if abortTx.CoordinatorEpoch != 10 { t.Errorf("abort tx control record coordinator epoch mismatch") } commitTx := testDecode(t, "commit transaction", commitTxCtrlRecKey, commitTxCtrlRecValue) if commitTx.CoordinatorEpoch != 15 { t.Errorf("commit tx control record coordinator epoch mismatch") } assertRecordType(t, &commitTx, ControlRecordCommit) unknown := testDecode(t, "unknown", unknownCtrlRecKey, unknownCtrlRecValue) assertRecordType(t, &unknown, ControlRecordUnknown) } golang-github-ibm-sarama-1.43.2/crc32_field.go000066400000000000000000000033561461256741300207650ustar00rootroot00000000000000package sarama import ( "encoding/binary" "fmt" "hash/crc32" "sync" ) type crcPolynomial int8 const ( crcIEEE crcPolynomial = iota crcCastagnoli ) var crc32FieldPool = sync.Pool{} func acquireCrc32Field(polynomial crcPolynomial) *crc32Field { val := crc32FieldPool.Get() if val != nil { c := val.(*crc32Field) c.polynomial = polynomial return c } return newCRC32Field(polynomial) } 
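// Illustrative sketch (not part of the upstream file): how a pooled crc32Field
// is driven during encoding. The checksum slot is reserved at a saved offset,
// the payload is written after it, run() back-fills the CRC over everything
// past the slot, and check() verifies it on the decode path. The buffer and
// payload below are assumptions chosen only for illustration.
func exampleCrc32FieldUsage() error {
	buf := make([]byte, 16)
	c := acquireCrc32Field(crcCastagnoli)
	defer releaseCrc32Field(c)

	c.saveOffset(0) // the CRC will be written into buf[0:4]
	payload := []byte("payload")
	copy(buf[4:], payload)
	curOffset := 4 + len(payload)

	// run computes the Castagnoli CRC over buf[4:curOffset] and stores it big-endian in buf[0:4].
	if err := c.run(curOffset, buf); err != nil {
		return err
	}
	// check recomputes the checksum and compares it against the stored value.
	return c.check(curOffset, buf)
}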
func releaseCrc32Field(c *crc32Field) { crc32FieldPool.Put(c) } var castagnoliTable = crc32.MakeTable(crc32.Castagnoli) // crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s. type crc32Field struct { startOffset int polynomial crcPolynomial } func (c *crc32Field) saveOffset(in int) { c.startOffset = in } func (c *crc32Field) reserveLength() int { return 4 } func newCRC32Field(polynomial crcPolynomial) *crc32Field { return &crc32Field{polynomial: polynomial} } func (c *crc32Field) run(curOffset int, buf []byte) error { crc, err := c.crc(curOffset, buf) if err != nil { return err } binary.BigEndian.PutUint32(buf[c.startOffset:], crc) return nil } func (c *crc32Field) check(curOffset int, buf []byte) error { crc, err := c.crc(curOffset, buf) if err != nil { return err } expected := binary.BigEndian.Uint32(buf[c.startOffset:]) if crc != expected { return PacketDecodingError{fmt.Sprintf("CRC didn't match expected %#x got %#x", expected, crc)} } return nil } func (c *crc32Field) crc(curOffset int, buf []byte) (uint32, error) { var tab *crc32.Table switch c.polynomial { case crcIEEE: tab = crc32.IEEETable case crcCastagnoli: tab = castagnoliTable default: return 0, PacketDecodingError{"invalid CRC type"} } return crc32.Checksum(buf[c.startOffset+4:curOffset], tab), nil } golang-github-ibm-sarama-1.43.2/create_partitions_request.go000066400000000000000000000050131461256741300241650ustar00rootroot00000000000000package sarama import "time" type CreatePartitionsRequest struct { Version int16 TopicPartitions map[string]*TopicPartition Timeout time.Duration ValidateOnly bool } func (c *CreatePartitionsRequest) encode(pe packetEncoder) error { if err := pe.putArrayLength(len(c.TopicPartitions)); err != nil { return err } for topic, partition := range c.TopicPartitions { if err := pe.putString(topic); err != nil { return err } if err := partition.encode(pe); err != nil { return err } } pe.putInt32(int32(c.Timeout / time.Millisecond)) pe.putBool(c.ValidateOnly) return nil } func (c *CreatePartitionsRequest) decode(pd packetDecoder, version int16) (err error) { n, err := pd.getArrayLength() if err != nil { return err } c.TopicPartitions = make(map[string]*TopicPartition, n) for i := 0; i < n; i++ { topic, err := pd.getString() if err != nil { return err } c.TopicPartitions[topic] = new(TopicPartition) if err := c.TopicPartitions[topic].decode(pd, version); err != nil { return err } } timeout, err := pd.getInt32() if err != nil { return err } c.Timeout = time.Duration(timeout) * time.Millisecond if c.ValidateOnly, err = pd.getBool(); err != nil { return err } return nil } func (r *CreatePartitionsRequest) key() int16 { return 37 } func (r *CreatePartitionsRequest) version() int16 { return r.Version } func (r *CreatePartitionsRequest) headerVersion() int16 { return 1 } func (r *CreatePartitionsRequest) isValidVersion() bool { return r.Version >= 0 && r.Version <= 1 } func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion { switch r.Version { case 1: return V2_0_0_0 case 0: return V1_0_0_0 default: return V2_0_0_0 } } type TopicPartition struct { Count int32 Assignment [][]int32 } func (t *TopicPartition) encode(pe packetEncoder) error { pe.putInt32(t.Count) if len(t.Assignment) == 0 { pe.putInt32(-1) return nil } if err := pe.putArrayLength(len(t.Assignment)); err != nil { return err } for _, assign := range t.Assignment { if err := pe.putInt32Array(assign); err != nil { return err } } return nil } func (t *TopicPartition) decode(pd packetDecoder, version int16) 
(err error) { if t.Count, err = pd.getInt32(); err != nil { return err } n, err := pd.getInt32() if err != nil { return err } if n <= 0 { return nil } t.Assignment = make([][]int32, n) for i := 0; i < int(n); i++ { if t.Assignment[i], err = pd.getInt32Array(); err != nil { return err } } return nil } golang-github-ibm-sarama-1.43.2/create_partitions_request_test.go000066400000000000000000000021451461256741300252270ustar00rootroot00000000000000package sarama import ( "testing" "time" ) var ( createPartitionRequestNoAssignment = []byte{ 0, 0, 0, 1, // one topic 0, 5, 't', 'o', 'p', 'i', 'c', 0, 0, 0, 3, // 3 partitions 255, 255, 255, 255, // no assignments 0, 0, 0, 100, // timeout 0, // validate only = false } createPartitionRequestAssignment = []byte{ 0, 0, 0, 1, 0, 5, 't', 'o', 'p', 'i', 'c', 0, 0, 0, 3, // 3 partitions 0, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 1, 0, 0, 0, 100, 1, // validate only = true } ) func TestCreatePartitionsRequest(t *testing.T) { req := &CreatePartitionsRequest{ TopicPartitions: map[string]*TopicPartition{ "topic": { Count: 3, }, }, Timeout: 100 * time.Millisecond, } buf := testRequestEncode(t, "no assignment", req, createPartitionRequestNoAssignment) testRequestDecode(t, "no assignment", req, buf) req.ValidateOnly = true req.TopicPartitions["topic"].Assignment = [][]int32{{2, 3}, {3, 1}} buf = testRequestEncode(t, "assignment", req, createPartitionRequestAssignment) testRequestDecode(t, "assignment", req, buf) } golang-github-ibm-sarama-1.43.2/create_partitions_response.go000066400000000000000000000047751461256741300243510ustar00rootroot00000000000000package sarama import ( "fmt" "time" ) type CreatePartitionsResponse struct { Version int16 ThrottleTime time.Duration TopicPartitionErrors map[string]*TopicPartitionError } func (c *CreatePartitionsResponse) encode(pe packetEncoder) error { pe.putInt32(int32(c.ThrottleTime / time.Millisecond)) if err := pe.putArrayLength(len(c.TopicPartitionErrors)); err != nil { return err } for topic, partitionError := range c.TopicPartitionErrors { if err := pe.putString(topic); err != nil { return err } if err := partitionError.encode(pe); err != nil { return err } } return nil } func (c *CreatePartitionsResponse) decode(pd packetDecoder, version int16) (err error) { throttleTime, err := pd.getInt32() if err != nil { return err } c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond n, err := pd.getArrayLength() if err != nil { return err } c.TopicPartitionErrors = make(map[string]*TopicPartitionError, n) for i := 0; i < n; i++ { topic, err := pd.getString() if err != nil { return err } c.TopicPartitionErrors[topic] = new(TopicPartitionError) if err := c.TopicPartitionErrors[topic].decode(pd, version); err != nil { return err } } return nil } func (r *CreatePartitionsResponse) key() int16 { return 37 } func (r *CreatePartitionsResponse) version() int16 { return r.Version } func (r *CreatePartitionsResponse) headerVersion() int16 { return 0 } func (r *CreatePartitionsResponse) isValidVersion() bool { return r.Version >= 0 && r.Version <= 1 } func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion { switch r.Version { case 1: return V2_0_0_0 case 0: return V1_0_0_0 default: return V2_0_0_0 } } func (r *CreatePartitionsResponse) throttleTime() time.Duration { return r.ThrottleTime } type TopicPartitionError struct { Err KError ErrMsg *string } func (t *TopicPartitionError) Error() string { text := t.Err.Error() if t.ErrMsg != nil { text = fmt.Sprintf("%s - %s", text, 
*t.ErrMsg) } return text } func (t *TopicPartitionError) Unwrap() error { return t.Err } func (t *TopicPartitionError) encode(pe packetEncoder) error { pe.putInt16(int16(t.Err)) if err := pe.putNullableString(t.ErrMsg); err != nil { return err } return nil } func (t *TopicPartitionError) decode(pd packetDecoder, version int16) (err error) { kerr, err := pd.getInt16() if err != nil { return err } t.Err = KError(kerr) if t.ErrMsg, err = pd.getNullableString(); err != nil { return err } return nil } golang-github-ibm-sarama-1.43.2/create_partitions_response_test.go000066400000000000000000000041471461256741300254010ustar00rootroot00000000000000package sarama import ( "errors" "reflect" "testing" "time" ) var ( createPartitionResponseSuccess = []byte{ 0, 0, 0, 100, // throttleTimeMs 0, 0, 0, 1, 0, 5, 't', 'o', 'p', 'i', 'c', 0, 0, // no error 255, 255, // no error message } createPartitionResponseFail = []byte{ 0, 0, 0, 100, // throttleTimeMs 0, 0, 0, 1, 0, 5, 't', 'o', 'p', 'i', 'c', 0, 37, // partition error 0, 5, 'e', 'r', 'r', 'o', 'r', } ) func TestCreatePartitionsResponse(t *testing.T) { resp := &CreatePartitionsResponse{ ThrottleTime: 100 * time.Millisecond, TopicPartitionErrors: map[string]*TopicPartitionError{ "topic": {}, }, } testResponse(t, "success", resp, createPartitionResponseSuccess) decodedresp := new(CreatePartitionsResponse) testVersionDecodable(t, "success", decodedresp, createPartitionResponseSuccess, 0) if !reflect.DeepEqual(decodedresp, resp) { t.Errorf("Decoding error: expected %v but got %v", decodedresp, resp) } errMsg := "error" resp.TopicPartitionErrors["topic"].Err = ErrInvalidPartitions resp.TopicPartitionErrors["topic"].ErrMsg = &errMsg testResponse(t, "with errors", resp, createPartitionResponseFail) decodedresp = new(CreatePartitionsResponse) testVersionDecodable(t, "with errors", decodedresp, createPartitionResponseFail, 0) if !reflect.DeepEqual(decodedresp, resp) { t.Errorf("Decoding error: expected %v but got %v", decodedresp, resp) } } func TestTopicPartitionError(t *testing.T) { // Assert that TopicPartitionError satisfies error interface var err error = &TopicPartitionError{ Err: ErrTopicAuthorizationFailed, } if !errors.Is(err, ErrTopicAuthorizationFailed) { t.Errorf("unexpected errors.Is") } got := err.Error() want := ErrTopicAuthorizationFailed.Error() if got != want { t.Errorf("TopicPartitionError.Error() = %v; want %v", got, want) } msg := "reason why topic authorization failed" err = &TopicPartitionError{ Err: ErrTopicAuthorizationFailed, ErrMsg: &msg, } got = err.Error() want = ErrTopicAuthorizationFailed.Error() + " - " + msg if got != want { t.Errorf("TopicPartitionError.Error() = %v; want %v", got, want) } } golang-github-ibm-sarama-1.43.2/create_topics_request.go000066400000000000000000000104521461256741300232750ustar00rootroot00000000000000package sarama import ( "time" ) type CreateTopicsRequest struct { // Version defines the protocol version to use for encode and decode Version int16 // TopicDetails contains the topics to create. TopicDetails map[string]*TopicDetail // Timeout contains how long to wait before timing out the request. Timeout time.Duration // ValidateOnly if true, check that the topics can be created as specified, // but don't create anything. 
ValidateOnly bool } func (c *CreateTopicsRequest) encode(pe packetEncoder) error { if err := pe.putArrayLength(len(c.TopicDetails)); err != nil { return err } for topic, detail := range c.TopicDetails { if err := pe.putString(topic); err != nil { return err } if err := detail.encode(pe); err != nil { return err } } pe.putInt32(int32(c.Timeout / time.Millisecond)) if c.Version >= 1 { pe.putBool(c.ValidateOnly) } return nil } func (c *CreateTopicsRequest) decode(pd packetDecoder, version int16) (err error) { n, err := pd.getArrayLength() if err != nil { return err } c.TopicDetails = make(map[string]*TopicDetail, n) for i := 0; i < n; i++ { topic, err := pd.getString() if err != nil { return err } c.TopicDetails[topic] = new(TopicDetail) if err = c.TopicDetails[topic].decode(pd, version); err != nil { return err } } timeout, err := pd.getInt32() if err != nil { return err } c.Timeout = time.Duration(timeout) * time.Millisecond if version >= 1 { c.ValidateOnly, err = pd.getBool() if err != nil { return err } c.Version = version } return nil } func (c *CreateTopicsRequest) key() int16 { return 19 } func (c *CreateTopicsRequest) version() int16 { return c.Version } func (r *CreateTopicsRequest) headerVersion() int16 { return 1 } func (c *CreateTopicsRequest) isValidVersion() bool { return c.Version >= 0 && c.Version <= 3 } func (c *CreateTopicsRequest) requiredVersion() KafkaVersion { switch c.Version { case 3: return V2_0_0_0 case 2: return V0_11_0_0 case 1: return V0_10_2_0 case 0: return V0_10_1_0 default: return V2_8_0_0 } } type TopicDetail struct { // NumPartitions contains the number of partitions to create in the topic, or // -1 if we are either specifying a manual partition assignment or using the // default partitions. NumPartitions int32 // ReplicationFactor contains the number of replicas to create for each // partition in the topic, or -1 if we are either specifying a manual // partition assignment or using the default replication factor. ReplicationFactor int16 // ReplicaAssignment contains the manual partition assignment, or the empty // array if we are using automatic assignment. ReplicaAssignment map[int32][]int32 // ConfigEntries contains the custom topic configurations to set. 
ConfigEntries map[string]*string } func (t *TopicDetail) encode(pe packetEncoder) error { pe.putInt32(t.NumPartitions) pe.putInt16(t.ReplicationFactor) if err := pe.putArrayLength(len(t.ReplicaAssignment)); err != nil { return err } for partition, assignment := range t.ReplicaAssignment { pe.putInt32(partition) if err := pe.putInt32Array(assignment); err != nil { return err } } if err := pe.putArrayLength(len(t.ConfigEntries)); err != nil { return err } for configKey, configValue := range t.ConfigEntries { if err := pe.putString(configKey); err != nil { return err } if err := pe.putNullableString(configValue); err != nil { return err } } return nil } func (t *TopicDetail) decode(pd packetDecoder, version int16) (err error) { if t.NumPartitions, err = pd.getInt32(); err != nil { return err } if t.ReplicationFactor, err = pd.getInt16(); err != nil { return err } n, err := pd.getArrayLength() if err != nil { return err } if n > 0 { t.ReplicaAssignment = make(map[int32][]int32, n) for i := 0; i < n; i++ { replica, err := pd.getInt32() if err != nil { return err } if t.ReplicaAssignment[replica], err = pd.getInt32Array(); err != nil { return err } } } n, err = pd.getArrayLength() if err != nil { return err } if n > 0 { t.ConfigEntries = make(map[string]*string, n) for i := 0; i < n; i++ { configKey, err := pd.getString() if err != nil { return err } if t.ConfigEntries[configKey], err = pd.getNullableString(); err != nil { return err } } } return nil } golang-github-ibm-sarama-1.43.2/create_topics_request_test.go000066400000000000000000000020041461256741300243260ustar00rootroot00000000000000package sarama import ( "testing" "time" ) var ( createTopicsRequestV0 = []byte{ 0, 0, 0, 1, 0, 5, 't', 'o', 'p', 'i', 'c', 255, 255, 255, 255, 255, 255, 0, 0, 0, 1, // 1 replica assignment 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 1, // 1 config 0, 12, 'r', 'e', 't', 'e', 'n', 't', 'i', 'o', 'n', '.', 'm', 's', 0, 2, '-', '1', 0, 0, 0, 100, } createTopicsRequestV1 = append(createTopicsRequestV0, byte(1)) ) func TestCreateTopicsRequest(t *testing.T) { retention := "-1" req := &CreateTopicsRequest{ TopicDetails: map[string]*TopicDetail{ "topic": { NumPartitions: -1, ReplicationFactor: -1, ReplicaAssignment: map[int32][]int32{ 0: {0, 1, 2}, }, ConfigEntries: map[string]*string{ "retention.ms": &retention, }, }, }, Timeout: 100 * time.Millisecond, } testRequest(t, "version 0", req, createTopicsRequestV0) req.Version = 1 req.ValidateOnly = true testRequest(t, "version 1", req, createTopicsRequestV1) } golang-github-ibm-sarama-1.43.2/create_topics_response.go000066400000000000000000000054761461256741300234550ustar00rootroot00000000000000package sarama import ( "fmt" "time" ) type CreateTopicsResponse struct { // Version defines the protocol version to use for encode and decode Version int16 // ThrottleTime contains the duration for which the request was throttled due // to a quota violation, or zero if the request did not violate any quota. ThrottleTime time.Duration // TopicErrors contains a map of any errors for the topics we tried to create. 
TopicErrors map[string]*TopicError } func (c *CreateTopicsResponse) encode(pe packetEncoder) error { if c.Version >= 2 { pe.putInt32(int32(c.ThrottleTime / time.Millisecond)) } if err := pe.putArrayLength(len(c.TopicErrors)); err != nil { return err } for topic, topicError := range c.TopicErrors { if err := pe.putString(topic); err != nil { return err } if err := topicError.encode(pe, c.Version); err != nil { return err } } return nil } func (c *CreateTopicsResponse) decode(pd packetDecoder, version int16) (err error) { c.Version = version if version >= 2 { throttleTime, err := pd.getInt32() if err != nil { return err } c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond } n, err := pd.getArrayLength() if err != nil { return err } c.TopicErrors = make(map[string]*TopicError, n) for i := 0; i < n; i++ { topic, err := pd.getString() if err != nil { return err } c.TopicErrors[topic] = new(TopicError) if err := c.TopicErrors[topic].decode(pd, version); err != nil { return err } } return nil } func (c *CreateTopicsResponse) key() int16 { return 19 } func (c *CreateTopicsResponse) version() int16 { return c.Version } func (c *CreateTopicsResponse) headerVersion() int16 { return 0 } func (c *CreateTopicsResponse) isValidVersion() bool { return c.Version >= 0 && c.Version <= 3 } func (c *CreateTopicsResponse) requiredVersion() KafkaVersion { switch c.Version { case 3: return V2_0_0_0 case 2: return V0_11_0_0 case 1: return V0_10_2_0 case 0: return V0_10_1_0 default: return V2_8_0_0 } } func (r *CreateTopicsResponse) throttleTime() time.Duration { return r.ThrottleTime } type TopicError struct { Err KError ErrMsg *string } func (t *TopicError) Error() string { text := t.Err.Error() if t.ErrMsg != nil { text = fmt.Sprintf("%s - %s", text, *t.ErrMsg) } return text } func (t *TopicError) Unwrap() error { return t.Err } func (t *TopicError) encode(pe packetEncoder, version int16) error { pe.putInt16(int16(t.Err)) if version >= 1 { if err := pe.putNullableString(t.ErrMsg); err != nil { return err } } return nil } func (t *TopicError) decode(pd packetDecoder, version int16) (err error) { kErr, err := pd.getInt16() if err != nil { return err } t.Err = KError(kErr) if version >= 1 { if t.ErrMsg, err = pd.getNullableString(); err != nil { return err } } return nil } golang-github-ibm-sarama-1.43.2/create_topics_response_test.go000066400000000000000000000030571461256741300245050ustar00rootroot00000000000000package sarama import ( "errors" "testing" "time" ) var ( createTopicsResponseV0 = []byte{ 0, 0, 0, 1, 0, 5, 't', 'o', 'p', 'i', 'c', 0, 42, } createTopicsResponseV1 = []byte{ 0, 0, 0, 1, 0, 5, 't', 'o', 'p', 'i', 'c', 0, 42, 0, 3, 'm', 's', 'g', } createTopicsResponseV2 = []byte{ 0, 0, 0, 100, 0, 0, 0, 1, 0, 5, 't', 'o', 'p', 'i', 'c', 0, 42, 0, 3, 'm', 's', 'g', } ) func TestCreateTopicsResponse(t *testing.T) { resp := &CreateTopicsResponse{ TopicErrors: map[string]*TopicError{ "topic": { Err: ErrInvalidRequest, }, }, } testResponse(t, "version 0", resp, createTopicsResponseV0) resp.Version = 1 msg := "msg" resp.TopicErrors["topic"].ErrMsg = &msg testResponse(t, "version 1", resp, createTopicsResponseV1) resp.Version = 2 resp.ThrottleTime = 100 * time.Millisecond testResponse(t, "version 2", resp, createTopicsResponseV2) } func TestTopicError(t *testing.T) { // Assert that TopicError satisfies error interface var err error = &TopicError{ Err: ErrTopicAuthorizationFailed, } if !errors.Is(err, ErrTopicAuthorizationFailed) { t.Errorf("unexpected errors.Is") } got := err.Error() want := 
ErrTopicAuthorizationFailed.Error() if got != want { t.Errorf("TopicError.Error() = %v; want %v", got, want) } msg := "reason why topic authorization failed" err = &TopicError{ Err: ErrTopicAuthorizationFailed, ErrMsg: &msg, } got = err.Error() want = ErrTopicAuthorizationFailed.Error() + " - " + msg if got != want { t.Errorf("TopicError.Error() = %v; want %v", got, want) } } golang-github-ibm-sarama-1.43.2/decompress.go000066400000000000000000000042471461256741300210520ustar00rootroot00000000000000package sarama import ( "bytes" "fmt" "sync" snappy "github.com/eapache/go-xerial-snappy" "github.com/klauspost/compress/gzip" "github.com/pierrec/lz4/v4" ) var ( lz4ReaderPool = sync.Pool{ New: func() interface{} { return lz4.NewReader(nil) }, } gzipReaderPool sync.Pool bufferPool = sync.Pool{ New: func() interface{} { return new(bytes.Buffer) }, } bytesPool = sync.Pool{ New: func() interface{} { res := make([]byte, 0, 4096) return &res }, } ) func decompress(cc CompressionCodec, data []byte) ([]byte, error) { switch cc { case CompressionNone: return data, nil case CompressionGZIP: var err error reader, ok := gzipReaderPool.Get().(*gzip.Reader) if !ok { reader, err = gzip.NewReader(bytes.NewReader(data)) } else { err = reader.Reset(bytes.NewReader(data)) } if err != nil { return nil, err } buffer := bufferPool.Get().(*bytes.Buffer) _, err = buffer.ReadFrom(reader) // copy the buffer to a new slice with the correct length // reuse gzipReader and buffer gzipReaderPool.Put(reader) res := make([]byte, buffer.Len()) copy(res, buffer.Bytes()) buffer.Reset() bufferPool.Put(buffer) return res, err case CompressionSnappy: return snappy.Decode(data) case CompressionLZ4: reader, ok := lz4ReaderPool.Get().(*lz4.Reader) if !ok { reader = lz4.NewReader(bytes.NewReader(data)) } else { reader.Reset(bytes.NewReader(data)) } buffer := bufferPool.Get().(*bytes.Buffer) _, err := buffer.ReadFrom(reader) // copy the buffer to a new slice with the correct length // reuse lz4Reader and buffer lz4ReaderPool.Put(reader) res := make([]byte, buffer.Len()) copy(res, buffer.Bytes()) buffer.Reset() bufferPool.Put(buffer) return res, err case CompressionZSTD: buffer := *bytesPool.Get().(*[]byte) var err error buffer, err = zstdDecompress(ZstdDecoderParams{}, buffer, data) // copy the buffer to a new slice with the correct length and reuse buffer res := make([]byte, len(buffer)) copy(res, buffer) buffer = buffer[:0] bytesPool.Put(&buffer) return res, err default: return nil, PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", cc)} } } golang-github-ibm-sarama-1.43.2/delete_groups_request.go000066400000000000000000000015541461256741300233150ustar00rootroot00000000000000package sarama type DeleteGroupsRequest struct { Version int16 Groups []string } func (r *DeleteGroupsRequest) encode(pe packetEncoder) error { return pe.putStringArray(r.Groups) } func (r *DeleteGroupsRequest) decode(pd packetDecoder, version int16) (err error) { r.Groups, err = pd.getStringArray() return } func (r *DeleteGroupsRequest) key() int16 { return 42 } func (r *DeleteGroupsRequest) version() int16 { return r.Version } func (r *DeleteGroupsRequest) headerVersion() int16 { return 1 } func (r *DeleteGroupsRequest) isValidVersion() bool { return r.Version >= 0 && r.Version <= 1 } func (r *DeleteGroupsRequest) requiredVersion() KafkaVersion { switch r.Version { case 1: return V2_0_0_0 case 0: return V1_1_0_0 default: return V2_0_0_0 } } func (r *DeleteGroupsRequest) AddGroup(group string) { r.Groups = append(r.Groups, group) } 
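// Illustrative sketch (not part of the upstream file): assembling a
// DeleteGroupsRequest by hand. The group names are placeholders; in typical
// use the higher-level admin client builds and sends this request when
// deleting consumer groups.
func exampleDeleteGroupsRequest() *DeleteGroupsRequest {
	req := &DeleteGroupsRequest{Version: 1} // v1 requires brokers >= 2.0.0
	req.AddGroup("group-a")
	req.AddGroup("group-b")
	return req
}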
golang-github-ibm-sarama-1.43.2/delete_groups_request_test.go000066400000000000000000000014531461256741300243520ustar00rootroot00000000000000package sarama import "testing" var ( emptyDeleteGroupsRequest = []byte{0, 0, 0, 0} singleDeleteGroupsRequest = []byte{ 0, 0, 0, 1, // 1 group 0, 3, 'f', 'o', 'o', // group name: foo } doubleDeleteGroupsRequest = []byte{ 0, 0, 0, 2, // 2 groups 0, 3, 'f', 'o', 'o', // group name: foo 0, 3, 'b', 'a', 'r', // group name: foo } ) func TestDeleteGroupsRequest(t *testing.T) { var request *DeleteGroupsRequest request = new(DeleteGroupsRequest) testRequest(t, "no groups", request, emptyDeleteGroupsRequest) request = new(DeleteGroupsRequest) request.AddGroup("foo") testRequest(t, "one group", request, singleDeleteGroupsRequest) request = new(DeleteGroupsRequest) request.AddGroup("foo") request.AddGroup("bar") testRequest(t, "two groups", request, doubleDeleteGroupsRequest) } golang-github-ibm-sarama-1.43.2/delete_groups_response.go000066400000000000000000000032211461256741300234540ustar00rootroot00000000000000package sarama import ( "time" ) type DeleteGroupsResponse struct { Version int16 ThrottleTime time.Duration GroupErrorCodes map[string]KError } func (r *DeleteGroupsResponse) encode(pe packetEncoder) error { pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) if err := pe.putArrayLength(len(r.GroupErrorCodes)); err != nil { return err } for groupID, errorCode := range r.GroupErrorCodes { if err := pe.putString(groupID); err != nil { return err } pe.putInt16(int16(errorCode)) } return nil } func (r *DeleteGroupsResponse) decode(pd packetDecoder, version int16) error { throttleTime, err := pd.getInt32() if err != nil { return err } r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond n, err := pd.getArrayLength() if err != nil { return err } if n == 0 { return nil } r.GroupErrorCodes = make(map[string]KError, n) for i := 0; i < n; i++ { groupID, err := pd.getString() if err != nil { return err } errorCode, err := pd.getInt16() if err != nil { return err } r.GroupErrorCodes[groupID] = KError(errorCode) } return nil } func (r *DeleteGroupsResponse) key() int16 { return 42 } func (r *DeleteGroupsResponse) version() int16 { return r.Version } func (r *DeleteGroupsResponse) headerVersion() int16 { return 0 } func (r *DeleteGroupsResponse) isValidVersion() bool { return r.Version >= 0 && r.Version <= 1 } func (r *DeleteGroupsResponse) requiredVersion() KafkaVersion { switch r.Version { case 1: return V2_0_0_0 case 0: return V1_1_0_0 default: return V2_0_0_0 } } func (r *DeleteGroupsResponse) throttleTime() time.Duration { return r.ThrottleTime } golang-github-ibm-sarama-1.43.2/delete_groups_response_test.go000066400000000000000000000030751461256741300245220ustar00rootroot00000000000000package sarama import ( "errors" "testing" ) var ( emptyDeleteGroupsResponse = []byte{ 0, 0, 0, 0, // does not violate any quota 0, 0, 0, 0, // no groups } errorDeleteGroupsResponse = []byte{ 0, 0, 0, 0, // does not violate any quota 0, 0, 0, 1, // 1 group 0, 3, 'f', 'o', 'o', // group name 0, 31, // error ErrClusterAuthorizationFailed } noErrorDeleteGroupsResponse = []byte{ 0, 0, 0, 0, // does not violate any quota 0, 0, 0, 1, // 1 group 0, 3, 'f', 'o', 'o', // group name 0, 0, // no error } ) func TestDeleteGroupsResponse(t *testing.T) { var response *DeleteGroupsResponse response = new(DeleteGroupsResponse) testVersionDecodable(t, "empty", response, emptyDeleteGroupsResponse, 0) if response.ThrottleTime != 0 { t.Error("Expected no violation") } if 
len(response.GroupErrorCodes) != 0 { t.Error("Expected no groups") } response = new(DeleteGroupsResponse) testVersionDecodable(t, "error", response, errorDeleteGroupsResponse, 0) if response.ThrottleTime != 0 { t.Error("Expected no violation") } if !errors.Is(response.GroupErrorCodes["foo"], ErrClusterAuthorizationFailed) { t.Error("Expected error ErrClusterAuthorizationFailed, found:", response.GroupErrorCodes["foo"]) } response = new(DeleteGroupsResponse) testVersionDecodable(t, "no error", response, noErrorDeleteGroupsResponse, 0) if response.ThrottleTime != 0 { t.Error("Expected no violation") } if !errors.Is(response.GroupErrorCodes["foo"], ErrNoError) { t.Error("Expected error ErrClusterAuthorizationFailed, found:", response.GroupErrorCodes["foo"]) } } golang-github-ibm-sarama-1.43.2/delete_offsets_request.go000066400000000000000000000034561461256741300234520ustar00rootroot00000000000000package sarama type DeleteOffsetsRequest struct { Version int16 Group string partitions map[string][]int32 } func (r *DeleteOffsetsRequest) encode(pe packetEncoder) (err error) { err = pe.putString(r.Group) if err != nil { return err } if r.partitions == nil { pe.putInt32(0) } else { if err = pe.putArrayLength(len(r.partitions)); err != nil { return err } } for topic, partitions := range r.partitions { err = pe.putString(topic) if err != nil { return err } err = pe.putInt32Array(partitions) if err != nil { return err } } return } func (r *DeleteOffsetsRequest) decode(pd packetDecoder, version int16) (err error) { r.Group, err = pd.getString() if err != nil { return err } var partitionCount int partitionCount, err = pd.getArrayLength() if err != nil { return err } if (partitionCount == 0 && version < 2) || partitionCount < 0 { return nil } r.partitions = make(map[string][]int32, partitionCount) for i := 0; i < partitionCount; i++ { var topic string topic, err = pd.getString() if err != nil { return err } var partitions []int32 partitions, err = pd.getInt32Array() if err != nil { return err } r.partitions[topic] = partitions } return nil } func (r *DeleteOffsetsRequest) key() int16 { return 47 } func (r *DeleteOffsetsRequest) version() int16 { return r.Version } func (r *DeleteOffsetsRequest) headerVersion() int16 { return 1 } func (r *DeleteOffsetsRequest) isValidVersion() bool { return r.Version == 0 } func (r *DeleteOffsetsRequest) requiredVersion() KafkaVersion { return V2_4_0_0 } func (r *DeleteOffsetsRequest) AddPartition(topic string, partitionID int32) { if r.partitions == nil { r.partitions = make(map[string][]int32) } r.partitions[topic] = append(r.partitions[topic], partitionID) } golang-github-ibm-sarama-1.43.2/delete_offsets_request_test.go000066400000000000000000000017251461256741300245060ustar00rootroot00000000000000package sarama import "testing" var ( emptyDeleteOffsetsRequest = []byte{ 0, 3, 'f', 'o', 'o', // group name: foo 0, 0, 0, 0, // 0 partition } ) func TestDeleteOffsetsRequest(t *testing.T) { var request *DeleteOffsetsRequest request = new(DeleteOffsetsRequest) request.Group = "foo" testRequest(t, "no offset", request, emptyDeleteOffsetsRequest) request = new(DeleteOffsetsRequest) request.Group = "foo" request.AddPartition("bar", 6) request.AddPartition("bar", 7) // The response encoded form cannot be checked for it varies due to // unpredictable map traversal order. 
testRequest(t, "two offsets on one topic", request, nil) request = new(DeleteOffsetsRequest) request.Group = "foo" request.AddPartition("bar", 6) request.AddPartition("bar", 7) request.AddPartition("baz", 0) // The response encoded form cannot be checked for it varies due to // unpredictable map traversal order. testRequest(t, "three offsets on two topics", request, nil) } golang-github-ibm-sarama-1.43.2/delete_offsets_response.go000066400000000000000000000047441461256741300236210ustar00rootroot00000000000000package sarama import ( "time" ) type DeleteOffsetsResponse struct { Version int16 // The top-level error code, or 0 if there was no error. ErrorCode KError ThrottleTime time.Duration // The responses for each partition of the topics. Errors map[string]map[int32]KError } func (r *DeleteOffsetsResponse) AddError(topic string, partition int32, errorCode KError) { if r.Errors == nil { r.Errors = make(map[string]map[int32]KError) } partitions := r.Errors[topic] if partitions == nil { partitions = make(map[int32]KError) r.Errors[topic] = partitions } partitions[partition] = errorCode } func (r *DeleteOffsetsResponse) encode(pe packetEncoder) error { pe.putInt16(int16(r.ErrorCode)) pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) if err := pe.putArrayLength(len(r.Errors)); err != nil { return err } for topic, partitions := range r.Errors { if err := pe.putString(topic); err != nil { return err } if err := pe.putArrayLength(len(partitions)); err != nil { return err } for partition, errorCode := range partitions { pe.putInt32(partition) pe.putInt16(int16(errorCode)) } } return nil } func (r *DeleteOffsetsResponse) decode(pd packetDecoder, version int16) error { tmpErr, err := pd.getInt16() if err != nil { return err } r.ErrorCode = KError(tmpErr) throttleTime, err := pd.getInt32() if err != nil { return err } r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond numTopics, err := pd.getArrayLength() if err != nil || numTopics == 0 { return err } r.Errors = make(map[string]map[int32]KError, numTopics) for i := 0; i < numTopics; i++ { name, err := pd.getString() if err != nil { return err } numErrors, err := pd.getArrayLength() if err != nil { return err } r.Errors[name] = make(map[int32]KError, numErrors) for j := 0; j < numErrors; j++ { id, err := pd.getInt32() if err != nil { return err } tmp, err := pd.getInt16() if err != nil { return err } r.Errors[name][id] = KError(tmp) } } return nil } func (r *DeleteOffsetsResponse) key() int16 { return 47 } func (r *DeleteOffsetsResponse) version() int16 { return r.Version } func (r *DeleteOffsetsResponse) headerVersion() int16 { return 0 } func (r *DeleteOffsetsResponse) isValidVersion() bool { return r.Version == 0 } func (r *DeleteOffsetsResponse) requiredVersion() KafkaVersion { return V2_4_0_0 } func (r *DeleteOffsetsResponse) throttleTime() time.Duration { return r.ThrottleTime } golang-github-ibm-sarama-1.43.2/delete_offsets_response_test.go000066400000000000000000000032031461256741300246450ustar00rootroot00000000000000package sarama import ( "testing" ) var ( emptyDeleteOffsetsResponse = []byte{ 0, 0, // no error 0, 0, 0, 0, // 0 throttle 0, 0, 0, 0, // 0 topics } errorDeleteOffsetsResponse = []byte{ 0, 16, // error 16 : ErrNotCoordinatorForConsumer 0, 0, 0, 0, // 0 throttle 0, 0, 0, 1, // 1 topic 0, 3, 'b', 'a', 'r', // topic name: bar 0, 0, 0, 1, // 1 partition 0, 0, 0, 6, // partition 6 0, 0, // no error } errorOnPartitionResponse = []byte{ 0, 0, // no error 0, 0, 0, 0, // 0 throttle 0, 0, 0, 1, // 1 topic 0, 3, 'b', 'a', 
'r', // topic name: bar 0, 0, 0, 1, // 1 partition 0, 0, 0, 6, // partition 6 0, 86, // error ErrGroupSubscribedToTopic=86 } ) func TestDeleteOffsetsResponse(t *testing.T) { var response *DeleteOffsetsResponse response = &DeleteOffsetsResponse{ ErrorCode: 0, ThrottleTime: 0, } testResponse(t, "empty no error", response, emptyDeleteOffsetsResponse) response = &DeleteOffsetsResponse{ ErrorCode: 0, ThrottleTime: 0, Errors: map[string]map[int32]KError{ "bar": { 6: 0, 7: 0, }, }, } // The response encoded form cannot be checked for it varies due to // unpredictable map traversal order. testResponse(t, "no error", response, nil) response = &DeleteOffsetsResponse{ ErrorCode: 16, ThrottleTime: 0, Errors: map[string]map[int32]KError{ "bar": { 6: 0, }, }, } testResponse(t, "error global", response, errorDeleteOffsetsResponse) response = &DeleteOffsetsResponse{ ErrorCode: 0, ThrottleTime: 0, } response.AddError("bar", 6, ErrGroupSubscribedToTopic) testResponse(t, "error partition", response, errorOnPartitionResponse) } golang-github-ibm-sarama-1.43.2/delete_records_request.go000066400000000000000000000055071461256741300234410ustar00rootroot00000000000000package sarama import ( "sort" "time" ) // request message format is: // [topic] timeout(int32) // where topic is: // name(string) [partition] // where partition is: // id(int32) offset(int64) type DeleteRecordsRequest struct { Version int16 Topics map[string]*DeleteRecordsRequestTopic Timeout time.Duration } func (d *DeleteRecordsRequest) encode(pe packetEncoder) error { if err := pe.putArrayLength(len(d.Topics)); err != nil { return err } keys := make([]string, 0, len(d.Topics)) for topic := range d.Topics { keys = append(keys, topic) } sort.Strings(keys) for _, topic := range keys { if err := pe.putString(topic); err != nil { return err } if err := d.Topics[topic].encode(pe); err != nil { return err } } pe.putInt32(int32(d.Timeout / time.Millisecond)) return nil } func (d *DeleteRecordsRequest) decode(pd packetDecoder, version int16) error { n, err := pd.getArrayLength() if err != nil { return err } if n > 0 { d.Topics = make(map[string]*DeleteRecordsRequestTopic, n) for i := 0; i < n; i++ { topic, err := pd.getString() if err != nil { return err } details := new(DeleteRecordsRequestTopic) if err = details.decode(pd, version); err != nil { return err } d.Topics[topic] = details } } timeout, err := pd.getInt32() if err != nil { return err } d.Timeout = time.Duration(timeout) * time.Millisecond return nil } func (d *DeleteRecordsRequest) key() int16 { return 21 } func (d *DeleteRecordsRequest) version() int16 { return d.Version } func (d *DeleteRecordsRequest) headerVersion() int16 { return 1 } func (d *DeleteRecordsRequest) isValidVersion() bool { return d.Version >= 0 && d.Version <= 1 } func (d *DeleteRecordsRequest) requiredVersion() KafkaVersion { switch d.Version { case 1: return V2_0_0_0 default: return V0_11_0_0 } } type DeleteRecordsRequestTopic struct { PartitionOffsets map[int32]int64 // partition => offset } func (t *DeleteRecordsRequestTopic) encode(pe packetEncoder) error { if err := pe.putArrayLength(len(t.PartitionOffsets)); err != nil { return err } keys := make([]int32, 0, len(t.PartitionOffsets)) for partition := range t.PartitionOffsets { keys = append(keys, partition) } sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] }) for _, partition := range keys { pe.putInt32(partition) pe.putInt64(t.PartitionOffsets[partition]) } return nil } func (t *DeleteRecordsRequestTopic) decode(pd packetDecoder, version int16) error { 
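// The per-topic payload decoded below is an array of (partition int32, offset int64) pairs; an empty array leaves PartitionOffsets nil.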
n, err := pd.getArrayLength() if err != nil { return err } if n > 0 { t.PartitionOffsets = make(map[int32]int64, n) for i := 0; i < n; i++ { partition, err := pd.getInt32() if err != nil { return err } offset, err := pd.getInt64() if err != nil { return err } t.PartitionOffsets[partition] = offset } } return nil } golang-github-ibm-sarama-1.43.2/delete_records_request_test.go000066400000000000000000000011461461256741300244730ustar00rootroot00000000000000package sarama import ( "testing" "time" ) var deleteRecordsRequest = []byte{ 0, 0, 0, 2, 0, 5, 'o', 't', 'h', 'e', 'r', 0, 0, 0, 0, 0, 5, 't', 'o', 'p', 'i', 'c', 0, 0, 0, 2, 0, 0, 0, 19, 0, 0, 0, 0, 0, 0, 0, 200, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0, 190, 0, 0, 0, 100, } func TestDeleteRecordsRequest(t *testing.T) { req := &DeleteRecordsRequest{ Topics: map[string]*DeleteRecordsRequestTopic{ "topic": { PartitionOffsets: map[int32]int64{ 19: 200, 20: 190, }, }, "other": {}, }, Timeout: 100 * time.Millisecond, } testRequest(t, "", req, deleteRecordsRequest) } golang-github-ibm-sarama-1.43.2/delete_records_response.go000066400000000000000000000071401461256741300236020ustar00rootroot00000000000000package sarama import ( "sort" "time" ) // response message format is: // throttleMs(int32) [topic] // where topic is: // name(string) [partition] // where partition is: // id(int32) low_watermark(int64) error_code(int16) type DeleteRecordsResponse struct { Version int16 ThrottleTime time.Duration Topics map[string]*DeleteRecordsResponseTopic } func (d *DeleteRecordsResponse) encode(pe packetEncoder) error { pe.putInt32(int32(d.ThrottleTime / time.Millisecond)) if err := pe.putArrayLength(len(d.Topics)); err != nil { return err } keys := make([]string, 0, len(d.Topics)) for topic := range d.Topics { keys = append(keys, topic) } sort.Strings(keys) for _, topic := range keys { if err := pe.putString(topic); err != nil { return err } if err := d.Topics[topic].encode(pe); err != nil { return err } } return nil } func (d *DeleteRecordsResponse) decode(pd packetDecoder, version int16) error { d.Version = version throttleTime, err := pd.getInt32() if err != nil { return err } d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond n, err := pd.getArrayLength() if err != nil { return err } if n > 0 { d.Topics = make(map[string]*DeleteRecordsResponseTopic, n) for i := 0; i < n; i++ { topic, err := pd.getString() if err != nil { return err } details := new(DeleteRecordsResponseTopic) if err = details.decode(pd, version); err != nil { return err } d.Topics[topic] = details } } return nil } func (d *DeleteRecordsResponse) key() int16 { return 21 } func (d *DeleteRecordsResponse) version() int16 { return d.Version } func (d *DeleteRecordsResponse) headerVersion() int16 { return 0 } func (d *DeleteRecordsResponse) isValidVersion() bool { return d.Version >= 0 && d.Version <= 1 } func (d *DeleteRecordsResponse) requiredVersion() KafkaVersion { switch d.Version { case 1: return V2_0_0_0 default: return V0_11_0_0 } } func (r *DeleteRecordsResponse) throttleTime() time.Duration { return r.ThrottleTime } type DeleteRecordsResponseTopic struct { Partitions map[int32]*DeleteRecordsResponsePartition } func (t *DeleteRecordsResponseTopic) encode(pe packetEncoder) error { if err := pe.putArrayLength(len(t.Partitions)); err != nil { return err } keys := make([]int32, 0, len(t.Partitions)) for partition := range t.Partitions { keys = append(keys, partition) } sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] }) for _, partition := range keys { 
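// keys were sorted above, so partitions are encoded in ascending order and the output stays deterministic for the byte-for-byte tests.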
pe.putInt32(partition) if err := t.Partitions[partition].encode(pe); err != nil { return err } } return nil } func (t *DeleteRecordsResponseTopic) decode(pd packetDecoder, version int16) error { n, err := pd.getArrayLength() if err != nil { return err } if n > 0 { t.Partitions = make(map[int32]*DeleteRecordsResponsePartition, n) for i := 0; i < n; i++ { partition, err := pd.getInt32() if err != nil { return err } details := new(DeleteRecordsResponsePartition) if err = details.decode(pd, version); err != nil { return err } t.Partitions[partition] = details } } return nil } type DeleteRecordsResponsePartition struct { LowWatermark int64 Err KError } func (t *DeleteRecordsResponsePartition) encode(pe packetEncoder) error { pe.putInt64(t.LowWatermark) pe.putInt16(int16(t.Err)) return nil } func (t *DeleteRecordsResponsePartition) decode(pd packetDecoder, version int16) error { lowWatermark, err := pd.getInt64() if err != nil { return err } t.LowWatermark = lowWatermark kErr, err := pd.getInt16() if err != nil { return err } t.Err = KError(kErr) return nil } golang-github-ibm-sarama-1.43.2/delete_records_response_test.go000066400000000000000000000013451461256741300246420ustar00rootroot00000000000000package sarama import ( "testing" "time" ) var deleteRecordsResponse = []byte{ 0, 0, 0, 100, 0, 0, 0, 2, 0, 5, 'o', 't', 'h', 'e', 'r', 0, 0, 0, 0, 0, 5, 't', 'o', 'p', 'i', 'c', 0, 0, 0, 2, 0, 0, 0, 19, 0, 0, 0, 0, 0, 0, 0, 200, 0, 0, 0, 0, 0, 20, 255, 255, 255, 255, 255, 255, 255, 255, 0, 3, } func TestDeleteRecordsResponse(t *testing.T) { resp := &DeleteRecordsResponse{ Version: 0, ThrottleTime: 100 * time.Millisecond, Topics: map[string]*DeleteRecordsResponseTopic{ "topic": { Partitions: map[int32]*DeleteRecordsResponsePartition{ 19: {LowWatermark: 200, Err: 0}, 20: {LowWatermark: -1, Err: 3}, }, }, "other": {}, }, } testResponse(t, "", resp, deleteRecordsResponse) } golang-github-ibm-sarama-1.43.2/delete_topics_request.go000066400000000000000000000021751461256741300232770ustar00rootroot00000000000000package sarama import "time" type DeleteTopicsRequest struct { Version int16 Topics []string Timeout time.Duration } func (d *DeleteTopicsRequest) encode(pe packetEncoder) error { if err := pe.putStringArray(d.Topics); err != nil { return err } pe.putInt32(int32(d.Timeout / time.Millisecond)) return nil } func (d *DeleteTopicsRequest) decode(pd packetDecoder, version int16) (err error) { if d.Topics, err = pd.getStringArray(); err != nil { return err } timeout, err := pd.getInt32() if err != nil { return err } d.Timeout = time.Duration(timeout) * time.Millisecond d.Version = version return nil } func (d *DeleteTopicsRequest) key() int16 { return 20 } func (d *DeleteTopicsRequest) version() int16 { return d.Version } func (d *DeleteTopicsRequest) headerVersion() int16 { return 1 } func (d *DeleteTopicsRequest) isValidVersion() bool { return d.Version >= 0 && d.Version <= 3 } func (d *DeleteTopicsRequest) requiredVersion() KafkaVersion { switch d.Version { case 3: return V2_1_0_0 case 2: return V2_0_0_0 case 1: return V0_11_0_0 case 0: return V0_10_1_0 default: return V2_2_0_0 } } golang-github-ibm-sarama-1.43.2/delete_topics_request_test.go000066400000000000000000000011431461256741300243300ustar00rootroot00000000000000package sarama import ( "testing" "time" ) var deleteTopicsRequest = []byte{ 0, 0, 0, 2, 0, 5, 't', 'o', 'p', 'i', 'c', 0, 5, 'o', 't', 'h', 'e', 'r', 0, 0, 0, 100, } func TestDeleteTopicsRequestV0(t *testing.T) { req := &DeleteTopicsRequest{ Version: 0, Topics: []string{"topic", 
"other"}, Timeout: 100 * time.Millisecond, } testRequest(t, "", req, deleteTopicsRequest) } func TestDeleteTopicsRequestV1(t *testing.T) { req := &DeleteTopicsRequest{ Version: 1, Topics: []string{"topic", "other"}, Timeout: 100 * time.Millisecond, } testRequest(t, "", req, deleteTopicsRequest) } golang-github-ibm-sarama-1.43.2/delete_topics_response.go000066400000000000000000000033611461256741300234430ustar00rootroot00000000000000package sarama import "time" type DeleteTopicsResponse struct { Version int16 ThrottleTime time.Duration TopicErrorCodes map[string]KError } func (d *DeleteTopicsResponse) encode(pe packetEncoder) error { if d.Version >= 1 { pe.putInt32(int32(d.ThrottleTime / time.Millisecond)) } if err := pe.putArrayLength(len(d.TopicErrorCodes)); err != nil { return err } for topic, errorCode := range d.TopicErrorCodes { if err := pe.putString(topic); err != nil { return err } pe.putInt16(int16(errorCode)) } return nil } func (d *DeleteTopicsResponse) decode(pd packetDecoder, version int16) (err error) { if version >= 1 { throttleTime, err := pd.getInt32() if err != nil { return err } d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond d.Version = version } n, err := pd.getArrayLength() if err != nil { return err } d.TopicErrorCodes = make(map[string]KError, n) for i := 0; i < n; i++ { topic, err := pd.getString() if err != nil { return err } errorCode, err := pd.getInt16() if err != nil { return err } d.TopicErrorCodes[topic] = KError(errorCode) } return nil } func (d *DeleteTopicsResponse) key() int16 { return 20 } func (d *DeleteTopicsResponse) version() int16 { return d.Version } func (d *DeleteTopicsResponse) headerVersion() int16 { return 0 } func (d *DeleteTopicsResponse) isValidVersion() bool { return d.Version >= 0 && d.Version <= 3 } func (d *DeleteTopicsResponse) requiredVersion() KafkaVersion { switch d.Version { case 3: return V2_1_0_0 case 2: return V2_0_0_0 case 1: return V0_11_0_0 case 0: return V0_10_1_0 default: return V2_2_0_0 } } func (r *DeleteTopicsResponse) throttleTime() time.Duration { return r.ThrottleTime } golang-github-ibm-sarama-1.43.2/delete_topics_response_test.go000066400000000000000000000011201461256741300244710ustar00rootroot00000000000000package sarama import ( "testing" "time" ) var ( deleteTopicsResponseV0 = []byte{ 0, 0, 0, 1, 0, 5, 't', 'o', 'p', 'i', 'c', 0, 0, } deleteTopicsResponseV1 = []byte{ 0, 0, 0, 100, 0, 0, 0, 1, 0, 5, 't', 'o', 'p', 'i', 'c', 0, 0, } ) func TestDeleteTopicsResponse(t *testing.T) { resp := &DeleteTopicsResponse{ TopicErrorCodes: map[string]KError{ "topic": ErrNoError, }, } testResponse(t, "version 0", resp, deleteTopicsResponseV0) resp.Version = 1 resp.ThrottleTime = 100 * time.Millisecond testResponse(t, "version 1", resp, deleteTopicsResponseV1) } golang-github-ibm-sarama-1.43.2/describe_client_quotas_request.go000066400000000000000000000062171461256741300251670ustar00rootroot00000000000000package sarama // DescribeClientQuotas Request (Version: 0) => [components] strict // components => entity_type match_type match // entity_type => STRING // match_type => INT8 // match => NULLABLE_STRING // strict => BOOLEAN // A filter to be applied to matching client quotas. // Components: the components to filter on // Strict: whether the filter only includes specified components type DescribeClientQuotasRequest struct { Version int16 Components []QuotaFilterComponent Strict bool } // Describe a component for applying a client quota filter. 
// EntityType: the entity type the filter component applies to ("user", "client-id", "ip") // MatchType: the match type of the filter component (any, exact, default) // Match: the name that's matched exactly (used when MatchType is QuotaMatchExact) type QuotaFilterComponent struct { EntityType QuotaEntityType MatchType QuotaMatchType Match string } func (d *DescribeClientQuotasRequest) encode(pe packetEncoder) error { // Components if err := pe.putArrayLength(len(d.Components)); err != nil { return err } for _, c := range d.Components { if err := c.encode(pe); err != nil { return err } } // Strict pe.putBool(d.Strict) return nil } func (d *DescribeClientQuotasRequest) decode(pd packetDecoder, version int16) error { // Components componentCount, err := pd.getArrayLength() if err != nil { return err } if componentCount > 0 { d.Components = make([]QuotaFilterComponent, componentCount) for i := range d.Components { c := QuotaFilterComponent{} if err = c.decode(pd, version); err != nil { return err } d.Components[i] = c } } else { d.Components = []QuotaFilterComponent{} } // Strict strict, err := pd.getBool() if err != nil { return err } d.Strict = strict return nil } func (d *QuotaFilterComponent) encode(pe packetEncoder) error { // EntityType if err := pe.putString(string(d.EntityType)); err != nil { return err } // MatchType pe.putInt8(int8(d.MatchType)) // Match if d.MatchType == QuotaMatchAny { if err := pe.putNullableString(nil); err != nil { return err } } else if d.MatchType == QuotaMatchDefault { if err := pe.putString(""); err != nil { return err } } else { if err := pe.putString(d.Match); err != nil { return err } } return nil } func (d *QuotaFilterComponent) decode(pd packetDecoder, version int16) error { // EntityType entityType, err := pd.getString() if err != nil { return err } d.EntityType = QuotaEntityType(entityType) // MatchType matchType, err := pd.getInt8() if err != nil { return err } d.MatchType = QuotaMatchType(matchType) // Match match, err := pd.getNullableString() if err != nil { return err } if match != nil { d.Match = *match } return nil } func (d *DescribeClientQuotasRequest) key() int16 { return 48 } func (d *DescribeClientQuotasRequest) version() int16 { return d.Version } func (d *DescribeClientQuotasRequest) headerVersion() int16 { return 1 } func (d *DescribeClientQuotasRequest) isValidVersion() bool { return d.Version == 0 } func (d *DescribeClientQuotasRequest) requiredVersion() KafkaVersion { return V2_6_0_0 } golang-github-ibm-sarama-1.43.2/describe_client_quotas_request_test.go000066400000000000000000000044431461256741300262250ustar00rootroot00000000000000package sarama import "testing" var ( describeClientQuotasRequestAll = []byte{ 0, 0, 0, 0, // components len 0, // strict } describeClientQuotasRequestDefaultUser = []byte{ 0, 0, 0, 1, // components len 0, 4, 'u', 's', 'e', 'r', // entity type 1, // match type (default) 0, 0, // match *string 0, // strict } describeClientQuotasRequestOnlySpecificUser = []byte{ 0, 0, 0, 1, // components len 0, 4, 'u', 's', 'e', 'r', // entity type 0, // match type (exact) 0, 6, 's', 'a', 'r', 'a', 'm', 'a', // match *string 1, // strict } describeClientQuotasRequestMultiComponents = []byte{ 0, 0, 0, 2, // components len 0, 4, 'u', 's', 'e', 'r', // entity type 2, // match type (any) 255, 255, // match *string 0, 9, 'c', 'l', 'i', 'e', 'n', 't', '-', 'i', 'd', // entity type 1, // match type (default) 0, 0, // match *string 0, // strict } ) func TestDescribeClientQuotasRequest(t *testing.T) { // Match All req := 
&DescribeClientQuotasRequest{ Components: []QuotaFilterComponent{}, Strict: false, } testRequest(t, "Match All", req, describeClientQuotasRequestAll) // Match Default User defaultUser := QuotaFilterComponent{ EntityType: QuotaEntityUser, MatchType: QuotaMatchDefault, } req = &DescribeClientQuotasRequest{ Components: []QuotaFilterComponent{defaultUser}, Strict: false, } testRequest(t, "Match Default User", req, describeClientQuotasRequestDefaultUser) // Match Only Specific User specificUser := QuotaFilterComponent{ EntityType: QuotaEntityUser, MatchType: QuotaMatchExact, Match: "sarama", } req = &DescribeClientQuotasRequest{ Components: []QuotaFilterComponent{specificUser}, Strict: true, } testRequest(t, "Match Only Specific User", req, describeClientQuotasRequestOnlySpecificUser) // Match default client-id of any user anyUser := QuotaFilterComponent{ EntityType: QuotaEntityUser, MatchType: QuotaMatchAny, } defaultClientId := QuotaFilterComponent{ EntityType: QuotaEntityClientID, MatchType: QuotaMatchDefault, } req = &DescribeClientQuotasRequest{ Components: []QuotaFilterComponent{anyUser, defaultClientId}, Strict: false, } testRequest(t, "Match default client-id of any user", req, describeClientQuotasRequestMultiComponents) } golang-github-ibm-sarama-1.43.2/describe_client_quotas_response.go000066400000000000000000000123331461256741300253310ustar00rootroot00000000000000package sarama import ( "time" ) // DescribeClientQuotas Response (Version: 0) => throttle_time_ms error_code error_message [entries] // throttle_time_ms => INT32 // error_code => INT16 // error_message => NULLABLE_STRING // entries => [entity] [values] // entity => entity_type entity_name // entity_type => STRING // entity_name => NULLABLE_STRING // values => key value // key => STRING // value => FLOAT64 type DescribeClientQuotasResponse struct { Version int16 ThrottleTime time.Duration // The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. ErrorCode KError // The error code, or `0` if the quota description succeeded. ErrorMsg *string // The error message, or `null` if the quota description succeeded. Entries []DescribeClientQuotasEntry // A result entry. } type DescribeClientQuotasEntry struct { Entity []QuotaEntityComponent // The quota entity description. Values map[string]float64 // The quota values for the entity. 
} type QuotaEntityComponent struct { EntityType QuotaEntityType MatchType QuotaMatchType Name string } func (d *DescribeClientQuotasResponse) encode(pe packetEncoder) error { // ThrottleTime pe.putInt32(int32(d.ThrottleTime / time.Millisecond)) // ErrorCode pe.putInt16(int16(d.ErrorCode)) // ErrorMsg if err := pe.putNullableString(d.ErrorMsg); err != nil { return err } // Entries if err := pe.putArrayLength(len(d.Entries)); err != nil { return err } for _, e := range d.Entries { if err := e.encode(pe); err != nil { return err } } return nil } func (d *DescribeClientQuotasResponse) decode(pd packetDecoder, version int16) error { // ThrottleTime throttleTime, err := pd.getInt32() if err != nil { return err } d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond // ErrorCode errCode, err := pd.getInt16() if err != nil { return err } d.ErrorCode = KError(errCode) // ErrorMsg errMsg, err := pd.getNullableString() if err != nil { return err } d.ErrorMsg = errMsg // Entries entryCount, err := pd.getArrayLength() if err != nil { return err } if entryCount > 0 { d.Entries = make([]DescribeClientQuotasEntry, entryCount) for i := range d.Entries { e := DescribeClientQuotasEntry{} if err = e.decode(pd, version); err != nil { return err } d.Entries[i] = e } } else { d.Entries = []DescribeClientQuotasEntry{} } return nil } func (d *DescribeClientQuotasEntry) encode(pe packetEncoder) error { // Entity if err := pe.putArrayLength(len(d.Entity)); err != nil { return err } for _, e := range d.Entity { if err := e.encode(pe); err != nil { return err } } // Values if err := pe.putArrayLength(len(d.Values)); err != nil { return err } for key, value := range d.Values { // key if err := pe.putString(key); err != nil { return err } // value pe.putFloat64(value) } return nil } func (d *DescribeClientQuotasEntry) decode(pd packetDecoder, version int16) error { // Entity componentCount, err := pd.getArrayLength() if err != nil { return err } if componentCount > 0 { d.Entity = make([]QuotaEntityComponent, componentCount) for i := 0; i < componentCount; i++ { component := QuotaEntityComponent{} if err := component.decode(pd, version); err != nil { return err } d.Entity[i] = component } } else { d.Entity = []QuotaEntityComponent{} } // Values valueCount, err := pd.getArrayLength() if err != nil { return err } if valueCount > 0 { d.Values = make(map[string]float64, valueCount) for i := 0; i < valueCount; i++ { // key key, err := pd.getString() if err != nil { return err } // value value, err := pd.getFloat64() if err != nil { return err } d.Values[key] = value } } else { d.Values = map[string]float64{} } return nil } func (c *QuotaEntityComponent) encode(pe packetEncoder) error { // entity_type if err := pe.putString(string(c.EntityType)); err != nil { return err } // entity_name if c.MatchType == QuotaMatchDefault { if err := pe.putNullableString(nil); err != nil { return err } } else { if err := pe.putString(c.Name); err != nil { return err } } return nil } func (c *QuotaEntityComponent) decode(pd packetDecoder, version int16) error { // entity_type entityType, err := pd.getString() if err != nil { return err } c.EntityType = QuotaEntityType(entityType) // entity_name entityName, err := pd.getNullableString() if err != nil { return err } if entityName == nil { c.MatchType = QuotaMatchDefault } else { c.MatchType = QuotaMatchExact c.Name = *entityName } return nil } func (d *DescribeClientQuotasResponse) key() int16 { return 48 } func (d *DescribeClientQuotasResponse) version() int16 { return d.Version } func 
(d *DescribeClientQuotasResponse) headerVersion() int16 { return 0 } func (d *DescribeClientQuotasResponse) isValidVersion() bool { return d.Version == 0 } func (d *DescribeClientQuotasResponse) requiredVersion() KafkaVersion { return V2_6_0_0 } func (r *DescribeClientQuotasResponse) throttleTime() time.Duration { return r.ThrottleTime } golang-github-ibm-sarama-1.43.2/describe_client_quotas_response_test.go000066400000000000000000000062341461256741300263730ustar00rootroot00000000000000package sarama import "testing" var ( describeClientQuotasResponseError = []byte{ 0, 0, 0, 0, // ThrottleTime 0, 35, // ErrorCode 0, 41, 'C', 'u', 's', 't', 'o', 'm', ' ', 'e', 'n', 't', 'i', 't', 'y', ' ', 't', 'y', 'p', 'e', ' ', '\'', 'f', 'a', 'u', 'l', 't', 'y', '\'', ' ', 'n', 'o', 't', ' ', 's', 'u', 'p', 'p', 'o', 'r', 't', 'e', 'd', 0, 0, 0, 0, // Entries } describeClientQuotasResponseSingleValue = []byte{ 0, 0, 0, 0, // ThrottleTime 0, 0, // ErrorCode 255, 255, // ErrorMsg (nil) 0, 0, 0, 1, // Entries 0, 0, 0, 1, // Entity 0, 4, 'u', 's', 'e', 'r', // Entity type 255, 255, // Entity name (nil) 0, 0, 0, 1, // Values 0, 18, 'p', 'r', 'o', 'd', 'u', 'c', 'e', 'r', '_', 'b', 'y', 't', 'e', '_', 'r', 'a', 't', 'e', 65, 46, 132, 128, 0, 0, 0, 0, // 1000000 } describeClientQuotasResponseComplexEntity = []byte{ 0, 0, 0, 0, // ThrottleTime 0, 0, // ErrorCode 255, 255, // ErrorMsg (nil) 0, 0, 0, 2, // Entries 0, 0, 0, 1, // Entity 0, 4, 'u', 's', 'e', 'r', // Entity type 255, 255, // Entity name (nil) 0, 0, 0, 1, // Values 0, 18, 'p', 'r', 'o', 'd', 'u', 'c', 'e', 'r', '_', 'b', 'y', 't', 'e', '_', 'r', 'a', 't', 'e', 65, 46, 132, 128, 0, 0, 0, 0, // 1000000 0, 0, 0, 1, // Entity 0, 9, 'c', 'l', 'i', 'e', 'n', 't', '-', 'i', 'd', // Entity type 0, 6, 's', 'a', 'r', 'a', 'm', 'a', // Entity name 0, 0, 0, 1, // Values 0, 18, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', '_', 'b', 'y', 't', 'e', '_', 'r', 'a', 't', 'e', 65, 46, 132, 128, 0, 0, 0, 0, // 1000000 } ) func TestDescribeClientQuotasResponse(t *testing.T) { // Response With Error errMsg := "Custom entity type 'faulty' not supported" res := &DescribeClientQuotasResponse{ ThrottleTime: 0, ErrorCode: ErrUnsupportedVersion, ErrorMsg: &errMsg, Entries: []DescribeClientQuotasEntry{}, } testResponse(t, "Response With Error", res, describeClientQuotasResponseError) // Single Quota entry defaultUserComponent := QuotaEntityComponent{ EntityType: QuotaEntityUser, MatchType: QuotaMatchDefault, } entry := DescribeClientQuotasEntry{ Entity: []QuotaEntityComponent{defaultUserComponent}, Values: map[string]float64{"producer_byte_rate": 1000000}, } res = &DescribeClientQuotasResponse{ ThrottleTime: 0, ErrorCode: ErrNoError, ErrorMsg: nil, Entries: []DescribeClientQuotasEntry{entry}, } testResponse(t, "Single Value", res, describeClientQuotasResponseSingleValue) // Complex Quota entry saramaClientIDComponent := QuotaEntityComponent{ EntityType: QuotaEntityClientID, MatchType: QuotaMatchExact, Name: "sarama", } userEntry := DescribeClientQuotasEntry{ Entity: []QuotaEntityComponent{defaultUserComponent}, Values: map[string]float64{"producer_byte_rate": 1000000}, } clientEntry := DescribeClientQuotasEntry{ Entity: []QuotaEntityComponent{saramaClientIDComponent}, Values: map[string]float64{"consumer_byte_rate": 1000000}, } res = &DescribeClientQuotasResponse{ ThrottleTime: 0, ErrorCode: ErrNoError, ErrorMsg: nil, Entries: []DescribeClientQuotasEntry{userEntry, clientEntry}, } testResponse(t, "Complex Quota", res, describeClientQuotasResponseComplexEntity) } 
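The files above define both halves of the DescribeClientQuotas exchange: a filter of QuotaFilterComponent values goes out, and entries keyed by QuotaEntityComponent come back. The following standalone sketch (not part of the archived sources) shows one plausible way to use those exported types from application code; the printQuotas helper, the package main framing, and the empty response passed in main are illustrative assumptions, and actually obtaining a response from a broker is out of scope here.

package main

import (
	"fmt"

	"github.com/IBM/sarama"
)

// printQuotas is a hypothetical helper that walks a decoded response using only
// the exported fields defined above: each entry pairs an entity description with
// its quota values.
func printQuotas(resp *sarama.DescribeClientQuotasResponse) {
	for _, entry := range resp.Entries {
		for _, component := range entry.Entity {
			// MatchType distinguishes an exact name from the entity-type default.
			if component.MatchType == sarama.QuotaMatchDefault {
				fmt.Printf("<default %s> ", component.EntityType)
			} else {
				fmt.Printf("%s=%s ", component.EntityType, component.Name)
			}
		}
		for key, value := range entry.Values {
			fmt.Printf("%s=%g ", key, value)
		}
		fmt.Println()
	}
}

func main() {
	// A filter for the quota of one specific user, mirroring the request tests above.
	req := &sarama.DescribeClientQuotasRequest{
		Components: []sarama.QuotaFilterComponent{
			{EntityType: sarama.QuotaEntityUser, MatchType: sarama.QuotaMatchExact, Match: "sarama"},
		},
		Strict: true,
	}
	_ = req // sending the request to a broker is deliberately left out of this sketch
	printQuotas(&sarama.DescribeClientQuotasResponse{}) // prints nothing for an empty response
}

Because entry.Values is a plain map, the printed key order is not deterministic.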
golang-github-ibm-sarama-1.43.2/describe_configs_request.go000066400000000000000000000041711461256741300237420ustar00rootroot00000000000000package sarama type DescribeConfigsRequest struct { Version int16 Resources []*ConfigResource IncludeSynonyms bool } type ConfigResource struct { Type ConfigResourceType Name string ConfigNames []string } func (r *DescribeConfigsRequest) encode(pe packetEncoder) error { if err := pe.putArrayLength(len(r.Resources)); err != nil { return err } for _, c := range r.Resources { pe.putInt8(int8(c.Type)) if err := pe.putString(c.Name); err != nil { return err } if len(c.ConfigNames) == 0 { pe.putInt32(-1) continue } if err := pe.putStringArray(c.ConfigNames); err != nil { return err } } if r.Version >= 1 { pe.putBool(r.IncludeSynonyms) } return nil } func (r *DescribeConfigsRequest) decode(pd packetDecoder, version int16) (err error) { n, err := pd.getArrayLength() if err != nil { return err } r.Resources = make([]*ConfigResource, n) for i := 0; i < n; i++ { r.Resources[i] = &ConfigResource{} t, err := pd.getInt8() if err != nil { return err } r.Resources[i].Type = ConfigResourceType(t) name, err := pd.getString() if err != nil { return err } r.Resources[i].Name = name confLength, err := pd.getArrayLength() if err != nil { return err } if confLength == -1 { continue } cfnames := make([]string, confLength) for i := 0; i < confLength; i++ { s, err := pd.getString() if err != nil { return err } cfnames[i] = s } r.Resources[i].ConfigNames = cfnames } r.Version = version if r.Version >= 1 { b, err := pd.getBool() if err != nil { return err } r.IncludeSynonyms = b } return nil } func (r *DescribeConfigsRequest) key() int16 { return 32 } func (r *DescribeConfigsRequest) version() int16 { return r.Version } func (r *DescribeConfigsRequest) headerVersion() int16 { return 1 } func (r *DescribeConfigsRequest) isValidVersion() bool { return r.Version >= 0 && r.Version <= 2 } func (r *DescribeConfigsRequest) requiredVersion() KafkaVersion { switch r.Version { case 2: return V2_0_0_0 case 1: return V1_1_0_0 case 0: return V0_11_0_0 default: return V2_0_0_0 } } golang-github-ibm-sarama-1.43.2/describe_configs_request_test.go000066400000000000000000000053051461256741300250010ustar00rootroot00000000000000package sarama import "testing" var ( emptyDescribeConfigsRequest = []byte{ 0, 0, 0, 0, // 0 configs } singleDescribeConfigsRequest = []byte{ 0, 0, 0, 1, // 1 config 2, // a topic 0, 3, 'f', 'o', 'o', // topic name: foo 0, 0, 0, 1, // 1 config name 0, 10, // 10 chars 's', 'e', 'g', 'm', 'e', 'n', 't', '.', 'm', 's', } doubleDescribeConfigsRequest = []byte{ 0, 0, 0, 2, // 2 configs 2, // a topic 0, 3, 'f', 'o', 'o', // topic name: foo 0, 0, 0, 2, // 2 config name 0, 10, // 10 chars 's', 'e', 'g', 'm', 'e', 'n', 't', '.', 'm', 's', 0, 12, // 12 chars 'r', 'e', 't', 'e', 'n', 't', 'i', 'o', 'n', '.', 'm', 's', 2, // a topic 0, 3, 'b', 'a', 'r', // topic name: foo 0, 0, 0, 1, // 1 config 0, 10, // 10 chars 's', 'e', 'g', 'm', 'e', 'n', 't', '.', 'm', 's', } singleDescribeConfigsRequestAllConfigs = []byte{ 0, 0, 0, 1, // 1 config 2, // a topic 0, 3, 'f', 'o', 'o', // topic name: foo 255, 255, 255, 255, // all configs } singleDescribeConfigsRequestAllConfigsv1 = []byte{ 0, 0, 0, 1, // 1 config 2, // a topic 0, 3, 'f', 'o', 'o', // topic name: foo 255, 255, 255, 255, // no configs 1, // synonyms } ) func TestDescribeConfigsRequestv0(t *testing.T) { var request *DescribeConfigsRequest request = &DescribeConfigsRequest{ Version: 0, Resources: []*ConfigResource{}, } testRequest(t, "no 
requests", request, emptyDescribeConfigsRequest) configs := []string{"segment.ms"} request = &DescribeConfigsRequest{ Version: 0, Resources: []*ConfigResource{ { Type: TopicResource, Name: "foo", ConfigNames: configs, }, }, } testRequest(t, "one config", request, singleDescribeConfigsRequest) request = &DescribeConfigsRequest{ Version: 0, Resources: []*ConfigResource{ { Type: TopicResource, Name: "foo", ConfigNames: []string{"segment.ms", "retention.ms"}, }, { Type: TopicResource, Name: "bar", ConfigNames: []string{"segment.ms"}, }, }, } testRequest(t, "two configs", request, doubleDescribeConfigsRequest) request = &DescribeConfigsRequest{ Version: 0, Resources: []*ConfigResource{ { Type: TopicResource, Name: "foo", }, }, } testRequest(t, "one topic, all configs", request, singleDescribeConfigsRequestAllConfigs) } func TestDescribeConfigsRequestv1(t *testing.T) { request := &DescribeConfigsRequest{ Version: 1, Resources: []*ConfigResource{ { Type: TopicResource, Name: "foo", }, }, IncludeSynonyms: true, } testRequest(t, "one topic, all configs", request, singleDescribeConfigsRequestAllConfigsv1) } golang-github-ibm-sarama-1.43.2/describe_configs_response.go000066400000000000000000000141301461256741300241040ustar00rootroot00000000000000package sarama import ( "fmt" "time" ) type ConfigSource int8 func (s ConfigSource) String() string { switch s { case SourceUnknown: return "Unknown" case SourceTopic: return "Topic" case SourceDynamicBroker: return "DynamicBroker" case SourceDynamicDefaultBroker: return "DynamicDefaultBroker" case SourceStaticBroker: return "StaticBroker" case SourceDefault: return "Default" } return fmt.Sprintf("Source Invalid: %d", int(s)) } const ( SourceUnknown ConfigSource = iota SourceTopic SourceDynamicBroker SourceDynamicDefaultBroker SourceStaticBroker SourceDefault ) type DescribeConfigError struct { Err KError ErrMsg string } func (c *DescribeConfigError) Error() string { text := c.Err.Error() if c.ErrMsg != "" { text = fmt.Sprintf("%s - %s", text, c.ErrMsg) } return text } type DescribeConfigsResponse struct { Version int16 ThrottleTime time.Duration Resources []*ResourceResponse } type ResourceResponse struct { ErrorCode int16 ErrorMsg string Type ConfigResourceType Name string Configs []*ConfigEntry } type ConfigEntry struct { Name string Value string ReadOnly bool Default bool Source ConfigSource Sensitive bool Synonyms []*ConfigSynonym } type ConfigSynonym struct { ConfigName string ConfigValue string Source ConfigSource } func (r *DescribeConfigsResponse) encode(pe packetEncoder) (err error) { pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) if err = pe.putArrayLength(len(r.Resources)); err != nil { return err } for _, c := range r.Resources { if err = c.encode(pe, r.Version); err != nil { return err } } return nil } func (r *DescribeConfigsResponse) decode(pd packetDecoder, version int16) (err error) { r.Version = version throttleTime, err := pd.getInt32() if err != nil { return err } r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond n, err := pd.getArrayLength() if err != nil { return err } r.Resources = make([]*ResourceResponse, n) for i := 0; i < n; i++ { rr := &ResourceResponse{} if err := rr.decode(pd, version); err != nil { return err } r.Resources[i] = rr } return nil } func (r *DescribeConfigsResponse) key() int16 { return 32 } func (r *DescribeConfigsResponse) version() int16 { return r.Version } func (r *DescribeConfigsResponse) headerVersion() int16 { return 0 } func (r *DescribeConfigsResponse) isValidVersion() bool { return 
r.Version >= 0 && r.Version <= 2 } func (r *DescribeConfigsResponse) requiredVersion() KafkaVersion { switch r.Version { case 2: return V2_0_0_0 case 1: return V1_1_0_0 case 0: return V0_11_0_0 default: return V2_0_0_0 } } func (r *DescribeConfigsResponse) throttleTime() time.Duration { return r.ThrottleTime } func (r *ResourceResponse) encode(pe packetEncoder, version int16) (err error) { pe.putInt16(r.ErrorCode) if err = pe.putString(r.ErrorMsg); err != nil { return err } pe.putInt8(int8(r.Type)) if err = pe.putString(r.Name); err != nil { return err } if err = pe.putArrayLength(len(r.Configs)); err != nil { return err } for _, c := range r.Configs { if err = c.encode(pe, version); err != nil { return err } } return nil } func (r *ResourceResponse) decode(pd packetDecoder, version int16) (err error) { ec, err := pd.getInt16() if err != nil { return err } r.ErrorCode = ec em, err := pd.getString() if err != nil { return err } r.ErrorMsg = em t, err := pd.getInt8() if err != nil { return err } r.Type = ConfigResourceType(t) name, err := pd.getString() if err != nil { return err } r.Name = name n, err := pd.getArrayLength() if err != nil { return err } r.Configs = make([]*ConfigEntry, n) for i := 0; i < n; i++ { c := &ConfigEntry{} if err := c.decode(pd, version); err != nil { return err } r.Configs[i] = c } return nil } func (r *ConfigEntry) encode(pe packetEncoder, version int16) (err error) { if err = pe.putString(r.Name); err != nil { return err } if err = pe.putString(r.Value); err != nil { return err } pe.putBool(r.ReadOnly) if version <= 0 { pe.putBool(r.Default) pe.putBool(r.Sensitive) } else { pe.putInt8(int8(r.Source)) pe.putBool(r.Sensitive) if err := pe.putArrayLength(len(r.Synonyms)); err != nil { return err } for _, c := range r.Synonyms { if err = c.encode(pe, version); err != nil { return err } } } return nil } // https://cwiki.apache.org/confluence/display/KAFKA/KIP-226+-+Dynamic+Broker+Configuration func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) { if version == 0 { r.Source = SourceUnknown } name, err := pd.getString() if err != nil { return err } r.Name = name value, err := pd.getString() if err != nil { return err } r.Value = value read, err := pd.getBool() if err != nil { return err } r.ReadOnly = read if version == 0 { defaultB, err := pd.getBool() if err != nil { return err } r.Default = defaultB if defaultB { r.Source = SourceDefault } } else { source, err := pd.getInt8() if err != nil { return err } r.Source = ConfigSource(source) r.Default = r.Source == SourceDefault } sensitive, err := pd.getBool() if err != nil { return err } r.Sensitive = sensitive if version > 0 { n, err := pd.getArrayLength() if err != nil { return err } r.Synonyms = make([]*ConfigSynonym, n) for i := 0; i < n; i++ { s := &ConfigSynonym{} if err := s.decode(pd, version); err != nil { return err } r.Synonyms[i] = s } } return nil } func (c *ConfigSynonym) encode(pe packetEncoder, version int16) (err error) { err = pe.putString(c.ConfigName) if err != nil { return err } err = pe.putString(c.ConfigValue) if err != nil { return err } pe.putInt8(int8(c.Source)) return nil } func (c *ConfigSynonym) decode(pd packetDecoder, version int16) error { name, err := pd.getString() if err != nil { return err } c.ConfigName = name value, err := pd.getString() if err != nil { return err } c.ConfigValue = value source, err := pd.getInt8() if err != nil { return err } c.Source = ConfigSource(source) return nil } 
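The response type above nests three levels deep (resources, then config entries, then synonyms), so a short walk of the decoded structure may help before the tests that follow. The program below is a usage sketch rather than part of the library; dumpConfigs is a hypothetical helper name, and the empty response passed in main merely stands in for one obtained from a broker.

package main

import (
	"fmt"

	"github.com/IBM/sarama"
)

// dumpConfigs is a hypothetical helper that prints how the decoded structures nest:
// resources contain config entries, and (for v1+) each entry may carry synonyms
// describing where the effective value was inherited from.
func dumpConfigs(resp *sarama.DescribeConfigsResponse) {
	for _, resource := range resp.Resources {
		fmt.Printf("resource %v %q (error code %d)\n", resource.Type, resource.Name, resource.ErrorCode)
		for _, entry := range resource.Configs {
			// ConfigSource implements String(), so %s prints a readable source name.
			fmt.Printf("  %s=%s source=%s sensitive=%v\n", entry.Name, entry.Value, entry.Source, entry.Sensitive)
			for _, synonym := range entry.Synonyms {
				fmt.Printf("    synonym %s=%s source=%s\n", synonym.ConfigName, synonym.ConfigValue, synonym.Source)
			}
		}
	}
}

func main() {
	// With a real response in hand, the same call prints every described resource.
	dumpConfigs(&sarama.DescribeConfigsResponse{})
}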
golang-github-ibm-sarama-1.43.2/describe_configs_response_test.go000066400000000000000000000145141461256741300251510ustar00rootroot00000000000000package sarama import ( "testing" ) var ( describeConfigsResponseEmpty = []byte{ 0, 0, 0, 0, // throttle 0, 0, 0, 0, // no configs } describeConfigsResponsePopulatedv0 = []byte{ 0, 0, 0, 0, // throttle 0, 0, 0, 1, // response 0, 0, // errorcode 0, 0, // string 2, // topic 0, 3, 'f', 'o', 'o', 0, 0, 0, 1, // configs 0, 10, 's', 'e', 'g', 'm', 'e', 'n', 't', '.', 'm', 's', 0, 4, '1', '0', '0', '0', 0, // ReadOnly 0, // Default 0, // Sensitive } describeConfigsResponseWithDefaultv0 = []byte{ 0, 0, 0, 0, // throttle 0, 0, 0, 1, // response 0, 0, // errorcode 0, 0, // string 2, // topic 0, 3, 'f', 'o', 'o', 0, 0, 0, 1, // configs 0, 10, 's', 'e', 'g', 'm', 'e', 'n', 't', '.', 'm', 's', 0, 4, '1', '0', '0', '0', 0, // ReadOnly 1, // Default 0, // Sensitive } describeConfigsResponsePopulatedv1 = []byte{ 0, 0, 0, 0, // throttle 0, 0, 0, 1, // response 0, 0, // errorcode 0, 0, // string 2, // topic 0, 3, 'f', 'o', 'o', 0, 0, 0, 1, // configs 0, 10, 's', 'e', 'g', 'm', 'e', 'n', 't', '.', 'm', 's', 0, 4, '1', '0', '0', '0', 0, // ReadOnly 4, // Source 0, // Sensitive 0, 0, 0, 0, // No Synonym } describeConfigsResponseWithSynonymv1 = []byte{ 0, 0, 0, 0, // throttle 0, 0, 0, 1, // response 0, 0, // errorcode 0, 0, // string 2, // topic 0, 3, 'f', 'o', 'o', 0, 0, 0, 1, // configs 0, 10, 's', 'e', 'g', 'm', 'e', 'n', 't', '.', 'm', 's', 0, 4, '1', '0', '0', '0', 0, // ReadOnly 4, // Source 0, // Sensitive 0, 0, 0, 1, // 1 Synonym 0, 14, 'l', 'o', 'g', '.', 's', 'e', 'g', 'm', 'e', 'n', 't', '.', 'm', 's', 0, 4, '1', '0', '0', '0', 4, // Source } describeConfigsResponseWithDefaultv1 = []byte{ 0, 0, 0, 0, // throttle 0, 0, 0, 1, // response 0, 0, // errorcode 0, 0, // string 2, // topic 0, 3, 'f', 'o', 'o', 0, 0, 0, 1, // configs 0, 10, 's', 'e', 'g', 'm', 'e', 'n', 't', '.', 'm', 's', 0, 4, '1', '0', '0', '0', 0, // ReadOnly 5, // Source 0, // Sensitive 0, 0, 0, 0, // No Synonym } ) func TestDescribeConfigsResponsev0(t *testing.T) { var response *DescribeConfigsResponse response = &DescribeConfigsResponse{ Resources: []*ResourceResponse{}, } testVersionDecodable(t, "empty", response, describeConfigsResponseEmpty, 0) if len(response.Resources) != 0 { t.Error("Expected no groups") } response = &DescribeConfigsResponse{ Version: 0, Resources: []*ResourceResponse{ { ErrorCode: 0, ErrorMsg: "", Type: TopicResource, Name: "foo", Configs: []*ConfigEntry{ { Name: "segment.ms", Value: "1000", ReadOnly: false, Default: false, Sensitive: false, Source: SourceUnknown, }, }, }, }, } testResponse(t, "response with error", response, describeConfigsResponsePopulatedv0) } func TestDescribeConfigsResponseWithDefaultv0(t *testing.T) { var response *DescribeConfigsResponse response = &DescribeConfigsResponse{ Resources: []*ResourceResponse{}, } testVersionDecodable(t, "empty", response, describeConfigsResponseEmpty, 0) if len(response.Resources) != 0 { t.Error("Expected no groups") } response = &DescribeConfigsResponse{ Version: 0, Resources: []*ResourceResponse{ { ErrorCode: 0, ErrorMsg: "", Type: TopicResource, Name: "foo", Configs: []*ConfigEntry{ { Name: "segment.ms", Value: "1000", ReadOnly: false, Default: true, Sensitive: false, Source: SourceDefault, }, }, }, }, } testResponse(t, "response with default", response, describeConfigsResponseWithDefaultv0) } func TestDescribeConfigsResponsev1(t *testing.T) { var response *DescribeConfigsResponse response = 
&DescribeConfigsResponse{ Resources: []*ResourceResponse{}, } testVersionDecodable(t, "empty", response, describeConfigsResponseEmpty, 0) if len(response.Resources) != 0 { t.Error("Expected no groups") } response = &DescribeConfigsResponse{ Version: 1, Resources: []*ResourceResponse{ { ErrorCode: 0, ErrorMsg: "", Type: TopicResource, Name: "foo", Configs: []*ConfigEntry{ { Name: "segment.ms", Value: "1000", ReadOnly: false, Source: SourceStaticBroker, Default: false, Sensitive: false, Synonyms: []*ConfigSynonym{}, }, }, }, }, } testResponse(t, "response with error", response, describeConfigsResponsePopulatedv1) } func TestDescribeConfigsResponseWithSynonym(t *testing.T) { var response *DescribeConfigsResponse response = &DescribeConfigsResponse{ Resources: []*ResourceResponse{}, } testVersionDecodable(t, "empty", response, describeConfigsResponseEmpty, 0) if len(response.Resources) != 0 { t.Error("Expected no groups") } response = &DescribeConfigsResponse{ Version: 1, Resources: []*ResourceResponse{ { ErrorCode: 0, ErrorMsg: "", Type: TopicResource, Name: "foo", Configs: []*ConfigEntry{ { Name: "segment.ms", Value: "1000", ReadOnly: false, Source: SourceStaticBroker, Default: false, Sensitive: false, Synonyms: []*ConfigSynonym{ { ConfigName: "log.segment.ms", ConfigValue: "1000", Source: SourceStaticBroker, }, }, }, }, }, }, } testResponse(t, "response with error", response, describeConfigsResponseWithSynonymv1) } func TestDescribeConfigsResponseWithDefaultv1(t *testing.T) { var response *DescribeConfigsResponse response = &DescribeConfigsResponse{ Resources: []*ResourceResponse{}, } testVersionDecodable(t, "empty", response, describeConfigsResponseEmpty, 0) if len(response.Resources) != 0 { t.Error("Expected no groups") } response = &DescribeConfigsResponse{ Version: 1, Resources: []*ResourceResponse{ { ErrorCode: 0, ErrorMsg: "", Type: TopicResource, Name: "foo", Configs: []*ConfigEntry{ { Name: "segment.ms", Value: "1000", ReadOnly: false, Source: SourceDefault, Default: true, Sensitive: false, Synonyms: []*ConfigSynonym{}, }, }, }, }, } testResponse(t, "response with error", response, describeConfigsResponseWithDefaultv1) } golang-github-ibm-sarama-1.43.2/describe_groups_request.go000066400000000000000000000024641461256741300236340ustar00rootroot00000000000000package sarama type DescribeGroupsRequest struct { Version int16 Groups []string IncludeAuthorizedOperations bool } func (r *DescribeGroupsRequest) encode(pe packetEncoder) error { if err := pe.putStringArray(r.Groups); err != nil { return err } if r.Version >= 3 { pe.putBool(r.IncludeAuthorizedOperations) } return nil } func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) { r.Version = version r.Groups, err = pd.getStringArray() if err != nil { return err } if r.Version >= 3 { if r.IncludeAuthorizedOperations, err = pd.getBool(); err != nil { return err } } return nil } func (r *DescribeGroupsRequest) key() int16 { return 15 } func (r *DescribeGroupsRequest) version() int16 { return r.Version } func (r *DescribeGroupsRequest) headerVersion() int16 { return 1 } func (r *DescribeGroupsRequest) isValidVersion() bool { return r.Version >= 0 && r.Version <= 4 } func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion { switch r.Version { case 4: return V2_4_0_0 case 3: return V2_3_0_0 case 2: return V2_0_0_0 case 1: return V0_11_0_0 case 0: return V0_9_0_0 default: return V2_4_0_0 } } func (r *DescribeGroupsRequest) AddGroup(group string) { r.Groups = append(r.Groups, group) } 
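Before the corresponding tests, a brief usage sketch of the request type above: AddGroup appends to Groups, while IncludeAuthorizedOperations only reaches the wire when Version is at least 3. The buildDescribeGroups helper and the package main framing are illustrative assumptions, not part of the archived sources.

package main

import "github.com/IBM/sarama"

// buildDescribeGroups is a hypothetical helper showing the request shape defined above.
func buildDescribeGroups(groups ...string) *sarama.DescribeGroupsRequest {
	req := &sarama.DescribeGroupsRequest{
		Version:                     3,    // per requiredVersion above, v3 needs a Kafka 2.3.0+ cluster
		IncludeAuthorizedOperations: true, // only encoded when Version >= 3
	}
	for _, group := range groups {
		req.AddGroup(group)
	}
	return req
}

func main() {
	_ = buildDescribeGroups("foo", "bar")
}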
golang-github-ibm-sarama-1.43.2/describe_groups_request_test.go000066400000000000000000000033611461256741300246700ustar00rootroot00000000000000package sarama import "testing" var ( emptyDescribeGroupsRequest = []byte{0, 0, 0, 0} singleDescribeGroupsRequestV0 = []byte{ 0, 0, 0, 1, // 1 group 0, 3, 'f', 'o', 'o', // group name: foo } doubleDescribeGroupsRequestV0 = []byte{ 0, 0, 0, 2, // 2 groups 0, 3, 'f', 'o', 'o', // group name: foo 0, 3, 'b', 'a', 'r', // group name: foo } ) func TestDescribeGroupsRequestV0(t *testing.T) { var request *DescribeGroupsRequest request = new(DescribeGroupsRequest) testRequest(t, "no groups", request, emptyDescribeGroupsRequest) request = new(DescribeGroupsRequest) request.AddGroup("foo") testRequest(t, "one group", request, singleDescribeGroupsRequestV0) request = new(DescribeGroupsRequest) request.AddGroup("foo") request.AddGroup("bar") testRequest(t, "two groups", request, doubleDescribeGroupsRequestV0) } var ( emptyDescribeGroupsRequestV3 = []byte{0, 0, 0, 0, 0} singleDescribeGroupsRequestV3 = []byte{ 0, 0, 0, 1, // 1 group 0, 3, 'f', 'o', 'o', // group name: foo 0, } doubleDescribeGroupsRequestV3 = []byte{ 0, 0, 0, 2, // 2 groups 0, 3, 'f', 'o', 'o', // group name: foo 0, 3, 'b', 'a', 'r', // group name: foo 1, } ) func TestDescribeGroupsRequestV3(t *testing.T) { var request *DescribeGroupsRequest request = new(DescribeGroupsRequest) request.Version = 3 testRequest(t, "no groups", request, emptyDescribeGroupsRequestV3) request = new(DescribeGroupsRequest) request.Version = 3 request.AddGroup("foo") testRequest(t, "one group", request, singleDescribeGroupsRequestV3) request = new(DescribeGroupsRequest) request.Version = 3 request.AddGroup("foo") request.AddGroup("bar") request.IncludeAuthorizedOperations = true testRequest(t, "two groups", request, doubleDescribeGroupsRequestV3) } golang-github-ibm-sarama-1.43.2/describe_groups_response.go000066400000000000000000000155511461256741300240030ustar00rootroot00000000000000package sarama import "time" type DescribeGroupsResponse struct { // Version defines the protocol version to use for encode and decode Version int16 // ThrottleTimeMs contains the duration in milliseconds for which the // request was throttled due to a quota violation, or zero if the request // did not violate any quota. ThrottleTimeMs int32 // Groups contains each described group. 
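// Per-group failures are reported through each GroupDescription's Err and ErrorCode
// fields rather than at the top level of the response.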
Groups []*GroupDescription } func (r *DescribeGroupsResponse) encode(pe packetEncoder) (err error) { if r.Version >= 1 { pe.putInt32(r.ThrottleTimeMs) } if err := pe.putArrayLength(len(r.Groups)); err != nil { return err } for _, block := range r.Groups { if err := block.encode(pe, r.Version); err != nil { return err } } return nil } func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) { r.Version = version if r.Version >= 1 { if r.ThrottleTimeMs, err = pd.getInt32(); err != nil { return err } } if numGroups, err := pd.getArrayLength(); err != nil { return err } else if numGroups > 0 { r.Groups = make([]*GroupDescription, numGroups) for i := 0; i < numGroups; i++ { block := &GroupDescription{} if err := block.decode(pd, r.Version); err != nil { return err } r.Groups[i] = block } } return nil } func (r *DescribeGroupsResponse) key() int16 { return 15 } func (r *DescribeGroupsResponse) version() int16 { return r.Version } func (r *DescribeGroupsResponse) headerVersion() int16 { return 0 } func (r *DescribeGroupsResponse) isValidVersion() bool { return r.Version >= 0 && r.Version <= 4 } func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion { switch r.Version { case 4: return V2_4_0_0 case 3: return V2_3_0_0 case 2: return V2_0_0_0 case 1: return V0_11_0_0 case 0: return V0_9_0_0 default: return V2_4_0_0 } } func (r *DescribeGroupsResponse) throttleTime() time.Duration { return time.Duration(r.ThrottleTimeMs) * time.Millisecond } // GroupDescription contains each described group. type GroupDescription struct { // Version defines the protocol version to use for encode and decode Version int16 // Err contains the describe error as the KError type. Err KError // ErrorCode contains the describe error, or 0 if there was no error. ErrorCode int16 // GroupId contains the group ID string. GroupId string // State contains the group state string, or the empty string. State string // ProtocolType contains the group protocol type, or the empty string. ProtocolType string // Protocol contains the group protocol data, or the empty string. Protocol string // Members contains the group members. Members map[string]*GroupMemberDescription // AuthorizedOperations contains a 32-bit bitfield to represent authorized // operations for this group. 
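// It is only written and read on the wire for versions 3 and above.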
AuthorizedOperations int32 } func (gd *GroupDescription) encode(pe packetEncoder, version int16) (err error) { gd.Version = version pe.putInt16(gd.ErrorCode) if err := pe.putString(gd.GroupId); err != nil { return err } if err := pe.putString(gd.State); err != nil { return err } if err := pe.putString(gd.ProtocolType); err != nil { return err } if err := pe.putString(gd.Protocol); err != nil { return err } if err := pe.putArrayLength(len(gd.Members)); err != nil { return err } for _, block := range gd.Members { if err := block.encode(pe, gd.Version); err != nil { return err } } if gd.Version >= 3 { pe.putInt32(gd.AuthorizedOperations) } return nil } func (gd *GroupDescription) decode(pd packetDecoder, version int16) (err error) { gd.Version = version if gd.ErrorCode, err = pd.getInt16(); err != nil { return err } gd.Err = KError(gd.ErrorCode) if gd.GroupId, err = pd.getString(); err != nil { return err } if gd.State, err = pd.getString(); err != nil { return err } if gd.ProtocolType, err = pd.getString(); err != nil { return err } if gd.Protocol, err = pd.getString(); err != nil { return err } if numMembers, err := pd.getArrayLength(); err != nil { return err } else if numMembers > 0 { gd.Members = make(map[string]*GroupMemberDescription, numMembers) for i := 0; i < numMembers; i++ { block := &GroupMemberDescription{} if err := block.decode(pd, gd.Version); err != nil { return err } gd.Members[block.MemberId] = block } } if gd.Version >= 3 { if gd.AuthorizedOperations, err = pd.getInt32(); err != nil { return err } } return nil } // GroupMemberDescription contains the group members. type GroupMemberDescription struct { // Version defines the protocol version to use for encode and decode Version int16 // MemberId contains the member ID assigned by the group coordinator. MemberId string // GroupInstanceId contains the unique identifier of the consumer instance // provided by end user. GroupInstanceId *string // ClientId contains the client ID used in the member's latest join group // request. ClientId string // ClientHost contains the client host. ClientHost string // MemberMetadata contains the metadata corresponding to the current group // protocol in use. MemberMetadata []byte // MemberAssignment contains the current assignment provided by the group // leader. 
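// It can be unpacked with GetMemberAssignment, just as MemberMetadata can be
// unpacked with GetMemberMetadata.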
MemberAssignment []byte } func (gmd *GroupMemberDescription) encode(pe packetEncoder, version int16) (err error) { gmd.Version = version if err := pe.putString(gmd.MemberId); err != nil { return err } if gmd.Version >= 4 { if err := pe.putNullableString(gmd.GroupInstanceId); err != nil { return err } } if err := pe.putString(gmd.ClientId); err != nil { return err } if err := pe.putString(gmd.ClientHost); err != nil { return err } if err := pe.putBytes(gmd.MemberMetadata); err != nil { return err } if err := pe.putBytes(gmd.MemberAssignment); err != nil { return err } return nil } func (gmd *GroupMemberDescription) decode(pd packetDecoder, version int16) (err error) { gmd.Version = version if gmd.MemberId, err = pd.getString(); err != nil { return err } if gmd.Version >= 4 { if gmd.GroupInstanceId, err = pd.getNullableString(); err != nil { return err } } if gmd.ClientId, err = pd.getString(); err != nil { return err } if gmd.ClientHost, err = pd.getString(); err != nil { return err } if gmd.MemberMetadata, err = pd.getBytes(); err != nil { return err } if gmd.MemberAssignment, err = pd.getBytes(); err != nil { return err } return nil } func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) { if len(gmd.MemberAssignment) == 0 { return nil, nil } assignment := new(ConsumerGroupMemberAssignment) err := decode(gmd.MemberAssignment, assignment, nil) return assignment, err } func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) { if len(gmd.MemberMetadata) == 0 { return nil, nil } metadata := new(ConsumerGroupMemberMetadata) err := decode(gmd.MemberMetadata, metadata, nil) return metadata, err } golang-github-ibm-sarama-1.43.2/describe_groups_response_test.go000066400000000000000000000155761461256741300250470ustar00rootroot00000000000000package sarama import ( "errors" "reflect" "testing" "github.com/stretchr/testify/assert" ) var ( describeGroupsResponseEmptyV0 = []byte{ 0, 0, 0, 0, // no groups } describeGroupsResponsePopulatedV0 = []byte{ 0, 0, 0, 2, // 2 groups 0, 0, // no error 0, 3, 'f', 'o', 'o', // Group ID 0, 3, 'b', 'a', 'r', // State 0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // ConsumerProtocol type 0, 3, 'b', 'a', 'z', // Protocol name 0, 0, 0, 1, // 1 member 0, 2, 'i', 'd', // Member ID 0, 6, 's', 'a', 'r', 'a', 'm', 'a', // Client ID 0, 9, 'l', 'o', 'c', 'a', 'l', 'h', 'o', 's', 't', // Client Host 0, 0, 0, 3, 0x01, 0x02, 0x03, // MemberMetadata 0, 0, 0, 3, 0x04, 0x05, 0x06, // MemberAssignment 0, 30, // ErrGroupAuthorizationFailed 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, } ) func TestDescribeGroupsResponseV0(t *testing.T) { var response *DescribeGroupsResponse response = new(DescribeGroupsResponse) testVersionDecodable(t, "empty", response, describeGroupsResponseEmptyV0, 0) if len(response.Groups) != 0 { t.Error("Expected no groups") } response = new(DescribeGroupsResponse) testVersionDecodable(t, "populated", response, describeGroupsResponsePopulatedV0, 0) if len(response.Groups) != 2 { t.Error("Expected two groups") } group0 := response.Groups[0] if !errors.Is(group0.Err, ErrNoError) { t.Error("Unexpected groups[0].Err, found", group0.Err) } if group0.GroupId != "foo" { t.Error("Unexpected groups[0].GroupId, found", group0.GroupId) } if group0.State != "bar" { t.Error("Unexpected groups[0].State, found", group0.State) } if group0.ProtocolType != "consumer" { t.Error("Unexpected groups[0].ProtocolType, found", group0.ProtocolType) } if group0.Protocol != "baz" { t.Error("Unexpected groups[0].Protocol,
found", group0.Protocol) } if len(group0.Members) != 1 { t.Error("Unexpected groups[0].Members, found", group0.Members) } if group0.Members["id"].ClientId != "sarama" { t.Error("Unexpected groups[0].Members[id].ClientId, found", group0.Members["id"].ClientId) } if group0.Members["id"].ClientHost != "localhost" { t.Error("Unexpected groups[0].Members[id].ClientHost, found", group0.Members["id"].ClientHost) } if !reflect.DeepEqual(group0.Members["id"].MemberMetadata, []byte{0x01, 0x02, 0x03}) { t.Error("Unexpected groups[0].Members[id].MemberMetadata, found", group0.Members["id"].MemberMetadata) } if !reflect.DeepEqual(group0.Members["id"].MemberAssignment, []byte{0x04, 0x05, 0x06}) { t.Error("Unexpected groups[0].Members[id].MemberAssignment, found", group0.Members["id"].MemberAssignment) } group1 := response.Groups[1] if !errors.Is(group1.Err, ErrGroupAuthorizationFailed) { t.Error("Unexpected groups[1].Err, found", group1.Err) } if len(group1.Members) != 0 { t.Error("Unexpected groups[1].Members, found", group1.Members) } } var ( describeGroupsResponseEmptyV3 = []byte{ 0, 0, 0, 0, // throttle time 0 0, 0, 0, 0, // no groups } describeGroupsResponsePopulatedV3 = []byte{ 0, 0, 0, 0, // throttle time 0 0, 0, 0, 2, // 2 groups 0, 0, // no error 0, 3, 'f', 'o', 'o', // Group ID 0, 3, 'b', 'a', 'r', // State 0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // ConsumerProtocol type 0, 3, 'b', 'a', 'z', // Protocol name 0, 0, 0, 1, // 1 member 0, 2, 'i', 'd', // Member ID 0, 6, 's', 'a', 'r', 'a', 'm', 'a', // Client ID 0, 9, 'l', 'o', 'c', 'a', 'l', 'h', 'o', 's', 't', // Client Host 0, 0, 0, 3, 0x01, 0x02, 0x03, // MemberMetadata 0, 0, 0, 3, 0x04, 0x05, 0x06, // MemberAssignment 0, 0, 0, 0, // authorizedOperations 0 0, 30, // ErrGroupAuthorizationFailed 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // authorizedOperations 0 } describeGroupsResponseEmptyV4 = []byte{ 0, 0, 0, 0, // throttle time 0 0, 0, 0, 0, // no groups } describeGroupsResponsePopulatedV4 = []byte{ 0, 0, 0, 0, // throttle time 0 0, 0, 0, 2, // 2 groups 0, 0, // no error 0, 3, 'f', 'o', 'o', // Group ID 0, 3, 'b', 'a', 'r', // State 0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // ConsumerProtocol type 0, 3, 'b', 'a', 'z', // Protocol name 0, 0, 0, 1, // 1 member 0, 2, 'i', 'd', // Member ID 0, 3, 'g', 'i', 'd', // Group Instance ID 0, 6, 's', 'a', 'r', 'a', 'm', 'a', // Client ID 0, 9, 'l', 'o', 'c', 'a', 'l', 'h', 'o', 's', 't', // Client Host 0, 0, 0, 3, 0x01, 0x02, 0x03, // MemberMetadata 0, 0, 0, 3, 0x04, 0x05, 0x06, // MemberAssignment 0, 0, 0, 0, // authorizedOperations 0 0, 30, // ErrGroupAuthorizationFailed 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // authorizedOperations 0 } ) func TestDescribeGroupsResponseV1plus(t *testing.T) { groupInstanceId := "gid" tests := []struct { Name string Version int16 MessageBytes []byte Message *DescribeGroupsResponse }{ { "empty", 3, describeGroupsResponseEmptyV3, &DescribeGroupsResponse{ Version: 3, }, }, { "populated", 3, describeGroupsResponsePopulatedV3, &DescribeGroupsResponse{ Version: 3, ThrottleTimeMs: int32(0), Groups: []*GroupDescription{ { Version: 3, Err: KError(0), GroupId: "foo", State: "bar", ProtocolType: "consumer", Protocol: "baz", Members: map[string]*GroupMemberDescription{ "id": { Version: 3, MemberId: "id", ClientId: "sarama", ClientHost: "localhost", MemberMetadata: []byte{1, 2, 3}, MemberAssignment: []byte{4, 5, 6}, }, }, }, { Version: 3, Err: KError(30), ErrorCode: 30, }, }, }, }, { "empty", 4, describeGroupsResponseEmptyV4, &DescribeGroupsResponse{ Version: 4,
}, }, { "populated", 4, describeGroupsResponsePopulatedV4, &DescribeGroupsResponse{ Version: 4, ThrottleTimeMs: int32(0), Groups: []*GroupDescription{ { Version: 4, Err: KError(0), GroupId: "foo", State: "bar", ProtocolType: "consumer", Protocol: "baz", Members: map[string]*GroupMemberDescription{ "id": { Version: 4, MemberId: "id", GroupInstanceId: &groupInstanceId, ClientId: "sarama", ClientHost: "localhost", MemberMetadata: []byte{1, 2, 3}, MemberAssignment: []byte{4, 5, 6}, }, }, }, { Version: 4, Err: KError(30), ErrorCode: 30, }, }, }, }, } for _, c := range tests { t.Run(c.Name, func(t *testing.T) { response := new(DescribeGroupsResponse) testVersionDecodable(t, c.Name, response, c.MessageBytes, c.Version) if !assert.Equal(t, c.Message, response) { t.Errorf("case %s decode failed, expected:%+v got %+v", c.Name, c.Message, response) } testEncodable(t, c.Name, c.Message, c.MessageBytes) }) } } golang-github-ibm-sarama-1.43.2/describe_log_dirs_request.go000066400000000000000000000037411461256741300241160ustar00rootroot00000000000000package sarama // DescribeLogDirsRequest is a describe request to get partitions' log size type DescribeLogDirsRequest struct { // Version 0 and 1 are equal // The version number is bumped to indicate that on quota violation brokers send out responses before throttling. Version int16 // If this is an empty array, all topics will be queried DescribeTopics []DescribeLogDirsRequestTopic } // DescribeLogDirsRequestTopic is a describe request about the log dir of one or more partitions within a Topic type DescribeLogDirsRequestTopic struct { Topic string PartitionIDs []int32 } func (r *DescribeLogDirsRequest) encode(pe packetEncoder) error { length := len(r.DescribeTopics) if length == 0 { // In order to query all topics we must send null length = -1 } if err := pe.putArrayLength(length); err != nil { return err } for _, d := range r.DescribeTopics { if err := pe.putString(d.Topic); err != nil { return err } if err := pe.putInt32Array(d.PartitionIDs); err != nil { return err } } return nil } func (r *DescribeLogDirsRequest) decode(pd packetDecoder, version int16) error { n, err := pd.getArrayLength() if err != nil { return err } if n == -1 { n = 0 } topics := make([]DescribeLogDirsRequestTopic, n) for i := 0; i < n; i++ { topics[i] = DescribeLogDirsRequestTopic{} topic, err := pd.getString() if err != nil { return err } topics[i].Topic = topic pIDs, err := pd.getInt32Array() if err != nil { return err } topics[i].PartitionIDs = pIDs } r.DescribeTopics = topics return nil } func (r *DescribeLogDirsRequest) key() int16 { return 35 } func (r *DescribeLogDirsRequest) version() int16 { return r.Version } func (r *DescribeLogDirsRequest) headerVersion() int16 { return 1 } func (r *DescribeLogDirsRequest) isValidVersion() bool { return r.Version >= 0 && r.Version <= 1 } func (r *DescribeLogDirsRequest) requiredVersion() KafkaVersion { if r.Version > 0 { return V2_0_0_0 } return V1_0_0_0 } golang-github-ibm-sarama-1.43.2/describe_log_dirs_request_test.go000066400000000000000000000015371461256741300251560ustar00rootroot00000000000000package sarama import "testing" var ( emptyDescribeLogDirsRequest = []byte{255, 255, 255, 255} // Empty array (array length -1 sent) topicDescribeLogDirsRequest = []byte{ 0, 0, 0, 1, // DescribeTopics array, Array length 1 0, 6, // Topic name length 6 'r', 'a', 'n', 'd', 'o', 'm', // Topic name 0, 0, 0, 2, // PartitionIDs int32 array, Array length 2 0, 0, 0, 25, // PartitionID 25 0, 0, 0, 26, // PartitionID 26 } ) func 
TestDescribeLogDirsRequest(t *testing.T) { request := &DescribeLogDirsRequest{ Version: 0, DescribeTopics: []DescribeLogDirsRequestTopic{}, } testRequest(t, "no topics", request, emptyDescribeLogDirsRequest) request.DescribeTopics = []DescribeLogDirsRequestTopic{ { Topic: "random", PartitionIDs: []int32{25, 26}, }, } testRequest(t, "no topics", request, topicDescribeLogDirsRequest) } golang-github-ibm-sarama-1.43.2/describe_log_dirs_response.go000066400000000000000000000116211461256741300242600ustar00rootroot00000000000000package sarama import "time" type DescribeLogDirsResponse struct { ThrottleTime time.Duration // Version 0 and 1 are equal // The version number is bumped to indicate that on quota violation brokers send out responses before throttling. Version int16 LogDirs []DescribeLogDirsResponseDirMetadata } func (r *DescribeLogDirsResponse) encode(pe packetEncoder) error { pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) if err := pe.putArrayLength(len(r.LogDirs)); err != nil { return err } for _, dir := range r.LogDirs { if err := dir.encode(pe); err != nil { return err } } return nil } func (r *DescribeLogDirsResponse) decode(pd packetDecoder, version int16) error { throttleTime, err := pd.getInt32() if err != nil { return err } r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond // Decode array of DescribeLogDirsResponseDirMetadata n, err := pd.getArrayLength() if err != nil { return err } r.LogDirs = make([]DescribeLogDirsResponseDirMetadata, n) for i := 0; i < n; i++ { dir := DescribeLogDirsResponseDirMetadata{} if err := dir.decode(pd, version); err != nil { return err } r.LogDirs[i] = dir } return nil } func (r *DescribeLogDirsResponse) key() int16 { return 35 } func (r *DescribeLogDirsResponse) version() int16 { return r.Version } func (r *DescribeLogDirsResponse) headerVersion() int16 { return 0 } func (r *DescribeLogDirsResponse) isValidVersion() bool { return r.Version >= 0 && r.Version <= 1 } func (r *DescribeLogDirsResponse) requiredVersion() KafkaVersion { if r.Version > 0 { return V2_0_0_0 } return V1_0_0_0 } func (r *DescribeLogDirsResponse) throttleTime() time.Duration { return r.ThrottleTime } type DescribeLogDirsResponseDirMetadata struct { ErrorCode KError // The absolute log directory path Path string Topics []DescribeLogDirsResponseTopic } func (r *DescribeLogDirsResponseDirMetadata) encode(pe packetEncoder) error { pe.putInt16(int16(r.ErrorCode)) if err := pe.putString(r.Path); err != nil { return err } if err := pe.putArrayLength(len(r.Topics)); err != nil { return err } for _, topic := range r.Topics { if err := topic.encode(pe); err != nil { return err } } return nil } func (r *DescribeLogDirsResponseDirMetadata) decode(pd packetDecoder, version int16) error { errCode, err := pd.getInt16() if err != nil { return err } r.ErrorCode = KError(errCode) path, err := pd.getString() if err != nil { return err } r.Path = path // Decode array of DescribeLogDirsResponseTopic n, err := pd.getArrayLength() if err != nil { return err } r.Topics = make([]DescribeLogDirsResponseTopic, n) for i := 0; i < n; i++ { t := DescribeLogDirsResponseTopic{} if err := t.decode(pd, version); err != nil { return err } r.Topics[i] = t } return nil } // DescribeLogDirsResponseTopic contains a topic's partitions descriptions type DescribeLogDirsResponseTopic struct { Topic string Partitions []DescribeLogDirsResponsePartition } func (r *DescribeLogDirsResponseTopic) encode(pe packetEncoder) error { if err := pe.putString(r.Topic); err != nil { return err } if err := 
pe.putArrayLength(len(r.Partitions)); err != nil { return err } for _, partition := range r.Partitions { if err := partition.encode(pe); err != nil { return err } } return nil } func (r *DescribeLogDirsResponseTopic) decode(pd packetDecoder, version int16) error { t, err := pd.getString() if err != nil { return err } r.Topic = t n, err := pd.getArrayLength() if err != nil { return err } r.Partitions = make([]DescribeLogDirsResponsePartition, n) for i := 0; i < n; i++ { p := DescribeLogDirsResponsePartition{} if err := p.decode(pd, version); err != nil { return err } r.Partitions[i] = p } return nil } // DescribeLogDirsResponsePartition describes a partition's log directory type DescribeLogDirsResponsePartition struct { PartitionID int32 // The size of the log segments of the partition in bytes. Size int64 // The lag of the log's LEO w.r.t. partition's HW (if it is the current log for the partition) or // current replica's LEO (if it is the future log for the partition) OffsetLag int64 // True if this log is created by AlterReplicaLogDirsRequest and will replace the current log of // the replica in the future. IsTemporary bool } func (r *DescribeLogDirsResponsePartition) encode(pe packetEncoder) error { pe.putInt32(r.PartitionID) pe.putInt64(r.Size) pe.putInt64(r.OffsetLag) pe.putBool(r.IsTemporary) return nil } func (r *DescribeLogDirsResponsePartition) decode(pd packetDecoder, version int16) error { pID, err := pd.getInt32() if err != nil { return err } r.PartitionID = pID size, err := pd.getInt64() if err != nil { return err } r.Size = size lag, err := pd.getInt64() if err != nil { return err } r.OffsetLag = lag isTemp, err := pd.getBool() if err != nil { return err } r.IsTemporary = isTemp return nil } golang-github-ibm-sarama-1.43.2/describe_log_dirs_response_test.go000066400000000000000000000040361461256741300253210ustar00rootroot00000000000000package sarama import ( "testing" ) var ( describeLogDirsResponseEmpty = []byte{ 0, 0, 0, 0, // no throttle time 0, 0, 0, 0, // no log dirs } describeLogDirsResponseTwoPartitions = []byte{ 0, 0, 0, 0, // no throttle time 0, 0, 0, 1, // One describe log dir (array length) 0, 0, // No error code 0, 6, // Character length of path (6 chars) '/', 'k', 'a', 'f', 'k', 'a', 0, 0, 0, 1, // One DescribeLogDirsResponseTopic (array length) 0, 6, // Character length of "random" topic (6 chars) 'r', 'a', 'n', 'd', 'o', 'm', // Topic name 0, 0, 0, 2, // Two DescribeLogDirsResponsePartition (array length) 0, 0, 0, 25, // PartitionID 25 0, 0, 0, 0, 0, 0, 0, 125, // Log Size 0, 0, 0, 0, 0, 0, 0, 0, // OffsetLag 0, // IsTemporary = false 0, 0, 0, 26, // PartitionID 26 0, 0, 0, 0, 0, 0, 0, 100, // Log Size 0, 0, 0, 0, 0, 0, 0, 0, // OffsetLag 0, // IsTemporary = false } ) func TestDescribeLogDirsResponse(t *testing.T) { // Test empty response response := &DescribeLogDirsResponse{ LogDirs: []DescribeLogDirsResponseDirMetadata{}, } testVersionDecodable(t, "empty", response, describeLogDirsResponseEmpty, 0) if len(response.LogDirs) != 0 { t.Error("Expected no log dirs") } response.LogDirs = []DescribeLogDirsResponseDirMetadata{ { ErrorCode: 0, Path: "/kafka", Topics: []DescribeLogDirsResponseTopic{ { Topic: "random", Partitions: []DescribeLogDirsResponsePartition{ { PartitionID: 25, Size: 125, OffsetLag: 0, IsTemporary: false, }, { PartitionID: 26, Size: 100, OffsetLag: 0, IsTemporary: false, }, }, }, }, }, } testVersionDecodable(t, "two partitions", response, describeLogDirsResponseTwoPartitions, 0) if len(response.LogDirs) != 1 { t.Error("Expected one log dir") }
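// The remaining assertions confirm the decoded structure mirrors the describeLogDirsResponseTwoPartitions fixture above: a single "/kafka" log dir holding the "random" topic with partitions 25 and 26.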
if len(response.LogDirs[0].Topics) != 1 { t.Error("Expected one topic in log dirs") } if len(response.LogDirs[0].Topics[0].Partitions) != 2 { t.Error("Expected two partitions") } } golang-github-ibm-sarama-1.43.2/describe_user_scram_credentials_request.go000066400000000000000000000034711461256741300270340ustar00rootroot00000000000000package sarama // DescribeUserScramCredentialsRequest is a request to get list of SCRAM user names type DescribeUserScramCredentialsRequest struct { // Version 0 is currently only supported Version int16 // If this is an empty array, all users will be queried DescribeUsers []DescribeUserScramCredentialsRequestUser } // DescribeUserScramCredentialsRequestUser is a describe request about specific user name type DescribeUserScramCredentialsRequestUser struct { Name string } func (r *DescribeUserScramCredentialsRequest) encode(pe packetEncoder) error { pe.putCompactArrayLength(len(r.DescribeUsers)) for _, d := range r.DescribeUsers { if err := pe.putCompactString(d.Name); err != nil { return err } pe.putEmptyTaggedFieldArray() } pe.putEmptyTaggedFieldArray() return nil } func (r *DescribeUserScramCredentialsRequest) decode(pd packetDecoder, version int16) error { n, err := pd.getCompactArrayLength() if err != nil { return err } if n == -1 { n = 0 } r.DescribeUsers = make([]DescribeUserScramCredentialsRequestUser, n) for i := 0; i < n; i++ { r.DescribeUsers[i] = DescribeUserScramCredentialsRequestUser{} if r.DescribeUsers[i].Name, err = pd.getCompactString(); err != nil { return err } if _, err = pd.getEmptyTaggedFieldArray(); err != nil { return err } } if _, err = pd.getEmptyTaggedFieldArray(); err != nil { return err } return nil } func (r *DescribeUserScramCredentialsRequest) key() int16 { return 50 } func (r *DescribeUserScramCredentialsRequest) version() int16 { return r.Version } func (r *DescribeUserScramCredentialsRequest) headerVersion() int16 { return 2 } func (r *DescribeUserScramCredentialsRequest) isValidVersion() bool { return r.Version == 0 } func (r *DescribeUserScramCredentialsRequest) requiredVersion() KafkaVersion { return V2_7_0_0 } golang-github-ibm-sarama-1.43.2/describe_user_scram_credentials_request_test.go000066400000000000000000000015121461256741300300650ustar00rootroot00000000000000package sarama import "testing" var ( emptyDescribeUserScramCredentialsRequest = []byte{ 1, 0, // empty tagged fields } userDescribeUserScramCredentialsRequest = []byte{ 2, // DescribeUsers array, Array length 1 7, // User name length 6 'r', 'a', 'n', 'd', 'o', 'm', // User name 0, 0, // empty tagged fields } ) func TestDescribeUserScramCredentialsRequest(t *testing.T) { request := &DescribeUserScramCredentialsRequest{ Version: 0, DescribeUsers: []DescribeUserScramCredentialsRequestUser{}, } testRequest(t, "no users", request, emptyDescribeUserScramCredentialsRequest) request.DescribeUsers = []DescribeUserScramCredentialsRequestUser{ { Name: "random", }, } testRequest(t, "single user", request, userDescribeUserScramCredentialsRequest) } golang-github-ibm-sarama-1.43.2/describe_user_scram_credentials_response.go000066400000000000000000000077421461256741300272070ustar00rootroot00000000000000package sarama import "time" type ScramMechanismType int8 const ( SCRAM_MECHANISM_UNKNOWN ScramMechanismType = iota // 0 SCRAM_MECHANISM_SHA_256 // 1 SCRAM_MECHANISM_SHA_512 // 2 ) func (s ScramMechanismType) String() string { switch s { case 1: return SASLTypeSCRAMSHA256 case 2: return SASLTypeSCRAMSHA512 default: return "Unknown" } } type DescribeUserScramCredentialsResponse 
struct { // Version 0 is currently only supported Version int16 ThrottleTime time.Duration ErrorCode KError ErrorMessage *string Results []*DescribeUserScramCredentialsResult } type DescribeUserScramCredentialsResult struct { User string ErrorCode KError ErrorMessage *string CredentialInfos []*UserScramCredentialsResponseInfo } type UserScramCredentialsResponseInfo struct { Mechanism ScramMechanismType Iterations int32 } func (r *DescribeUserScramCredentialsResponse) encode(pe packetEncoder) error { pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) pe.putInt16(int16(r.ErrorCode)) if err := pe.putNullableCompactString(r.ErrorMessage); err != nil { return err } pe.putCompactArrayLength(len(r.Results)) for _, u := range r.Results { if err := pe.putCompactString(u.User); err != nil { return err } pe.putInt16(int16(u.ErrorCode)) if err := pe.putNullableCompactString(u.ErrorMessage); err != nil { return err } pe.putCompactArrayLength(len(u.CredentialInfos)) for _, c := range u.CredentialInfos { pe.putInt8(int8(c.Mechanism)) pe.putInt32(c.Iterations) pe.putEmptyTaggedFieldArray() } pe.putEmptyTaggedFieldArray() } pe.putEmptyTaggedFieldArray() return nil } func (r *DescribeUserScramCredentialsResponse) decode(pd packetDecoder, version int16) error { throttleTime, err := pd.getInt32() if err != nil { return err } r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond kerr, err := pd.getInt16() if err != nil { return err } r.ErrorCode = KError(kerr) if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil { return err } numUsers, err := pd.getCompactArrayLength() if err != nil { return err } if numUsers > 0 { r.Results = make([]*DescribeUserScramCredentialsResult, numUsers) for i := 0; i < numUsers; i++ { r.Results[i] = &DescribeUserScramCredentialsResult{} if r.Results[i].User, err = pd.getCompactString(); err != nil { return err } errorCode, err := pd.getInt16() if err != nil { return err } r.Results[i].ErrorCode = KError(errorCode) if r.Results[i].ErrorMessage, err = pd.getCompactNullableString(); err != nil { return err } numCredentialInfos, err := pd.getCompactArrayLength() if err != nil { return err } r.Results[i].CredentialInfos = make([]*UserScramCredentialsResponseInfo, numCredentialInfos) for j := 0; j < numCredentialInfos; j++ { r.Results[i].CredentialInfos[j] = &UserScramCredentialsResponseInfo{} scramMechanism, err := pd.getInt8() if err != nil { return err } r.Results[i].CredentialInfos[j].Mechanism = ScramMechanismType(scramMechanism) if r.Results[i].CredentialInfos[j].Iterations, err = pd.getInt32(); err != nil { return err } if _, err = pd.getEmptyTaggedFieldArray(); err != nil { return err } } if _, err = pd.getEmptyTaggedFieldArray(); err != nil { return err } } } if _, err = pd.getEmptyTaggedFieldArray(); err != nil { return err } return nil } func (r *DescribeUserScramCredentialsResponse) key() int16 { return 50 } func (r *DescribeUserScramCredentialsResponse) version() int16 { return r.Version } func (r *DescribeUserScramCredentialsResponse) headerVersion() int16 { return 2 } func (r *DescribeUserScramCredentialsResponse) isValidVersion() bool { return r.Version == 0 } func (r *DescribeUserScramCredentialsResponse) requiredVersion() KafkaVersion { return V2_7_0_0 } func (r *DescribeUserScramCredentialsResponse) throttleTime() time.Duration { return r.ThrottleTime } golang-github-ibm-sarama-1.43.2/describe_user_scram_credentials_response_test.go000066400000000000000000000027651461256741300302460ustar00rootroot00000000000000package sarama import ( "testing" 
"time" ) var ( emptyDescribeUserScramCredentialsResponse = []byte{ 0, 0, 11, 184, // throttle time (3000 ms) 0, 0, // no error code 0, // no error message 1, // empty array 0, // tagged fields } userDescribeUserScramCredentialsResponse = []byte{ 0, 0, 11, 184, // throttle time (3000 ms) 0, 11, // Error Code 6, 'e', 'r', 'r', 'o', 'r', // ErrorMessage 2, // Results array length 7, 'n', 'o', 'b', 'o', 'd', 'y', // User 0, 13, // User ErrorCode 11, 'e', 'r', 'r', 'o', 'r', '_', 'u', 's', 'e', 'r', // User ErrorMessage 2, // CredentialInfos array length 2, // Mechanism 0, 0, 16, 0, // Iterations 0, 0, 0, } ) func TestDescribeUserScramCredentialsResponse(t *testing.T) { response := &DescribeUserScramCredentialsResponse{ Version: 0, ThrottleTime: time.Second * 3, } testResponse(t, "empty", response, emptyDescribeUserScramCredentialsResponse) responseErrorMessage := "error" responseUserErrorMessage := "error_user" response.ErrorCode = 11 response.ErrorMessage = &responseErrorMessage response.Results = append(response.Results, &DescribeUserScramCredentialsResult{ User: "nobody", ErrorCode: 13, ErrorMessage: &responseUserErrorMessage, CredentialInfos: []*UserScramCredentialsResponseInfo{ { Mechanism: SCRAM_MECHANISM_SHA_512, Iterations: 4096, }, }, }) testResponse(t, "empty", response, userDescribeUserScramCredentialsResponse) } golang-github-ibm-sarama-1.43.2/dev.yml000066400000000000000000000001631461256741300176510ustar00rootroot00000000000000name: sarama up: - go: version: '1.17.6' commands: test: run: make test desc: 'run unit tests' golang-github-ibm-sarama-1.43.2/docker-compose.yml000066400000000000000000000251061461256741300220110ustar00rootroot00000000000000version: '3.9' services: zookeeper-1: hostname: 'zookeeper-1' image: 'docker.io/library/zookeeper:3.6.3' init: true restart: always environment: ZOO_MY_ID: '1' ZOO_SERVERS: 'server.1=zookeeper-1:2888:3888 server.2=zookeeper-2:2888:3888 server.3=zookeeper-3:2888:3888' ZOO_CFG_EXTRA: 'clientPort=2181 peerPort=2888 leaderPort=3888' ZOO_INIT_LIMIT: '10' ZOO_SYNC_LIMIT: '5' ZOO_MAX_CLIENT_CNXNS: '0' ZOO_4LW_COMMANDS_WHITELIST: 'mntr,conf,ruok' zookeeper-2: hostname: 'zookeeper-2' image: 'docker.io/library/zookeeper:3.6.3' init: true restart: always environment: ZOO_MY_ID: '2' ZOO_SERVERS: 'server.1=zookeeper-1:2888:3888 server.2=zookeeper-2:2888:3888 server.3=zookeeper-3:2888:3888' ZOO_CFG_EXTRA: 'clientPort=2181 peerPort=2888 leaderPort=3888' ZOO_INIT_LIMIT: '10' ZOO_SYNC_LIMIT: '5' ZOO_MAX_CLIENT_CNXNS: '0' ZOO_4LW_COMMANDS_WHITELIST: 'mntr,conf,ruok' zookeeper-3: hostname: 'zookeeper-3' image: 'docker.io/library/zookeeper:3.6.3' init: true restart: always environment: ZOO_MY_ID: '3' ZOO_SERVERS: 'server.1=zookeeper-1:2888:3888 server.2=zookeeper-2:2888:3888 server.3=zookeeper-3:2888:3888' ZOO_CFG_EXTRA: 'clientPort=2181 peerPort=2888 leaderPort=3888' ZOO_INIT_LIMIT: '10' ZOO_SYNC_LIMIT: '5' ZOO_MAX_CLIENT_CNXNS: '0' ZOO_4LW_COMMANDS_WHITELIST: 'mntr,conf,ruok' kafka-1: hostname: 'kafka-1' image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' init: true build: context: . 
dockerfile: Dockerfile.kafka args: KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} SCALA_VERSION: ${SCALA_VERSION:-2.13} healthcheck: test: [ 'CMD', '/opt/kafka-${KAFKA_VERSION:-3.6.0}/bin/kafka-broker-api-versions.sh', '--bootstrap-server', 'kafka-1:9091', ] interval: 15s timeout: 15s retries: 10 start_period: 360s depends_on: - zookeeper-1 - zookeeper-2 - zookeeper-3 - toxiproxy restart: always environment: KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29091' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-1:9091,LISTENER_LOCAL://localhost:29091' KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: '2' KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '2' KAFKA_CFG_BROKER_ID: '1' KAFKA_CFG_BROKER_RACK: '1' KAFKA_CFG_ZOOKEEPER_SESSION_TIMEOUT_MS: '6000' KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '6000' KAFKA_CFG_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true' KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'false' KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_JVM_PERFORMANCE_OPTS: "-XX:+IgnoreUnrecognizedVMOptions" kafka-2: hostname: 'kafka-2' image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' init: true build: context: . dockerfile: Dockerfile.kafka args: KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} SCALA_VERSION: ${SCALA_VERSION:-2.13} healthcheck: test: [ 'CMD', '/opt/kafka-${KAFKA_VERSION:-3.6.0}/bin/kafka-broker-api-versions.sh', '--bootstrap-server', 'kafka-2:9091', ] interval: 15s timeout: 15s retries: 10 start_period: 360s depends_on: - zookeeper-1 - zookeeper-2 - zookeeper-3 - toxiproxy restart: always environment: KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29092' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-2:9091,LISTENER_LOCAL://localhost:29092' KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: '2' KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '2' KAFKA_CFG_BROKER_ID: '2' KAFKA_CFG_BROKER_RACK: '2' KAFKA_CFG_ZOOKEEPER_SESSION_TIMEOUT_MS: '6000' KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '6000' KAFKA_CFG_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true' KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'false' KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_JVM_PERFORMANCE_OPTS: "-XX:+IgnoreUnrecognizedVMOptions" kafka-3: hostname: 'kafka-3' image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' init: true build: context: . 
dockerfile: Dockerfile.kafka args: KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} SCALA_VERSION: ${SCALA_VERSION:-2.13} healthcheck: test: [ 'CMD', '/opt/kafka-${KAFKA_VERSION:-3.6.0}/bin/kafka-broker-api-versions.sh', '--bootstrap-server', 'kafka-3:9091', ] interval: 15s timeout: 15s retries: 10 start_period: 360s depends_on: - zookeeper-1 - zookeeper-2 - zookeeper-3 - toxiproxy restart: always environment: KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29093' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-3:9091,LISTENER_LOCAL://localhost:29093' KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: '2' KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '2' KAFKA_CFG_BROKER_ID: '3' KAFKA_CFG_BROKER_RACK: '3' KAFKA_CFG_ZOOKEEPER_SESSION_TIMEOUT_MS: '6000' KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '6000' KAFKA_CFG_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true' KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'false' KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_JVM_PERFORMANCE_OPTS: "-XX:+IgnoreUnrecognizedVMOptions" kafka-4: hostname: 'kafka-4' image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' init: true build: context: . dockerfile: Dockerfile.kafka args: KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} SCALA_VERSION: ${SCALA_VERSION:-2.13} healthcheck: test: [ 'CMD', '/opt/kafka-${KAFKA_VERSION:-3.6.0}/bin/kafka-broker-api-versions.sh', '--bootstrap-server', 'kafka-4:9091', ] interval: 15s timeout: 15s retries: 10 start_period: 360s depends_on: - zookeeper-1 - zookeeper-2 - zookeeper-3 - toxiproxy restart: always environment: KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29094' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-4:9091,LISTENER_LOCAL://localhost:29094' KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: '2' KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '2' KAFKA_CFG_BROKER_ID: '4' KAFKA_CFG_BROKER_RACK: '4' KAFKA_CFG_ZOOKEEPER_SESSION_TIMEOUT_MS: '6000' KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '6000' KAFKA_CFG_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true' KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'false' KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_JVM_PERFORMANCE_OPTS: "-XX:+IgnoreUnrecognizedVMOptions" kafka-5: hostname: 'kafka-5' image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' init: true build: context: . 
dockerfile: Dockerfile.kafka args: KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} SCALA_VERSION: ${SCALA_VERSION:-2.13} healthcheck: test: [ 'CMD', '/opt/kafka-${KAFKA_VERSION:-3.6.0}/bin/kafka-broker-api-versions.sh', '--bootstrap-server', 'kafka-5:9091', ] interval: 15s timeout: 15s retries: 10 start_period: 360s depends_on: - zookeeper-1 - zookeeper-2 - zookeeper-3 - toxiproxy restart: always environment: KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29095' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-5:9091,LISTENER_LOCAL://localhost:29095' KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: '2' KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '2' KAFKA_CFG_BROKER_ID: '5' KAFKA_CFG_BROKER_RACK: '5' KAFKA_CFG_ZOOKEEPER_SESSION_TIMEOUT_MS: '6000' KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '6000' KAFKA_CFG_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true' KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'false' KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_JVM_PERFORMANCE_OPTS: "-XX:+IgnoreUnrecognizedVMOptions" toxiproxy: hostname: 'toxiproxy' image: 'ghcr.io/shopify/toxiproxy:2.4.0' init: true healthcheck: test: ['CMD', '/toxiproxy-cli', 'l'] interval: 15s timeout: 15s retries: 3 start_period: 30s ports: # The tests themselves actually start the proxies on these ports - '29091:29091' - '29092:29092' - '29093:29093' - '29094:29094' - '29095:29095' # This is the toxiproxy API port - '8474:8474' golang-github-ibm-sarama-1.43.2/encoder_decoder.go000066400000000000000000000042521461256741300220060ustar00rootroot00000000000000package sarama import ( "fmt" "github.com/rcrowley/go-metrics" ) // Encoder is the interface that wraps the basic Encode method. // Anything implementing Encoder can be turned into bytes using Kafka's encoding rules. type encoder interface { encode(pe packetEncoder) error } type encoderWithHeader interface { encoder headerVersion() int16 } // Encode takes an Encoder and turns it into bytes while potentially recording metrics. func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) { if e == nil { return nil, nil } var prepEnc prepEncoder var realEnc realEncoder err := e.encode(&prepEnc) if err != nil { return nil, err } if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) { return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)} } realEnc.raw = make([]byte, prepEnc.length) realEnc.registry = metricRegistry err = e.encode(&realEnc) if err != nil { return nil, err } return realEnc.raw, nil } // decoder is the interface that wraps the basic Decode method. // Anything implementing Decoder can be extracted from bytes using Kafka's encoding rules. type decoder interface { decode(pd packetDecoder) error } type versionedDecoder interface { decode(pd packetDecoder, version int16) error } // decode takes bytes and a decoder and fills the fields of the decoder from the bytes, // interpreted using Kafka's encoding rules. 
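// A nil buf decodes as a no-op, and any bytes left unconsumed once decoding finishes result in a PacketDecodingError.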
func decode(buf []byte, in decoder, metricRegistry metrics.Registry) error { if buf == nil { return nil } helper := realDecoder{ raw: buf, registry: metricRegistry, } err := in.decode(&helper) if err != nil { return err } if helper.off != len(buf) { return PacketDecodingError{"invalid length"} } return nil } func versionedDecode(buf []byte, in versionedDecoder, version int16, metricRegistry metrics.Registry) error { if buf == nil { return nil } helper := realDecoder{ raw: buf, registry: metricRegistry, } err := in.decode(&helper, version) if err != nil { return err } if helper.off != len(buf) { return PacketDecodingError{ Info: fmt.Sprintf("invalid length (off=%d, len=%d)", helper.off, len(buf)), } } return nil } golang-github-ibm-sarama-1.43.2/encoder_decoder_fuzz_test.go000066400000000000000000000024161461256741300241230ustar00rootroot00000000000000//go:build go1.18 package sarama import ( "bytes" "testing" ) func FuzzDecodeEncodeProduceRequest(f *testing.F) { for _, seed := range [][]byte{ produceRequestEmpty, produceRequestHeader, produceRequestOneMessage, produceRequestOneRecord, } { f.Add(seed) } f.Fuzz(func(t *testing.T, in []byte) { for i := int16(0); i < 8; i++ { req := &ProduceRequest{} err := versionedDecode(in, req, i, nil) if err != nil { continue } out, err := encode(req, nil) if err != nil { t.Logf("%v: encode: %v", in, err) continue } if !bytes.Equal(in, out) { t.Logf("%v: not equal after round trip: %v", in, out) } } }) } func FuzzDecodeEncodeFetchRequest(f *testing.F) { for _, seed := range [][]byte{ fetchRequestNoBlocks, fetchRequestWithProperties, fetchRequestOneBlock, fetchRequestOneBlockV4, fetchRequestOneBlockV11, } { f.Add(seed) } f.Fuzz(func(t *testing.T, in []byte) { for i := int16(0); i < 11; i++ { req := &FetchRequest{} err := versionedDecode(in, req, i, nil) if err != nil { continue } out, err := encode(req, nil) if err != nil { t.Logf("%v: encode: %v", in, err) continue } if !bytes.Equal(in, out) { t.Logf("%v: not equal after round trip: %v", in, out) } } }) } golang-github-ibm-sarama-1.43.2/end_txn_request.go000066400000000000000000000023101461256741300221020ustar00rootroot00000000000000package sarama type EndTxnRequest struct { Version int16 TransactionalID string ProducerID int64 ProducerEpoch int16 TransactionResult bool } func (a *EndTxnRequest) encode(pe packetEncoder) error { if err := pe.putString(a.TransactionalID); err != nil { return err } pe.putInt64(a.ProducerID) pe.putInt16(a.ProducerEpoch) pe.putBool(a.TransactionResult) return nil } func (a *EndTxnRequest) decode(pd packetDecoder, version int16) (err error) { if a.TransactionalID, err = pd.getString(); err != nil { return err } if a.ProducerID, err = pd.getInt64(); err != nil { return err } if a.ProducerEpoch, err = pd.getInt16(); err != nil { return err } if a.TransactionResult, err = pd.getBool(); err != nil { return err } return nil } func (a *EndTxnRequest) key() int16 { return 26 } func (a *EndTxnRequest) version() int16 { return a.Version } func (r *EndTxnRequest) headerVersion() int16 { return 1 } func (a *EndTxnRequest) isValidVersion() bool { return a.Version >= 0 && a.Version <= 2 } func (a *EndTxnRequest) requiredVersion() KafkaVersion { switch a.Version { case 2: return V2_7_0_0 case 1: return V2_0_0_0 default: return V0_11_0_0 } } golang-github-ibm-sarama-1.43.2/end_txn_request_test.go000066400000000000000000000005241461256741300231460ustar00rootroot00000000000000package sarama import "testing" var endTxnRequest = []byte{ 0, 3, 't', 'x', 'n', 0, 0, 0, 0, 0, 0, 31, 64, 0, 1, 1, } 
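// The endTxnRequest fixture above matches the request constructed in TestEndTxnRequest below: 0, 3, 't', 'x', 'n' is the length-prefixed TransactionalID "txn", the next eight bytes are ProducerID 8000 (0x1F40) as an int64, 0, 1 is ProducerEpoch 1 as an int16, and the final 1 is TransactionResult (true) encoded as a boolean. // Illustrative sketch only (not part of the original test): an application would typically issue this request through an already-connected *Broker, e.g. resp, err := broker.EndTxn(&EndTxnRequest{TransactionalID: "txn", ProducerID: 8000, ProducerEpoch: 1, TransactionResult: true}), and then inspect resp.Err.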
func TestEndTxnRequest(t *testing.T) { req := &EndTxnRequest{ TransactionalID: "txn", ProducerID: 8000, ProducerEpoch: 1, TransactionResult: true, } testRequest(t, "", req, endTxnRequest) } golang-github-ibm-sarama-1.43.2/end_txn_response.go000066400000000000000000000021451461256741300222560ustar00rootroot00000000000000package sarama import ( "time" ) type EndTxnResponse struct { Version int16 ThrottleTime time.Duration Err KError } func (e *EndTxnResponse) encode(pe packetEncoder) error { pe.putInt32(int32(e.ThrottleTime / time.Millisecond)) pe.putInt16(int16(e.Err)) return nil } func (e *EndTxnResponse) decode(pd packetDecoder, version int16) (err error) { throttleTime, err := pd.getInt32() if err != nil { return err } e.ThrottleTime = time.Duration(throttleTime) * time.Millisecond kerr, err := pd.getInt16() if err != nil { return err } e.Err = KError(kerr) return nil } func (e *EndTxnResponse) key() int16 { return 26 } func (e *EndTxnResponse) version() int16 { return e.Version } func (r *EndTxnResponse) headerVersion() int16 { return 0 } func (e *EndTxnResponse) isValidVersion() bool { return e.Version >= 0 && e.Version <= 2 } func (e *EndTxnResponse) requiredVersion() KafkaVersion { switch e.Version { case 2: return V2_7_0_0 case 1: return V2_0_0_0 default: return V0_11_0_0 } } func (r *EndTxnResponse) throttleTime() time.Duration { return r.ThrottleTime } golang-github-ibm-sarama-1.43.2/end_txn_response_test.go000066400000000000000000000004561461256741300233200ustar00rootroot00000000000000package sarama import ( "testing" "time" ) var endTxnResponse = []byte{ 0, 0, 0, 100, 0, 49, } func TestEndTxnResponse(t *testing.T) { resp := &EndTxnResponse{ ThrottleTime: 100 * time.Millisecond, Err: ErrInvalidProducerIDMapping, } testResponse(t, "", resp, endTxnResponse) } golang-github-ibm-sarama-1.43.2/entrypoint.sh000077500000000000000000000016751461256741300211330ustar00rootroot00000000000000#!/bin/bash set -eu set -o pipefail KAFKA_VERSION="${KAFKA_VERSION:-3.6.0}" KAFKA_HOME="/opt/kafka-${KAFKA_VERSION}" if [ ! -d "${KAFKA_HOME}" ]; then echo 'Error: KAFKA_VERSION '$KAFKA_VERSION' not available in this image at '$KAFKA_HOME exit 1 fi cd "${KAFKA_HOME}" || exit 1 # discard all empty/commented lines from default config and copy to /tmp sed -e '/^#/d' -e '/^$/d' config/server.properties >/tmp/server.properties echo "########################################################################" >>/tmp/server.properties # emulate kafka_configure_from_environment_variables from bitnami/bitnami-docker-kafka for var in "${!KAFKA_CFG_@}"; do key="$(echo "$var" | sed -e 's/^KAFKA_CFG_//g' -e 's/_/\./g' -e 's/.*/\L&/')" sed -e '/^'$key'/d' -i"" /tmp/server.properties value="${!var}" echo "$key=$value" >>/tmp/server.properties done sort /tmp/server.properties exec bin/kafka-server-start.sh /tmp/server.properties golang-github-ibm-sarama-1.43.2/errors.go000066400000000000000000000632261461256741300202240ustar00rootroot00000000000000package sarama import ( "errors" "fmt" "strings" "github.com/hashicorp/go-multierror" ) // ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored // or otherwise failed to respond. var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to") // ErrBrokerNotFound is the error returned when there's no broker found for the requested ID. 
var ErrBrokerNotFound = errors.New("kafka: broker for ID is not found") // ErrClosedClient is the error returned when a method is called on a client that has been closed. var ErrClosedClient = errors.New("kafka: tried to use a client that was closed") // ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does // not contain the expected information. var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks") // ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index // (meaning one outside of the range [0...numPartitions-1]). var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index") // ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting. var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated") // ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected. var ErrNotConnected = errors.New("kafka: broker not connected") // ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected // when requesting messages, since as an optimization the server is allowed to return a partial message at the end // of the message set. var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected") // ErrShuttingDown is returned when a producer receives a message during shutdown. var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down") // ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max") // ErrConsumerOffsetNotAdvanced is returned when a partition consumer didn't advance its offset after parsing // a RecordBatch. var ErrConsumerOffsetNotAdvanced = errors.New("kafka: consumer offset was not advanced after a RecordBatch") // ErrControllerNotAvailable is returned when server didn't give correct controller id. May be kafka server's version // is lower than 0.10.0.0. var ErrControllerNotAvailable = errors.New("kafka: controller is not available") // ErrNoTopicsToUpdateMetadata is returned when Meta.Full is set to false but no specific topics were found to update // the metadata. 
var ErrNoTopicsToUpdateMetadata = errors.New("kafka: no specific topics to update metadata") // ErrUnknownScramMechanism is returned when user tries to AlterUserScramCredentials with unknown SCRAM mechanism var ErrUnknownScramMechanism = errors.New("kafka: unknown SCRAM mechanism provided") // ErrReassignPartitions is returned when altering partition assignments for a topic fails var ErrReassignPartitions = errors.New("failed to reassign partitions for topic") // ErrDeleteRecords is the type of error returned when fail to delete the required records var ErrDeleteRecords = errors.New("kafka server: failed to delete records") // ErrCreateACLs is the type of error returned when ACL creation failed var ErrCreateACLs = errors.New("kafka server: failed to create one or more ACL rules") // ErrAddPartitionsToTxn is returned when AddPartitionsToTxn failed multiple times var ErrAddPartitionsToTxn = errors.New("transaction manager: failed to send partitions to transaction") // ErrTxnOffsetCommit is returned when TxnOffsetCommit failed multiple times var ErrTxnOffsetCommit = errors.New("transaction manager: failed to send offsets to transaction") // ErrTransactionNotReady when transaction status is invalid for the current action. var ErrTransactionNotReady = errors.New("transaction manager: transaction is not ready") // ErrNonTransactedProducer when calling BeginTxn, CommitTxn or AbortTxn on a non transactional producer. var ErrNonTransactedProducer = errors.New("transaction manager: you need to add TransactionalID to producer") // ErrTransitionNotAllowed when txnmgr state transition is not valid. var ErrTransitionNotAllowed = errors.New("transaction manager: invalid transition attempted") // ErrCannotTransitionNilError when transition is attempted with an nil error. var ErrCannotTransitionNilError = errors.New("transaction manager: cannot transition with a nil error") // ErrTxnUnableToParseResponse when response is nil var ErrTxnUnableToParseResponse = errors.New("transaction manager: unable to parse response") // MultiErrorFormat specifies the formatter applied to format multierrors. The // default implementation is a condensed version of the hashicorp/go-multierror // default one var MultiErrorFormat multierror.ErrorFormatFunc = func(es []error) string { if len(es) == 1 { return es[0].Error() } points := make([]string, len(es)) for i, err := range es { points[i] = fmt.Sprintf("* %s", err) } return fmt.Sprintf( "%d errors occurred:\n\t%s\n", len(es), strings.Join(points, "\n\t")) } type sentinelError struct { sentinel error wrapped error } func (err sentinelError) Error() string { if err.wrapped != nil { return fmt.Sprintf("%s: %v", err.sentinel, err.wrapped) } else { return fmt.Sprintf("%s", err.sentinel) } } func (err sentinelError) Is(target error) bool { return errors.Is(err.sentinel, target) || errors.Is(err.wrapped, target) } func (err sentinelError) Unwrap() error { return err.wrapped } func Wrap(sentinel error, wrapped ...error) sentinelError { return sentinelError{sentinel: sentinel, wrapped: multiError(wrapped...)} } func multiError(wrapped ...error) error { merr := multierror.Append(nil, wrapped...) if MultiErrorFormat != nil { merr.ErrorFormat = MultiErrorFormat } return merr.ErrorOrNil() } // PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example, // if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that. 
type PacketEncodingError struct { Info string } func (err PacketEncodingError) Error() string { return fmt.Sprintf("kafka: error encoding packet: %s", err.Info) } // PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response. // This can be a bad CRC or length field, or any other invalid value. type PacketDecodingError struct { Info string } func (err PacketDecodingError) Error() string { return fmt.Sprintf("kafka: error decoding packet: %s", err.Info) } // ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer) // when the specified configuration is invalid. type ConfigurationError string func (err ConfigurationError) Error() string { return "kafka: invalid configuration (" + string(err) + ")" } // KError is the type of error that can be returned directly by the Kafka broker. // See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes type KError int16 // Numeric error codes returned by the Kafka server. const ( ErrUnknown KError = -1 // Errors.UNKNOWN_SERVER_ERROR ErrNoError KError = 0 // Errors.NONE ErrOffsetOutOfRange KError = 1 // Errors.OFFSET_OUT_OF_RANGE ErrInvalidMessage KError = 2 // Errors.CORRUPT_MESSAGE ErrUnknownTopicOrPartition KError = 3 // Errors.UNKNOWN_TOPIC_OR_PARTITION ErrInvalidMessageSize KError = 4 // Errors.INVALID_FETCH_SIZE ErrLeaderNotAvailable KError = 5 // Errors.LEADER_NOT_AVAILABLE ErrNotLeaderForPartition KError = 6 // Errors.NOT_LEADER_OR_FOLLOWER ErrRequestTimedOut KError = 7 // Errors.REQUEST_TIMED_OUT ErrBrokerNotAvailable KError = 8 // Errors.BROKER_NOT_AVAILABLE ErrReplicaNotAvailable KError = 9 // Errors.REPLICA_NOT_AVAILABLE ErrMessageSizeTooLarge KError = 10 // Errors.MESSAGE_TOO_LARGE ErrStaleControllerEpochCode KError = 11 // Errors.STALE_CONTROLLER_EPOCH ErrOffsetMetadataTooLarge KError = 12 // Errors.OFFSET_METADATA_TOO_LARGE ErrNetworkException KError = 13 // Errors.NETWORK_EXCEPTION ErrOffsetsLoadInProgress KError = 14 // Errors.COORDINATOR_LOAD_IN_PROGRESS ErrConsumerCoordinatorNotAvailable KError = 15 // Errors.COORDINATOR_NOT_AVAILABLE ErrNotCoordinatorForConsumer KError = 16 // Errors.NOT_COORDINATOR ErrInvalidTopic KError = 17 // Errors.INVALID_TOPIC_EXCEPTION ErrMessageSetSizeTooLarge KError = 18 // Errors.RECORD_LIST_TOO_LARGE ErrNotEnoughReplicas KError = 19 // Errors.NOT_ENOUGH_REPLICAS ErrNotEnoughReplicasAfterAppend KError = 20 // Errors.NOT_ENOUGH_REPLICAS_AFTER_APPEND ErrInvalidRequiredAcks KError = 21 // Errors.INVALID_REQUIRED_ACKS ErrIllegalGeneration KError = 22 // Errors.ILLEGAL_GENERATION ErrInconsistentGroupProtocol KError = 23 // Errors.INCONSISTENT_GROUP_PROTOCOL ErrInvalidGroupId KError = 24 // Errors.INVALID_GROUP_ID ErrUnknownMemberId KError = 25 // Errors.UNKNOWN_MEMBER_ID ErrInvalidSessionTimeout KError = 26 // Errors.INVALID_SESSION_TIMEOUT ErrRebalanceInProgress KError = 27 // Errors.REBALANCE_IN_PROGRESS ErrInvalidCommitOffsetSize KError = 28 // Errors.INVALID_COMMIT_OFFSET_SIZE ErrTopicAuthorizationFailed KError = 29 // Errors.TOPIC_AUTHORIZATION_FAILED ErrGroupAuthorizationFailed KError = 30 // Errors.GROUP_AUTHORIZATION_FAILED ErrClusterAuthorizationFailed KError = 31 // Errors.CLUSTER_AUTHORIZATION_FAILED ErrInvalidTimestamp KError = 32 // Errors.INVALID_TIMESTAMP ErrUnsupportedSASLMechanism KError = 33 // Errors.UNSUPPORTED_SASL_MECHANISM ErrIllegalSASLState KError = 34 // Errors.ILLEGAL_SASL_STATE ErrUnsupportedVersion KError = 35 // 
Errors.UNSUPPORTED_VERSION ErrTopicAlreadyExists KError = 36 // Errors.TOPIC_ALREADY_EXISTS ErrInvalidPartitions KError = 37 // Errors.INVALID_PARTITIONS ErrInvalidReplicationFactor KError = 38 // Errors.INVALID_REPLICATION_FACTOR ErrInvalidReplicaAssignment KError = 39 // Errors.INVALID_REPLICA_ASSIGNMENT ErrInvalidConfig KError = 40 // Errors.INVALID_CONFIG ErrNotController KError = 41 // Errors.NOT_CONTROLLER ErrInvalidRequest KError = 42 // Errors.INVALID_REQUEST ErrUnsupportedForMessageFormat KError = 43 // Errors.UNSUPPORTED_FOR_MESSAGE_FORMAT ErrPolicyViolation KError = 44 // Errors.POLICY_VIOLATION ErrOutOfOrderSequenceNumber KError = 45 // Errors.OUT_OF_ORDER_SEQUENCE_NUMBER ErrDuplicateSequenceNumber KError = 46 // Errors.DUPLICATE_SEQUENCE_NUMBER ErrInvalidProducerEpoch KError = 47 // Errors.INVALID_PRODUCER_EPOCH ErrInvalidTxnState KError = 48 // Errors.INVALID_TXN_STATE ErrInvalidProducerIDMapping KError = 49 // Errors.INVALID_PRODUCER_ID_MAPPING ErrInvalidTransactionTimeout KError = 50 // Errors.INVALID_TRANSACTION_TIMEOUT ErrConcurrentTransactions KError = 51 // Errors.CONCURRENT_TRANSACTIONS ErrTransactionCoordinatorFenced KError = 52 // Errors.TRANSACTION_COORDINATOR_FENCED ErrTransactionalIDAuthorizationFailed KError = 53 // Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED ErrSecurityDisabled KError = 54 // Errors.SECURITY_DISABLED ErrOperationNotAttempted KError = 55 // Errors.OPERATION_NOT_ATTEMPTED ErrKafkaStorageError KError = 56 // Errors.KAFKA_STORAGE_ERROR ErrLogDirNotFound KError = 57 // Errors.LOG_DIR_NOT_FOUND ErrSASLAuthenticationFailed KError = 58 // Errors.SASL_AUTHENTICATION_FAILED ErrUnknownProducerID KError = 59 // Errors.UNKNOWN_PRODUCER_ID ErrReassignmentInProgress KError = 60 // Errors.REASSIGNMENT_IN_PROGRESS ErrDelegationTokenAuthDisabled KError = 61 // Errors.DELEGATION_TOKEN_AUTH_DISABLED ErrDelegationTokenNotFound KError = 62 // Errors.DELEGATION_TOKEN_NOT_FOUND ErrDelegationTokenOwnerMismatch KError = 63 // Errors.DELEGATION_TOKEN_OWNER_MISMATCH ErrDelegationTokenRequestNotAllowed KError = 64 // Errors.DELEGATION_TOKEN_REQUEST_NOT_ALLOWED ErrDelegationTokenAuthorizationFailed KError = 65 // Errors.DELEGATION_TOKEN_AUTHORIZATION_FAILED ErrDelegationTokenExpired KError = 66 // Errors.DELEGATION_TOKEN_EXPIRED ErrInvalidPrincipalType KError = 67 // Errors.INVALID_PRINCIPAL_TYPE ErrNonEmptyGroup KError = 68 // Errors.NON_EMPTY_GROUP ErrGroupIDNotFound KError = 69 // Errors.GROUP_ID_NOT_FOUND ErrFetchSessionIDNotFound KError = 70 // Errors.FETCH_SESSION_ID_NOT_FOUND ErrInvalidFetchSessionEpoch KError = 71 // Errors.INVALID_FETCH_SESSION_EPOCH ErrListenerNotFound KError = 72 // Errors.LISTENER_NOT_FOUND ErrTopicDeletionDisabled KError = 73 // Errors.TOPIC_DELETION_DISABLED ErrFencedLeaderEpoch KError = 74 // Errors.FENCED_LEADER_EPOCH ErrUnknownLeaderEpoch KError = 75 // Errors.UNKNOWN_LEADER_EPOCH ErrUnsupportedCompressionType KError = 76 // Errors.UNSUPPORTED_COMPRESSION_TYPE ErrStaleBrokerEpoch KError = 77 // Errors.STALE_BROKER_EPOCH ErrOffsetNotAvailable KError = 78 // Errors.OFFSET_NOT_AVAILABLE ErrMemberIdRequired KError = 79 // Errors.MEMBER_ID_REQUIRED ErrPreferredLeaderNotAvailable KError = 80 // Errors.PREFERRED_LEADER_NOT_AVAILABLE ErrGroupMaxSizeReached KError = 81 // Errors.GROUP_MAX_SIZE_REACHED ErrFencedInstancedId KError = 82 // Errors.FENCED_INSTANCE_ID ErrEligibleLeadersNotAvailable KError = 83 // Errors.ELIGIBLE_LEADERS_NOT_AVAILABLE ErrElectionNotNeeded KError = 84 // Errors.ELECTION_NOT_NEEDED ErrNoReassignmentInProgress KError = 85 // 
Errors.NO_REASSIGNMENT_IN_PROGRESS ErrGroupSubscribedToTopic KError = 86 // Errors.GROUP_SUBSCRIBED_TO_TOPIC ErrInvalidRecord KError = 87 // Errors.INVALID_RECORD ErrUnstableOffsetCommit KError = 88 // Errors.UNSTABLE_OFFSET_COMMIT ErrThrottlingQuotaExceeded KError = 89 // Errors.THROTTLING_QUOTA_EXCEEDED ErrProducerFenced KError = 90 // Errors.PRODUCER_FENCED ) func (err KError) Error() string { // Error messages stolen/adapted from // https://kafka.apache.org/protocol#protocol_error_codes switch err { case ErrNoError: return "kafka server: Not an error, why are you printing me?" case ErrUnknown: return "kafka server: Unexpected (unknown?) server error" case ErrOffsetOutOfRange: return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition" case ErrInvalidMessage: return "kafka server: Message contents does not match its CRC" case ErrUnknownTopicOrPartition: return "kafka server: Request was for a topic or partition that does not exist on this broker" case ErrInvalidMessageSize: return "kafka server: The message has a negative size" case ErrLeaderNotAvailable: return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes" case ErrNotLeaderForPartition: return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date" case ErrRequestTimedOut: return "kafka server: Request exceeded the user-specified time limit in the request" case ErrBrokerNotAvailable: return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!" case ErrReplicaNotAvailable: return "kafka server: Replica information not available, one or more brokers are down" case ErrMessageSizeTooLarge: return "kafka server: Message was too large, server rejected it to avoid allocation error" case ErrStaleControllerEpochCode: return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)" case ErrOffsetMetadataTooLarge: return "kafka server: Specified a string larger than the configured maximum for offset metadata" case ErrNetworkException: return "kafka server: The server disconnected before a response was received" case ErrOffsetsLoadInProgress: return "kafka server: The coordinator is still loading offsets and cannot currently process requests" case ErrConsumerCoordinatorNotAvailable: return "kafka server: Offset's topic has not yet been created" case ErrNotCoordinatorForConsumer: return "kafka server: Request was for a consumer group that is not coordinated by this broker" case ErrInvalidTopic: return "kafka server: The request attempted to perform an operation on an invalid topic" case ErrMessageSetSizeTooLarge: return "kafka server: The request included message batch larger than the configured segment size on the server" case ErrNotEnoughReplicas: return "kafka server: Messages are rejected since there are fewer in-sync replicas than required" case ErrNotEnoughReplicasAfterAppend: return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required" case ErrInvalidRequiredAcks: return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)" case ErrIllegalGeneration: return "kafka server: The provided generation id is not the current generation" case ErrInconsistentGroupProtocol: return "kafka server: The provider group protocol type is incompatible with the other members" 
case ErrInvalidGroupId: return "kafka server: The provided group id was empty" case ErrUnknownMemberId: return "kafka server: The provided member is not known in the current generation" case ErrInvalidSessionTimeout: return "kafka server: The provided session timeout is outside the allowed range" case ErrRebalanceInProgress: return "kafka server: A rebalance for the group is in progress. Please re-join the group" case ErrInvalidCommitOffsetSize: return "kafka server: The provided commit metadata was too large" case ErrTopicAuthorizationFailed: return "kafka server: The client is not authorized to access this topic" case ErrGroupAuthorizationFailed: return "kafka server: The client is not authorized to access this group" case ErrClusterAuthorizationFailed: return "kafka server: The client is not authorized to send this request type" case ErrInvalidTimestamp: return "kafka server: The timestamp of the message is out of acceptable range" case ErrUnsupportedSASLMechanism: return "kafka server: The broker does not support the requested SASL mechanism" case ErrIllegalSASLState: return "kafka server: Request is not valid given the current SASL state" case ErrUnsupportedVersion: return "kafka server: The version of API is not supported" case ErrTopicAlreadyExists: return "kafka server: Topic with this name already exists" case ErrInvalidPartitions: return "kafka server: Number of partitions is invalid" case ErrInvalidReplicationFactor: return "kafka server: Replication-factor is invalid" case ErrInvalidReplicaAssignment: return "kafka server: Replica assignment is invalid" case ErrInvalidConfig: return "kafka server: Configuration is invalid" case ErrNotController: return "kafka server: This is not the correct controller for this cluster" case ErrInvalidRequest: return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. 
See the broker logs for more details" case ErrUnsupportedForMessageFormat: return "kafka server: The requested operation is not supported by the message format version" case ErrPolicyViolation: return "kafka server: Request parameters do not satisfy the configured policy" case ErrOutOfOrderSequenceNumber: return "kafka server: The broker received an out of order sequence number" case ErrDuplicateSequenceNumber: return "kafka server: The broker received a duplicate sequence number" case ErrInvalidProducerEpoch: return "kafka server: Producer attempted an operation with an old epoch" case ErrInvalidTxnState: return "kafka server: The producer attempted a transactional operation in an invalid state" case ErrInvalidProducerIDMapping: return "kafka server: The producer attempted to use a producer id which is not currently assigned to its transactional id" case ErrInvalidTransactionTimeout: return "kafka server: The transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)" case ErrConcurrentTransactions: return "kafka server: The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing" case ErrTransactionCoordinatorFenced: return "kafka server: The transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer" case ErrTransactionalIDAuthorizationFailed: return "kafka server: Transactional ID authorization failed" case ErrSecurityDisabled: return "kafka server: Security features are disabled" case ErrOperationNotAttempted: return "kafka server: The broker did not attempt to execute this operation" case ErrKafkaStorageError: return "kafka server: Disk error when trying to access log file on the disk" case ErrLogDirNotFound: return "kafka server: The specified log directory is not found in the broker config" case ErrSASLAuthenticationFailed: return "kafka server: SASL Authentication failed" case ErrUnknownProducerID: return "kafka server: The broker could not locate the producer metadata associated with the Producer ID" case ErrReassignmentInProgress: return "kafka server: A partition reassignment is in progress" case ErrDelegationTokenAuthDisabled: return "kafka server: Delegation Token feature is not enabled" case ErrDelegationTokenNotFound: return "kafka server: Delegation Token is not found on server" case ErrDelegationTokenOwnerMismatch: return "kafka server: Specified Principal is not valid Owner/Renewer" case ErrDelegationTokenRequestNotAllowed: return "kafka server: Delegation Token requests are not allowed on PLAINTEXT/1-way SSL channels and on delegation token authenticated channels" case ErrDelegationTokenAuthorizationFailed: return "kafka server: Delegation Token authorization failed" case ErrDelegationTokenExpired: return "kafka server: Delegation Token is expired" case ErrInvalidPrincipalType: return "kafka server: Supplied principalType is not supported" case ErrNonEmptyGroup: return "kafka server: The group is not empty" case ErrGroupIDNotFound: return "kafka server: The group id does not exist" case ErrFetchSessionIDNotFound: return "kafka server: The fetch session ID was not found" case ErrInvalidFetchSessionEpoch: return "kafka server: The fetch session epoch is invalid" case ErrListenerNotFound: return "kafka server: There is no listener on the leader broker that matches the listener on which metadata request was processed" case ErrTopicDeletionDisabled: return "kafka server: Topic deletion is disabled" case 
ErrFencedLeaderEpoch: return "kafka server: The leader epoch in the request is older than the epoch on the broker" case ErrUnknownLeaderEpoch: return "kafka server: The leader epoch in the request is newer than the epoch on the broker" case ErrUnsupportedCompressionType: return "kafka server: The requesting client does not support the compression type of given partition" case ErrStaleBrokerEpoch: return "kafka server: Broker epoch has changed" case ErrOffsetNotAvailable: return "kafka server: The leader high watermark has not caught up from a recent leader election so the offsets cannot be guaranteed to be monotonically increasing" case ErrMemberIdRequired: return "kafka server: The group member needs to have a valid member id before actually entering a consumer group" case ErrPreferredLeaderNotAvailable: return "kafka server: The preferred leader was not available" case ErrGroupMaxSizeReached: return "kafka server: Consumer group already has the configured maximum number of members" case ErrFencedInstancedId: return "kafka server: The broker rejected this static consumer since another consumer with the same group.instance.id has registered with a different member.id" case ErrEligibleLeadersNotAvailable: return "kafka server: Eligible topic partition leaders are not available" case ErrElectionNotNeeded: return "kafka server: Leader election not needed for topic partition" case ErrNoReassignmentInProgress: return "kafka server: No partition reassignment is in progress" case ErrGroupSubscribedToTopic: return "kafka server: Deleting offsets of a topic is forbidden while the consumer group is actively subscribed to it" case ErrInvalidRecord: return "kafka server: This record has failed the validation on broker and hence will be rejected" case ErrUnstableOffsetCommit: return "kafka server: There are unstable offsets that need to be cleared" } return fmt.Sprintf("Unknown error, how did this happen? 
Error code = %d", err) } golang-github-ibm-sarama-1.43.2/errors_test.go000066400000000000000000000031131461256741300212500ustar00rootroot00000000000000package sarama import ( "errors" "fmt" "net" "testing" ) func TestSentinelWithSingleWrappedError(t *testing.T) { t.Parallel() myNetError := &net.OpError{Op: "mock", Err: errors.New("op error")} error := Wrap(ErrOutOfBrokers, myNetError) expected := fmt.Sprintf("%s: %s", ErrOutOfBrokers, myNetError) actual := error.Error() if actual != expected { t.Errorf("unexpected value '%s' vs '%v'", expected, actual) } if !errors.Is(error, ErrOutOfBrokers) { t.Error("errors.Is unexpected result") } if !errors.Is(error, myNetError) { t.Error("errors.Is unexpected result") } var opError *net.OpError if !errors.As(error, &opError) { t.Error("errors.As unexpected result") } else if opError != myNetError { t.Error("errors.As wrong value") } unwrapped := errors.Unwrap(error) if errors.Is(unwrapped, ErrOutOfBrokers) || !errors.Is(unwrapped, myNetError) { t.Errorf("unexpected unwrapped value %v vs %vs", error, unwrapped) } } func TestSentinelWithMultipleWrappedErrors(t *testing.T) { t.Parallel() myNetError := &net.OpError{} myAddrError := &net.AddrError{} error := Wrap(ErrOutOfBrokers, myNetError, myAddrError) if !errors.Is(error, ErrOutOfBrokers) { t.Error("errors.Is unexpected result") } if !errors.Is(error, myNetError) { t.Error("errors.Is unexpected result") } if !errors.Is(error, myAddrError) { t.Error("errors.Is unexpected result") } unwrapped := errors.Unwrap(error) if errors.Is(unwrapped, ErrOutOfBrokers) || !errors.Is(unwrapped, myNetError) || !errors.Is(unwrapped, myAddrError) { t.Errorf("unwrapped value unexpected result") } } golang-github-ibm-sarama-1.43.2/examples/000077500000000000000000000000001461256741300201665ustar00rootroot00000000000000golang-github-ibm-sarama-1.43.2/examples/README.md000066400000000000000000000026171461256741300214530ustar00rootroot00000000000000# Sarama examples This folder contains example applications to demonstrate the use of Sarama. For code snippet examples on how to use the different types in Sarama, see [Sarama's API documentation on pkg.go.dev](https://pkg.go.dev/github.com/IBM/sarama) #### HTTP server [http_server](./http_server) is a simple HTTP server uses both the sync producer to produce data as part of the request handling cycle, as well as the async producer to maintain an access log. It also uses the [mocks subpackage](https://pkg.go.dev/github.com/IBM/sarama/mocks) to test both. #### Interceptors Basic example to use a producer interceptor that produces [OpenTelemetry](https://github.com/open-telemetry/opentelemetry-go/) spans and add some headers for each intercepted message. #### Transactional Producer [txn_producer](./txn_producer) Basic example to use a transactional producer that produce on some topic within a Kafka transaction. To ensure transactional-id uniqueness it implement some **_ProducerProvider_** that build a producer appending an integer that grow when producer is created. #### Exacly-once transactional paradigm [exactly_once](./exactly_once) Basic example to use a transactional producer that produce consumed message from some topics within a Kafka transaction. To ensure transactional-id uniqueness it implement some **_ProducerProvider_** that build a producer using current message topic-partition. 
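The `KError` codes defined in `errors.go` above are plain sentinel values that implement `error`, and `errors_test.go` shows they compose with the standard `errors` package. The sketch below applies the same pattern from the client side; the `isRetriableKafkaError` helper and the particular codes it chooses to retry are illustrative assumptions, not part of Sarama:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/IBM/sarama"
)

// isRetriableKafkaError is a hypothetical helper: it walks the error chain
// looking for a sarama.KError and reports whether the code is one this
// application chooses to retry. The set of retriable codes is our assumption.
func isRetriableKafkaError(err error) bool {
	var kerr sarama.KError
	if !errors.As(err, &kerr) {
		return false
	}
	switch kerr {
	case sarama.ErrLeaderNotAvailable,
		sarama.ErrNotLeaderForPartition,
		sarama.ErrRequestTimedOut:
		return true
	default:
		return false
	}
}

func main() {
	// Simulate a broker error that has been wrapped with extra context,
	// similar to what the Wrap helper exercised in errors_test.go produces.
	err := fmt.Errorf("produce failed: %w", sarama.ErrNotLeaderForPartition)
	fmt.Println(isRetriableKafkaError(err)) // true
}
```

Because `KError` is a comparable integer type, `errors.Is(err, sarama.ErrNotLeaderForPartition)` would also report true for the same wrapped chain.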
golang-github-ibm-sarama-1.43.2/examples/consumergroup/000077500000000000000000000000001461256741300230765ustar00rootroot00000000000000golang-github-ibm-sarama-1.43.2/examples/consumergroup/README.md000066400000000000000000000005331461256741300243560ustar00rootroot00000000000000# Consumergroup example This example shows you how to use the Sarama consumer group consumer. The example simply starts consuming the given Kafka topics and logs the consumed messages. ```bash $ go run main.go -brokers="127.0.0.1:9092" -topics="sarama" -group="example" ``` You can also toggle (pause/resume) the consumption by sending SIGUSR1 golang-github-ibm-sarama-1.43.2/examples/consumergroup/go.mod000066400000000000000000000020731461256741300242060ustar00rootroot00000000000000module github.com/IBM/sarama/examples/consumer go 1.19 require github.com/IBM/sarama v1.43.1 require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/eapache/go-resiliency v1.6.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect github.com/eapache/queue v1.1.0 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect github.com/jcmturner/gofork v1.7.6 // indirect github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/klauspost/compress v1.17.8 // indirect github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect golang.org/x/crypto v0.22.0 // indirect golang.org/x/net v0.24.0 // indirect ) replace github.com/IBM/sarama => ../../ golang-github-ibm-sarama-1.43.2/examples/consumergroup/go.sum000066400000000000000000000200521461256741300242300ustar00rootroot00000000000000github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/eapache/go-resiliency v1.6.0 h1:CqGDTLtpwuWKn6Nj3uNUdflaq+/kIPsg0gfNzHton30= github.com/eapache/go-resiliency v1.6.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= 
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod 
h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= golang-github-ibm-sarama-1.43.2/examples/consumergroup/main.go000066400000000000000000000133151461256741300243540ustar00rootroot00000000000000package main // SIGUSR1 toggle the pause/resume consumption import ( "context" "errors" "flag" "log" "os" "os/signal" "strings" "sync" "syscall" "github.com/IBM/sarama" ) // Sarama configuration options var ( brokers = "" version = "" group = "" 
topics = "" assignor = "" oldest = true verbose = false ) func init() { flag.StringVar(&brokers, "brokers", "", "Kafka bootstrap brokers to connect to, as a comma separated list") flag.StringVar(&group, "group", "", "Kafka consumer group definition") flag.StringVar(&version, "version", sarama.DefaultVersion.String(), "Kafka cluster version") flag.StringVar(&topics, "topics", "", "Kafka topics to be consumed, as a comma separated list") flag.StringVar(&assignor, "assignor", "range", "Consumer group partition assignment strategy (range, roundrobin, sticky)") flag.BoolVar(&oldest, "oldest", true, "Kafka consumer consume initial offset from oldest") flag.BoolVar(&verbose, "verbose", false, "Sarama logging") flag.Parse() if len(brokers) == 0 { panic("no Kafka bootstrap brokers defined, please set the -brokers flag") } if len(topics) == 0 { panic("no topics given to be consumed, please set the -topics flag") } if len(group) == 0 { panic("no Kafka consumer group defined, please set the -group flag") } } func main() { keepRunning := true log.Println("Starting a new Sarama consumer") if verbose { sarama.Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags) } version, err := sarama.ParseKafkaVersion(version) if err != nil { log.Panicf("Error parsing Kafka version: %v", err) } /** * Construct a new Sarama configuration. * The Kafka cluster version has to be defined before the consumer/producer is initialized. */ config := sarama.NewConfig() config.Version = version switch assignor { case "sticky": config.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.NewBalanceStrategySticky()} case "roundrobin": config.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.NewBalanceStrategyRoundRobin()} case "range": config.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.NewBalanceStrategyRange()} default: log.Panicf("Unrecognized consumer group partition assignor: %s", assignor) } if oldest { config.Consumer.Offsets.Initial = sarama.OffsetOldest } /** * Setup a new Sarama consumer group */ consumer := Consumer{ ready: make(chan bool), } ctx, cancel := context.WithCancel(context.Background()) client, err := sarama.NewConsumerGroup(strings.Split(brokers, ","), group, config) if err != nil { log.Panicf("Error creating consumer group client: %v", err) } consumptionIsPaused := false wg := &sync.WaitGroup{} wg.Add(1) go func() { defer wg.Done() for { // `Consume` should be called inside an infinite loop, when a // server-side rebalance happens, the consumer session will need to be // recreated to get the new claims if err := client.Consume(ctx, strings.Split(topics, ","), &consumer); err != nil { if errors.Is(err, sarama.ErrClosedConsumerGroup) { return } log.Panicf("Error from consumer: %v", err) } // check if context was cancelled, signaling that the consumer should stop if ctx.Err() != nil { return } consumer.ready = make(chan bool) } }() <-consumer.ready // Await till the consumer has been set up log.Println("Sarama consumer up and running!...") sigusr1 := make(chan os.Signal, 1) signal.Notify(sigusr1, syscall.SIGUSR1) sigterm := make(chan os.Signal, 1) signal.Notify(sigterm, syscall.SIGINT, syscall.SIGTERM) for keepRunning { select { case <-ctx.Done(): log.Println("terminating: context cancelled") keepRunning = false case <-sigterm: log.Println("terminating: via signal") keepRunning = false case <-sigusr1: toggleConsumptionFlow(client, &consumptionIsPaused) } } cancel() wg.Wait() if err = client.Close(); err != nil { log.Panicf("Error 
closing client: %v", err) } } func toggleConsumptionFlow(client sarama.ConsumerGroup, isPaused *bool) { if *isPaused { client.ResumeAll() log.Println("Resuming consumption") } else { client.PauseAll() log.Println("Pausing consumption") } *isPaused = !*isPaused } // Consumer represents a Sarama consumer group consumer type Consumer struct { ready chan bool } // Setup is run at the beginning of a new session, before ConsumeClaim func (consumer *Consumer) Setup(sarama.ConsumerGroupSession) error { // Mark the consumer as ready close(consumer.ready) return nil } // Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited func (consumer *Consumer) Cleanup(sarama.ConsumerGroupSession) error { return nil } // ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages(). // Once the Messages() channel is closed, the Handler must finish its processing // loop and exit. func (consumer *Consumer) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { // NOTE: // Do not move the code below to a goroutine. // The `ConsumeClaim` itself is called within a goroutine, see: // https://github.com/IBM/sarama/blob/main/consumer_group.go#L27-L29 for { select { case message, ok := <-claim.Messages(): if !ok { log.Printf("message channel was closed") return nil } log.Printf("Message claimed: value = %s, timestamp = %v, topic = %s", string(message.Value), message.Timestamp, message.Topic) session.MarkMessage(message, "") // Should return when `session.Context()` is done. // If not, will raise `ErrRebalanceInProgress` or `read tcp :: i/o timeout` when kafka rebalance. see: // https://github.com/IBM/sarama/issues/1192 case <-session.Context().Done(): return nil } } } golang-github-ibm-sarama-1.43.2/examples/exactly_once/000077500000000000000000000000001461256741300226435ustar00rootroot00000000000000golang-github-ibm-sarama-1.43.2/examples/exactly_once/.gitignore000066400000000000000000000000151461256741300246270ustar00rootroot00000000000000exactly_once golang-github-ibm-sarama-1.43.2/examples/exactly_once/README.md000066400000000000000000000012201461256741300241150ustar00rootroot00000000000000# Exactly-Once example This example shows you how to use the Sarama transactional producer to ensure exacly-once paradigm with Kafka transaction. The example simply starts consuming the given Kafka topics and produce the consumed message to another topic including current message offset in procuder transaction. ```bash $ go run main.go -brokers="127.0.0.1:9092" -topics="sarama" -destination-topic="destination-sarama" -group="example" ``` To ensure transactional-id uniqueness it implement some ***ProducerProvider*** that build a producer using current message topic-partition. You can also toggle (pause/resume) the consumption by sending SIGUSR1. 
golang-github-ibm-sarama-1.43.2/examples/exactly_once/go.mod000066400000000000000000000020771461256741300237570ustar00rootroot00000000000000module github.com/IBM/sarama/examples/exactly_once go 1.19 require github.com/IBM/sarama v1.43.1 require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/eapache/go-resiliency v1.6.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect github.com/eapache/queue v1.1.0 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect github.com/jcmturner/gofork v1.7.6 // indirect github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/klauspost/compress v1.17.8 // indirect github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect golang.org/x/crypto v0.22.0 // indirect golang.org/x/net v0.24.0 // indirect ) replace github.com/IBM/sarama => ../../ golang-github-ibm-sarama-1.43.2/examples/exactly_once/go.sum000066400000000000000000000200521461256741300237750ustar00rootroot00000000000000github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/eapache/go-resiliency v1.6.0 h1:CqGDTLtpwuWKn6Nj3uNUdflaq+/kIPsg0gfNzHton30= github.com/eapache/go-resiliency v1.6.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= 
github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.24.0 
h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= golang-github-ibm-sarama-1.43.2/examples/exactly_once/main.go000066400000000000000000000261521461256741300241240ustar00rootroot00000000000000package main // SIGUSR1 toggle the pause/resume consumption import ( "context" "errors" "flag" "fmt" "log" "os" "os/signal" "strings" "sync" "syscall" "time" "github.com/IBM/sarama" ) // Sarama configuration options var ( brokers = "" version = "" group = "" topics = "" destinationTopic = "" oldest = true verbose = false ) func init() { flag.StringVar(&brokers, "brokers", "", "Kafka bootstrap brokers to connect to, as a comma separated list") flag.StringVar(&group, "group", "", "Kafka consumer group definition") flag.StringVar(&version, "version", sarama.DefaultVersion.String(), "Kafka cluster version") flag.StringVar(&topics, "topics", "", "Kafka topics to be consumed, as a comma separated list") flag.StringVar(&destinationTopic, "destination-topic", "", "Kafka topic where records will be copied from topics.") flag.StringVar(&assignor, "assignor", "range", "Consumer group partition assignment 
strategy (range, roundrobin, sticky)") flag.BoolVar(&oldest, "oldest", true, "Kafka consumer consume initial offset from oldest") flag.BoolVar(&verbose, "verbose", false, "Sarama logging") flag.Parse() if len(brokers) == 0 { panic("no Kafka bootstrap brokers defined, please set the -brokers flag") } if len(topics) == 0 { panic("no topics given to be consumed, please set the -topics flag") } if len(destinationTopic) == 0 { panic("no destination topics given to be consumed, please set the -destination-topics flag") } if len(group) == 0 { panic("no Kafka consumer group defined, please set the -group flag") } } func main() { keepRunning := true log.Println("Starting a new Sarama consumer") if verbose { sarama.Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags) } version, err := sarama.ParseKafkaVersion(version) if err != nil { log.Panicf("Error parsing Kafka version: %v", err) } /** * Construct a new Sarama configuration. * The Kafka cluster version has to be defined before the consumer/producer is initialized. */ config := sarama.NewConfig() config.Version = version switch assignor { case "sticky": config.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.NewBalanceStrategySticky()} case "roundrobin": config.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.NewBalanceStrategyRoundRobin()} case "range": config.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.NewBalanceStrategyRange()} default: log.Panicf("Unrecognized consumer group partition assignor: %s", assignor) } if oldest { config.Consumer.Offsets.Initial = sarama.OffsetOldest } config.Consumer.IsolationLevel = sarama.ReadCommitted config.Consumer.Offsets.AutoCommit.Enable = false producerProvider := newProducerProvider(strings.Split(brokers, ","), func() *sarama.Config { producerConfig := sarama.NewConfig() producerConfig.Version = version producerConfig.Net.MaxOpenRequests = 1 producerConfig.Producer.RequiredAcks = sarama.WaitForAll producerConfig.Producer.Idempotent = true producerConfig.Producer.Transaction.ID = "sarama" return producerConfig }) /** * Setup a new Sarama consumer group */ consumer := Consumer{ groupId: group, brokers: strings.Split(brokers, ","), producerProvider: producerProvider, ready: make(chan bool), } ctx, cancel := context.WithCancel(context.Background()) client, err := sarama.NewConsumerGroup(strings.Split(brokers, ","), group, config) if err != nil { log.Panicf("Error creating consumer group client: %v", err) } consumptionIsPaused := false wg := &sync.WaitGroup{} wg.Add(1) go func() { defer wg.Done() for { // `Consume` should be called inside an infinite loop, when a // server-side rebalance happens, the consumer session will need to be // recreated to get the new claims if err := client.Consume(ctx, strings.Split(topics, ","), &consumer); err != nil { if errors.Is(err, sarama.ErrClosedConsumerGroup) { return } log.Panicf("Error from consumer: %v", err) } // check if context was cancelled, signaling that the consumer should stop if ctx.Err() != nil { return } consumer.ready = make(chan bool) } }() <-consumer.ready // Await till the consumer has been set up log.Println("Sarama consumer up and running!...") sigusr1 := make(chan os.Signal, 1) signal.Notify(sigusr1, syscall.SIGUSR1) sigterm := make(chan os.Signal, 1) signal.Notify(sigterm, syscall.SIGINT, syscall.SIGTERM) for keepRunning { select { case <-ctx.Done(): log.Println("terminating: context cancelled") keepRunning = false case <-sigterm: log.Println("terminating: via 
signal") keepRunning = false case <-sigusr1: toggleConsumptionFlow(client, &consumptionIsPaused) } } cancel() wg.Wait() producerProvider.clear() if err = client.Close(); err != nil { log.Panicf("Error closing client: %v", err) } } func toggleConsumptionFlow(client sarama.ConsumerGroup, isPaused *bool) { if *isPaused { client.ResumeAll() log.Println("Resuming consumption") } else { client.PauseAll() log.Println("Pausing consumption") } *isPaused = !*isPaused } // Consumer represents a Sarama consumer group consumer type Consumer struct { ready chan bool groupId string brokers []string producerProvider *producerProvider } // Setup is run at the beginning of a new session, before ConsumeClaim func (consumer *Consumer) Setup(session sarama.ConsumerGroupSession) error { // Mark the consumer as ready close(consumer.ready) return nil } // Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited func (consumer *Consumer) Cleanup(sarama.ConsumerGroupSession) error { return nil } // ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages(). // Once the Messages() channel is closed, the Handler must finish its processing // loop and exit. func (consumer *Consumer) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { // NOTE: // Do not move the code below to a goroutine. // The `ConsumeClaim` itself is called within a goroutine, see: // https://github.com/IBM/sarama/blob/main/consumer_group.go#L27-L2 for { select { case message, ok := <-claim.Messages(): if !ok { log.Printf("message channel was closed") return nil } func() { producer := consumer.producerProvider.borrow(message.Topic, message.Partition) defer consumer.producerProvider.release(message.Topic, message.Partition, producer) startTime := time.Now() // BeginTxn must be called before any messages. err := producer.BeginTxn() if err != nil { log.Printf("Message consumer: unable to start transaction: %+v", err) return } // Produce current record in producer transaction. producer.Input() <- &sarama.ProducerMessage{ Topic: destinationTopic, Key: sarama.ByteEncoder(message.Key), Value: sarama.ByteEncoder(message.Value), } // You can add current message to this transaction err = producer.AddMessageToTxn(message, consumer.groupId, nil) if err != nil { log.Println("error on AddMessageToTxn") consumer.handleTxnError(producer, message, session, err, func() error { return producer.AddMessageToTxn(message, consumer.groupId, nil) }) return } // Commit producer transaction. err = producer.CommitTxn() if err != nil { log.Println("error on CommitTxn") consumer.handleTxnError(producer, message, session, err, func() error { return producer.CommitTxn() }) return } log.Printf("Message claimed [%s]: value = %s, timestamp = %v, topic = %s, partition = %d", time.Since(startTime), string(message.Value), message.Timestamp, message.Topic, message.Partition) }() // Should return when `session.Context()` is done. // If not, will raise `ErrRebalanceInProgress` or `read tcp :: i/o timeout` when kafka rebalance. see: // https://github.com/IBM/sarama/issues/1192 case <-session.Context().Done(): return nil } } } func (consumer *Consumer) handleTxnError(producer sarama.AsyncProducer, message *sarama.ConsumerMessage, session sarama.ConsumerGroupSession, err error, defaulthandler func() error) { log.Printf("Message consumer: unable to process transaction: %+v", err) for { if producer.TxnStatus()&sarama.ProducerTxnFlagFatalError != 0 { // fatal error. need to recreate producer. 
log.Printf("Message consumer: producer is in a fatal state, need to recreate it") // reset current consumer offset to retry consume this record. session.ResetOffset(message.Topic, message.Partition, message.Offset, "") return } if producer.TxnStatus()&sarama.ProducerTxnFlagAbortableError != 0 { err = producer.AbortTxn() if err != nil { log.Printf("Message consumer: unable to abort transaction: %+v", err) continue } // reset current consumer offset to retry consume this record. session.ResetOffset(message.Topic, message.Partition, message.Offset, "") return } // if not you can retry err = defaulthandler() if err == nil { return } } } type topicPartition struct { topic string partition int32 } type producerProvider struct { producersLock sync.Mutex producers map[topicPartition][]sarama.AsyncProducer producerProvider func(topic string, partition int32) sarama.AsyncProducer } func newProducerProvider(brokers []string, producerConfigurationProvider func() *sarama.Config) *producerProvider { provider := &producerProvider{ producers: make(map[topicPartition][]sarama.AsyncProducer), } provider.producerProvider = func(topic string, partition int32) sarama.AsyncProducer { config := producerConfigurationProvider() if config.Producer.Transaction.ID != "" { config.Producer.Transaction.ID = config.Producer.Transaction.ID + "-" + topic + "-" + fmt.Sprint(partition) } producer, err := sarama.NewAsyncProducer(brokers, config) if err != nil { return nil } return producer } return provider } func (p *producerProvider) borrow(topic string, partition int32) (producer sarama.AsyncProducer) { p.producersLock.Lock() defer p.producersLock.Unlock() tp := topicPartition{topic: topic, partition: partition} if producers, ok := p.producers[tp]; !ok || len(producers) == 0 { for { producer = p.producerProvider(topic, partition) if producer != nil { return } } } index := len(p.producers[tp]) - 1 producer = p.producers[tp][index] p.producers[tp] = p.producers[tp][:index] return } func (p *producerProvider) release(topic string, partition int32, producer sarama.AsyncProducer) { p.producersLock.Lock() defer p.producersLock.Unlock() if producer.TxnStatus()&sarama.ProducerTxnFlagInError != 0 { // Try to close it _ = producer.Close() return } tp := topicPartition{topic: topic, partition: partition} p.producers[tp] = append(p.producers[tp], producer) } func (p *producerProvider) clear() { p.producersLock.Lock() defer p.producersLock.Unlock() for _, producers := range p.producers { for _, producer := range producers { producer.Close() } } for _, producers := range p.producers { producers = producers[:0] } } golang-github-ibm-sarama-1.43.2/examples/http_server/000077500000000000000000000000001461256741300225335ustar00rootroot00000000000000golang-github-ibm-sarama-1.43.2/examples/http_server/.gitignore000066400000000000000000000000351461256741300245210ustar00rootroot00000000000000http_server http_server.test golang-github-ibm-sarama-1.43.2/examples/http_server/README.md000066400000000000000000000020551461256741300240140ustar00rootroot00000000000000# HTTP server example This HTTP server example shows you how to use the AsyncProducer and SyncProducer, and how to test them using mocks. The server simply sends the data of the HTTP request's query string to Kafka, and send a 200 result if that succeeds. For every request, it will send an access log entry to Kafka as well in the background. 
If you need to know whether a message was successfully sent to the Kafka cluster before you can send your HTTP response, using the `SyncProducer` is probably the simplest way to achieve this. If you don't care, e.g. for the access log, using the `AsyncProducer` will let you fire and forget. You can send the HTTP response, while the message is being produced in the background. One important thing to note is that both the `SyncProducer` and `AsyncProducer` are **thread-safe**. Go's `http.Server` handles requests concurrently in different goroutines, but you can use a single producer safely. This will actually achieve efficiency gains as the producer will be able to batch messages from concurrent requests together. golang-github-ibm-sarama-1.43.2/examples/http_server/go.mod000066400000000000000000000020761461256741300236460ustar00rootroot00000000000000module github.com/IBM/sarama/examples/http_server go 1.19 require github.com/IBM/sarama v1.43.1 require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/eapache/go-resiliency v1.6.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect github.com/eapache/queue v1.1.0 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect github.com/jcmturner/gofork v1.7.6 // indirect github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/klauspost/compress v1.17.8 // indirect github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect golang.org/x/crypto v0.22.0 // indirect golang.org/x/net v0.24.0 // indirect ) replace github.com/IBM/sarama => ../../ golang-github-ibm-sarama-1.43.2/examples/http_server/go.sum000066400000000000000000000200521461256741300236650ustar00rootroot00000000000000github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/eapache/go-resiliency v1.6.0 h1:CqGDTLtpwuWKn6Nj3uNUdflaq+/kIPsg0gfNzHton30= github.com/eapache/go-resiliency v1.6.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= 
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= 
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= golang-github-ibm-sarama-1.43.2/examples/http_server/http_server.go000066400000000000000000000162761461256741300254430ustar00rootroot00000000000000package main import ( "crypto/tls" "crypto/x509" "encoding/json" "flag" "fmt" "log" "net/http" "os" "strings" "time" "github.com/IBM/sarama" ) var ( addr = flag.String("addr", 
":8080", "The address to bind to") brokers = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The Kafka brokers to connect to, as a comma separated list") version = flag.String("version", sarama.DefaultVersion.String(), "Kafka cluster version") verbose = flag.Bool("verbose", false, "Turn on Sarama logging") certFile = flag.String("certificate", "", "The optional certificate file for client authentication") keyFile = flag.String("key", "", "The optional key file for client authentication") caFile = flag.String("ca", "", "The optional certificate authority file for TLS client authentication") tlsSkipVerify = flag.Bool("tls-skip-verify", false, "Whether to skip TLS server cert verification") ) func main() { flag.Parse() if *verbose { sarama.Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags) } if *brokers == "" { flag.PrintDefaults() os.Exit(1) } brokerList := strings.Split(*brokers, ",") log.Printf("Kafka brokers: %s", strings.Join(brokerList, ", ")) version, err := sarama.ParseKafkaVersion(*version) if err != nil { log.Panicf("Error parsing Kafka version: %v", err) } server := &Server{ DataCollector: newDataCollector(brokerList, version), AccessLogProducer: newAccessLogProducer(brokerList, version), } defer func() { if err := server.Close(); err != nil { log.Println("Failed to close server", err) } }() log.Fatal(server.Run(*addr)) } func createTlsConfiguration() (t *tls.Config) { if *certFile != "" && *keyFile != "" && *caFile != "" { cert, err := tls.LoadX509KeyPair(*certFile, *keyFile) if err != nil { log.Fatal(err) } caCert, err := os.ReadFile(*caFile) if err != nil { log.Fatal(err) } caCertPool := x509.NewCertPool() caCertPool.AppendCertsFromPEM(caCert) t = &tls.Config{ Certificates: []tls.Certificate{cert}, RootCAs: caCertPool, InsecureSkipVerify: *tlsSkipVerify, } } // will be nil by default if nothing is provided return t } type Server struct { DataCollector sarama.SyncProducer AccessLogProducer sarama.AsyncProducer } func (s *Server) Close() error { if err := s.DataCollector.Close(); err != nil { log.Println("Failed to shut down data collector cleanly", err) } if err := s.AccessLogProducer.Close(); err != nil { log.Println("Failed to shut down access log producer cleanly", err) } return nil } func (s *Server) Handler() http.Handler { return s.withAccessLog(s.collectQueryStringData()) } func (s *Server) Run(addr string) error { httpServer := &http.Server{ Addr: addr, Handler: s.Handler(), } log.Printf("Listening for requests on %s...\n", addr) return httpServer.ListenAndServe() } func (s *Server) collectQueryStringData() http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path != "/" { http.NotFound(w, r) return } // We are not setting a message key, which means that all messages will // be distributed randomly over the different partitions. partition, offset, err := s.DataCollector.SendMessage(&sarama.ProducerMessage{ Topic: "important", Value: sarama.StringEncoder(r.URL.RawQuery), }) if err != nil { w.WriteHeader(http.StatusInternalServerError) fmt.Fprintf(w, "Failed to store your data: %s", err) } else { // The tuple (topic, partition, offset) can be used as a unique identifier // for a message in a Kafka cluster. 
fmt.Fprintf(w, "Your data is stored with unique identifier important/%d/%d", partition, offset) } }) } type accessLogEntry struct { Method string `json:"method"` Host string `json:"host"` Path string `json:"path"` IP string `json:"ip"` ResponseTime float64 `json:"response_time"` encoded []byte err error } func (ale *accessLogEntry) ensureEncoded() { if ale.encoded == nil && ale.err == nil { ale.encoded, ale.err = json.Marshal(ale) } } func (ale *accessLogEntry) Length() int { ale.ensureEncoded() return len(ale.encoded) } func (ale *accessLogEntry) Encode() ([]byte, error) { ale.ensureEncoded() return ale.encoded, ale.err } func (s *Server) withAccessLog(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { started := time.Now() next.ServeHTTP(w, r) entry := &accessLogEntry{ Method: r.Method, Host: r.Host, Path: r.RequestURI, IP: r.RemoteAddr, ResponseTime: float64(time.Since(started)) / float64(time.Second), } // We will use the client's IP address as key. This will cause // all the access log entries of the same IP address to end up // on the same partition. s.AccessLogProducer.Input() <- &sarama.ProducerMessage{ Topic: "access_log", Key: sarama.StringEncoder(r.RemoteAddr), Value: entry, } }) } func newDataCollector(brokerList []string, version sarama.KafkaVersion) sarama.SyncProducer { // For the data collector, we are looking for strong consistency semantics. // Because we don't change the flush settings, sarama will try to produce messages // as fast as possible to keep latency low. config := sarama.NewConfig() config.Version = version config.Producer.RequiredAcks = sarama.WaitForAll // Wait for all in-sync replicas to ack the message config.Producer.Retry.Max = 10 // Retry up to 10 times to produce the message config.Producer.Return.Successes = true tlsConfig := createTlsConfiguration() if tlsConfig != nil { config.Net.TLS.Config = tlsConfig config.Net.TLS.Enable = true } // On the broker side, you may want to change the following settings to get // stronger consistency guarantees: // - For your broker, set `unclean.leader.election.enable` to false // - For the topic, you could increase `min.insync.replicas`. producer, err := sarama.NewSyncProducer(brokerList, config) if err != nil { log.Fatalln("Failed to start Sarama producer:", err) } return producer } func newAccessLogProducer(brokerList []string, version sarama.KafkaVersion) sarama.AsyncProducer { // For the access log, we are looking for AP semantics, with high throughput. // By creating batches of compressed messages, we reduce network I/O at a cost of more latency. config := sarama.NewConfig() config.Version = version tlsConfig := createTlsConfiguration() if tlsConfig != nil { config.Net.TLS.Enable = true config.Net.TLS.Config = tlsConfig } config.Producer.RequiredAcks = sarama.WaitForLocal // Only wait for the leader to ack config.Producer.Compression = sarama.CompressionSnappy // Compress messages config.Producer.Flush.Frequency = 500 * time.Millisecond // Flush batches every 500ms producer, err := sarama.NewAsyncProducer(brokerList, config) if err != nil { log.Fatalln("Failed to start Sarama producer:", err) } // We will just log to STDOUT if we're not able to produce messages. // Note: messages will only be returned here after all retry attempts are exhausted. 
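	// Draining Errors() here is required rather than optional: sarama.NewConfig()
	// enables Producer.Return.Errors by default, and an AsyncProducer whose Errors()
	// channel is never read will eventually block once that channel fills up.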
go func() { for err := range producer.Errors() { log.Println("Failed to write access log entry:", err) } }() return producer } golang-github-ibm-sarama-1.43.2/examples/http_server/http_server_test.go000066400000000000000000000056111461256741300264710ustar00rootroot00000000000000package main import ( "io" "net/http" "net/http/httptest" "testing" "github.com/IBM/sarama" "github.com/IBM/sarama/mocks" ) // In normal operation, we expect one access log entry, // and one data collector entry. Let's assume both will succeed. // We should return a HTTP 200 status. func TestCollectSuccessfully(t *testing.T) { dataCollectorMock := mocks.NewSyncProducer(t, nil) dataCollectorMock.ExpectSendMessageAndSucceed() accessLogProducerMock := mocks.NewAsyncProducer(t, nil) accessLogProducerMock.ExpectInputAndSucceed() // Now, use dependency injection to use the mocks. s := &Server{ DataCollector: dataCollectorMock, AccessLogProducer: accessLogProducerMock, } // The Server's Close call is important; it will call Close on // the two mock producers, which will then validate whether all // expectations are resolved. defer safeClose(t, s) req, err := http.NewRequest("GET", "http://example.com/?data", nil) if err != nil { t.Fatal(err) } res := httptest.NewRecorder() s.Handler().ServeHTTP(res, req) if res.Code != 200 { t.Errorf("Expected HTTP status 200, found %d", res.Code) } if res.Body.String() != "Your data is stored with unique identifier important/0/1" { t.Error("Unexpected response body", res.Body) } } // Now, let's see if we handle the case of not being able to produce // to the data collector properly. In this case we should return a 500 status. func TestCollectionFailure(t *testing.T) { dataCollectorMock := mocks.NewSyncProducer(t, nil) dataCollectorMock.ExpectSendMessageAndFail(sarama.ErrRequestTimedOut) accessLogProducerMock := mocks.NewAsyncProducer(t, nil) accessLogProducerMock.ExpectInputAndSucceed() s := &Server{ DataCollector: dataCollectorMock, AccessLogProducer: accessLogProducerMock, } defer safeClose(t, s) req, err := http.NewRequest("GET", "http://example.com/?data", nil) if err != nil { t.Fatal(err) } res := httptest.NewRecorder() s.Handler().ServeHTTP(res, req) if res.Code != 500 { t.Errorf("Expected HTTP status 500, found %d", res.Code) } } // We don't expect any data collector calls because the path is wrong, // so we are not setting any expectations on the dataCollectorMock. It // will still generate an access log entry though. 
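// Note that the mock SyncProducer reports a test failure if SendMessage is called
// without a matching expectation, so leaving dataCollectorMock without expectations
// also asserts that the handler never produces a data-collector message for this path.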
func TestWrongPath(t *testing.T) { dataCollectorMock := mocks.NewSyncProducer(t, nil) accessLogProducerMock := mocks.NewAsyncProducer(t, nil) accessLogProducerMock.ExpectInputAndSucceed() s := &Server{ DataCollector: dataCollectorMock, AccessLogProducer: accessLogProducerMock, } defer safeClose(t, s) req, err := http.NewRequest("GET", "http://example.com/wrong?data", nil) if err != nil { t.Fatal(err) } res := httptest.NewRecorder() s.Handler().ServeHTTP(res, req) if res.Code != 404 { t.Errorf("Expected HTTP status 404, found %d", res.Code) } } func safeClose(t *testing.T, o io.Closer) { if err := o.Close(); err != nil { t.Error(err) } } golang-github-ibm-sarama-1.43.2/examples/interceptors/000077500000000000000000000000001461256741300227075ustar00rootroot00000000000000golang-github-ibm-sarama-1.43.2/examples/interceptors/README.md000066400000000000000000000122631461256741300241720ustar00rootroot00000000000000# Interceptors Example It creates a *Producer* interceptor to produce some [OpenTelemetry](https://github.com/open-telemetry/opentelemetry-go/) spans and also modifies the intercepted message to include some headers. ``` go conf.Producer.Interceptors = []sarama.ProducerInterceptor{xxx} ``` ## Run the example - `go run main.go trace_interceptor.go`. - or `go build and pass different parameters. ``` sh go build && ./interceptors --h Usage of ./interceptors: -brokers string The Kafka brokers to connect to, as a comma separated list (default "localhost:9092") -topic string The Kafka topic to use (default "default_topic") ``` App will output OpenTelemetry spans for every intercepted message, i.e: ``` go run main.go trace_interceptor.go [Sarama] 2020/08/11 11:35:56 Initializing new client [Sarama] 2020/08/11 11:35:56 ClientID is the default of 'sarama', you should consider setting it to something application-specific. [Sarama] 2020/08/11 11:35:56 ClientID is the default of 'sarama', you should consider setting it to something application-specific. 
[Sarama] 2020/08/11 11:35:56 client/metadata fetching metadata for all topics from broker localhost:9092 [Sarama] 2020/08/11 11:35:56 Connected to broker at localhost:9092 (unregistered) [Sarama] 2020/08/11 11:35:56 client/brokers registered new broker #1 at localhost:9092 [Sarama] 2020/08/11 11:35:56 Successfully initialized new client INFO[0000] Starting to produce 2 messages every 5s INFO[0005] producing 2 messages at 2020-08-11T11:36:01-07:00 topic=default_topic [ { "SpanContext": { "TraceID": "2c4210c1d6c2ebe758eb41cbc95a0478", "SpanID": "046bfc6d6db17ed7", "TraceFlags": 1 }, "ParentSpanID": "0000000000000000", "SpanKind": 1, "Name": "default_topic", "StartTime": "2020-08-11T11:36:01.57487-07:00", "EndTime": "2020-08-11T11:36:01.574891849-07:00", "Attributes": [ { "Key": "messaging.destination_kind", "Value": { "Type": "STRING", "Value": "topic" } }, { "Key": "span.otel.kind", "Value": { "Type": "STRING", "Value": "PRODUCER" } }, { "Key": "messaging.system", "Value": { "Type": "STRING", "Value": "kafka" } }, { "Key": "net.transport", "Value": { "Type": "STRING", "Value": "IP.TCP" } }, { "Key": "messaging.url", "Value": { "Type": "STRING", "Value": "localhost:9092" } }, { "Key": "messaging.destination", "Value": { "Type": "STRING", "Value": "default_topic" } }, { "Key": "messaging.message_id", "Value": { "Type": "STRING", "Value": "046bfc6d6db17ed7" } } ], "MessageEvents": null, "Links": null, "StatusCode": 0, "StatusMessage": "", "HasRemoteParent": false, "DroppedAttributeCount": 0, "DroppedMessageEventCount": 0, "DroppedLinkCount": 0, "ChildSpanCount": 0, "Resource": null, "InstrumentationLibrary": { "Name": "shopify.com/sarama/examples/interceptors", "Version": "" } } ] [{"SpanContext":{"TraceID":"b3922fbbaab23b16401c353b0ff9ce6b","SpanID":"269f5133c0d0116e","TraceFlags":1},"ParentSpanID":"0000000000000000","SpanKind":1,"Name":"default_topic","StartTime":"2020-08-11T11:36:01.575388-07:00","EndTime":"2020-08-11T11:36:01.575399065-07:00","Attributes":[{"Key":"messaging.destination_kind","Value":{"Type":"STRING","Value":"topic"}},{"Key":"span.otel.kind","Value":{"Type":"STRING","Value":"PRODUCER"}},{"Key":"messaging.system","Value":{"Type":"STRING","Value":"kafka"}},{"Key":"net.transport","Value":{"Type":"STRING","Value":"IP.TCP"}},{"Key":"messaging.url","Value":{"Type":"STRING","Value":"localhost:9092"}},{"Key":"messaging.destination","Value":{"Type":"STRING","Value":"default_topic"}},{"Key":"messaging.message_id","Value":{"Type":"STRING","Value":"269f5133c0d0116e"}}],"MessageEvents":null,"Links":null,"StatusCode":0,"StatusMessage":"","HasRemoteParent":false,"DroppedAttributeCount":0,"DroppedMessageEventCount":0,"DroppedLinkCount":0,"ChildSpanCount":0,"Resource":null,"InstrumentationLibrary":{"Name":"shopify.com/sarama/examples/interceptors","Version":""}}] [Sarama] 2020/08/11 11:36:01 ClientID is the default of 'sarama', you should consider setting it to something application-specific. [Sarama] 2020/08/11 11:36:01 producer/broker/1 starting up [Sarama] 2020/08/11 11:36:01 producer/broker/1 state change to [open] on default_topic/0 [Sarama] 2020/08/11 11:36:01 Connected to broker at localhost:9092 (registered as #1) ^CINFO[0005] terminating the program INFO[0005] Bye :) [Sarama] 2020/08/11 11:36:02 Producer shutting down. 
``` ## Check the produced intercepted messages Check that messages have some headers added by the interceptor: ``` sh kafkacat -Cb localhost:9092 -t default_topic -f '\n- %s\nheaders: %h' ``` ``` headers: trace_id=235b3424775d8b2f9bf21e458496f447,span_id=50da1552c105e712,message_id=50da1552c105e712 - test message 1/2 from kafka-client-go-test at 2020-08-11T11:36:01-07:00 headers: trace_id=2c4210c1d6c2ebe758eb41cbc95a0478,span_id=046bfc6d6db17ed7,message_id=046bfc6d6db17ed7 - test message 2/2 from kafka-client-go-test at 2020-08-11T11:36:01-07:00 % Reached end of topic default_topic [0] at offset 444 ``` golang-github-ibm-sarama-1.43.2/examples/interceptors/go.mod000066400000000000000000000027401461256741300240200ustar00rootroot00000000000000module github.com/IBM/sarama/examples/interceptors go 1.19 require ( github.com/IBM/sarama v1.43.1 go.opentelemetry.io/otel v1.24.0 go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.24.0 go.opentelemetry.io/otel/sdk v1.24.0 go.opentelemetry.io/otel/trace v1.24.0 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/eapache/go-resiliency v1.6.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect github.com/eapache/queue v1.1.0 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect github.com/jcmturner/gofork v1.7.6 // indirect github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/klauspost/compress v1.17.8 // indirect github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.24.0 // indirect golang.org/x/crypto v0.22.0 // indirect golang.org/x/net v0.24.0 // indirect golang.org/x/sys v0.19.0 // indirect ) replace github.com/IBM/sarama => ../../ golang-github-ibm-sarama-1.43.2/examples/interceptors/go.sum000066400000000000000000000234351461256741300240510ustar00rootroot00000000000000github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/eapache/go-resiliency v1.6.0 h1:CqGDTLtpwuWKn6Nj3uNUdflaq+/kIPsg0gfNzHton30= github.com/eapache/go-resiliency v1.6.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= 
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod 
h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.24.0 h1:JYE2HM7pZbOt5Jhk8ndWZTUWYOVift2cHjXVMkPdmdc= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.24.0/go.mod h1:yMb/8c6hVsnma0RpsBMNo0fEiQKeclawtgaIaOp2MLY= go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= go.opentelemetry.io/otel/sdk/metric v1.24.0 h1:yyMQrPzF+k88/DbH7o4FMAs80puqd+9osbiBrJrz/w8= go.opentelemetry.io/otel/sdk/metric v1.24.0/go.mod h1:I6Y5FjH6rvEnTTAYQz3Mmv2kl6Ek5IIrmwTLqMrrOE0= go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= golang-github-ibm-sarama-1.43.2/examples/interceptors/main.go000066400000000000000000000043141461256741300241640ustar00rootroot00000000000000package main import ( "context" "flag" "fmt" "log" "os" "os/signal" "strings" "time" stdout "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" "github.com/IBM/sarama" ) var ( brokers = flag.String("brokers", "localhost:9092", "The Kafka brokers to connect to, as a comma separated list") version = flag.String("version", sarama.DefaultVersion.String(), "Kafka cluster version") topic = flag.String("topic", "default_topic", "The Kafka topic to use") logger = log.New(os.Stdout, "[OTelInterceptor] ", log.LstdFlags) ) func main() { flag.Parse() if *brokers == "" { logger.Fatalln("at least one broker is required") } splitBrokers := strings.Split(*brokers, ",") sarama.Logger = log.New(os.Stdout, "[Sarama] ", log.LstdFlags) version, err := sarama.ParseKafkaVersion(*version) if err != nil { log.Panicf("Error parsing Kafka version: %v", err) } // oTel stdout example pusher, err := stdout.New() if err != nil { logger.Fatalf("failed to initialize stdout export pipeline: %v", err) } defer pusher.Shutdown(context.Background()) // simple sarama producer that adds a new producer interceptor conf := sarama.NewConfig() conf.Version = version conf.Producer.Interceptors = []sarama.ProducerInterceptor{NewOTelInterceptor(splitBrokers)} producer, err := sarama.NewAsyncProducer(splitBrokers, conf) if err != nil { panic("Couldn't create a Kafka producer") } defer producer.AsyncClose() // kill -2, trap SIGINT to trigger a shutdown signals := make(chan os.Signal, 1) signal.Notify(signals, os.Interrupt) // ticker bulkSize := 2 duration := 5 * time.Second ticker := time.NewTicker(duration) logger.Printf("Starting to produce %v messages every %v", bulkSize, 
duration) for { select { case t := <-ticker.C: now := t.Format(time.RFC3339) logger.Printf("\nproducing %v messages to topic %s at %s", bulkSize, *topic, now) for i := 0; i < bulkSize; i++ { producer.Input() <- &sarama.ProducerMessage{ Topic: *topic, Key: nil, Value: sarama.StringEncoder(fmt.Sprintf("test message %v/%v from kafka-client-go-test at %s", i+1, bulkSize, now)), } } case <-signals: logger.Println("terminating the program") logger.Println("Bye :)") return } } } golang-github-ibm-sarama-1.43.2/examples/interceptors/trace_interceptor.go000066400000000000000000000052471461256741300267620ustar00rootroot00000000000000package main import ( "context" "strings" "go.opentelemetry.io/otel/attribute" sdktrace "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/trace" "github.com/IBM/sarama" ) type OTelInterceptor struct { tracer trace.Tracer fixedAttrs []attribute.KeyValue } // NewOTelInterceptor processes span for intercepted messages and add some // headers with the span data. func NewOTelInterceptor(brokers []string) *OTelInterceptor { oi := OTelInterceptor{} oi.tracer = sdktrace.NewTracerProvider().Tracer("github.com/IBM/sarama/examples/interceptors") // These are based on the spec, which was reachable as of 2020-05-15 // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/messaging.md oi.fixedAttrs = []attribute.KeyValue{ attribute.String("messaging.destination_kind", "topic"), attribute.String("span.otel.kind", "PRODUCER"), attribute.String("messaging.system", "kafka"), attribute.String("net.transport", "IP.TCP"), attribute.String("messaging.url", strings.Join(brokers, ",")), } return &oi } const ( MessageIDHeaderName = "message_id" SpanHeaderName = "span_id" TraceHeaderName = "trace_id" ) func shouldIgnoreMsg(msg *sarama.ProducerMessage) bool { // check message hasn't been here before (retries) var traceFound, spanFound, msgIDFound bool for _, h := range msg.Headers { if string(h.Key) == TraceHeaderName { traceFound = true continue } if string(h.Key) == SpanHeaderName { spanFound = true continue } if string(h.Key) == MessageIDHeaderName { msgIDFound = true } } return traceFound && spanFound && msgIDFound } func (oi *OTelInterceptor) OnSend(msg *sarama.ProducerMessage) { if shouldIgnoreMsg(msg) { return } _, span := oi.tracer.Start(context.TODO(), msg.Topic) defer span.End() spanContext := span.SpanContext() attWithTopic := append( oi.fixedAttrs, attribute.String("messaging.destination", msg.Topic), attribute.String("messaging.message_id", spanContext.SpanID().String()), ) span.SetAttributes(attWithTopic...) // remove existing partial tracing headers if exists noTraceHeaders := msg.Headers[:0] for _, h := range msg.Headers { key := string(h.Key) if key != TraceHeaderName && key != SpanHeaderName && key != MessageIDHeaderName { noTraceHeaders = append(noTraceHeaders, h) } } traceHeaders := []sarama.RecordHeader{ {Key: []byte(TraceHeaderName), Value: []byte(spanContext.TraceID().String())}, {Key: []byte(SpanHeaderName), Value: []byte(spanContext.SpanID().String())}, {Key: []byte(MessageIDHeaderName), Value: []byte(spanContext.SpanID().String())}, } msg.Headers = append(noTraceHeaders, traceHeaders...) 
} golang-github-ibm-sarama-1.43.2/examples/sasl_scram_client/000077500000000000000000000000001461256741300236535ustar00rootroot00000000000000golang-github-ibm-sarama-1.43.2/examples/sasl_scram_client/.gitignore000066400000000000000000000000221461256741300256350ustar00rootroot00000000000000sasl_scram_client golang-github-ibm-sarama-1.43.2/examples/sasl_scram_client/README.md000066400000000000000000000002341461256741300251310ustar00rootroot00000000000000Example command line: ```./sasl_scram_client -brokers localhost:9094 -username foo -passwd a_password -topic topic_name -tls -algorithm [sha256|sha512]``` golang-github-ibm-sarama-1.43.2/examples/sasl_scram_client/go.mod000066400000000000000000000023561461256741300247670ustar00rootroot00000000000000module github.com/IBM/sarama/examples/sasl_scram_client go 1.19 require ( github.com/IBM/sarama v1.43.1 github.com/xdg-go/scram v1.1.2 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/eapache/go-resiliency v1.6.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect github.com/eapache/queue v1.1.0 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect github.com/jcmturner/gofork v1.7.6 // indirect github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/klauspost/compress v1.17.8 // indirect github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/stringprep v1.0.4 // indirect golang.org/x/crypto v0.22.0 // indirect golang.org/x/net v0.24.0 // indirect golang.org/x/text v0.14.0 // indirect ) replace github.com/IBM/sarama => ../../ golang-github-ibm-sarama-1.43.2/examples/sasl_scram_client/go.sum000066400000000000000000000214201461256741300250050ustar00rootroot00000000000000github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/eapache/go-resiliency v1.6.0 h1:CqGDTLtpwuWKn6Nj3uNUdflaq+/kIPsg0gfNzHton30= github.com/eapache/go-resiliency v1.6.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod 
h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= 
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= golang-github-ibm-sarama-1.43.2/examples/sasl_scram_client/main.go000066400000000000000000000116311461256741300251300ustar00rootroot00000000000000package main import ( "crypto/tls" "crypto/x509" "flag" "log" "os" "os/signal" "strings" "github.com/IBM/sarama" ) func init() { sarama.Logger = log.New(os.Stdout, "[Sarama] ", log.LstdFlags) } var ( brokers = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The Kafka brokers to connect to, as a comma separated list") version = flag.String("version", sarama.DefaultVersion.String(), "Kafka cluster version") userName = flag.String("username", "", "The SASL username") passwd = flag.String("passwd", "", "The SASL password") algorithm = flag.String("algorithm", "", "The SASL SCRAM SHA algorithm sha256 or sha512 as mechanism") topic = flag.String("topic", "default_topic", "The Kafka topic to use") certFile = flag.String("certificate", "", "The optional certificate file for client authentication") keyFile = flag.String("key", "", "The optional key file for client authentication") caFile = flag.String("ca", "", "The optional certificate authority file for TLS client authentication") tlsSkipVerify = flag.Bool("tls-skip-verify", false, "Whether to skip TLS server cert verification") useTLS = flag.Bool("tls", false, "Use TLS to communicate with the cluster") mode = flag.String("mode", "produce", "Mode to run in: \"produce\" to produce, \"consume\" to consume") logMsg = flag.Bool("logmsg", false, "True to log consumed messages to console") logger = log.New(os.Stdout, "[Producer] ", log.LstdFlags) ) func createTLSConfiguration() (t *tls.Config) { t = &tls.Config{ InsecureSkipVerify: *tlsSkipVerify, } if *certFile != "" && *keyFile != "" && *caFile != "" { cert, err := tls.LoadX509KeyPair(*certFile, *keyFile) if err != nil { log.Fatal(err) } caCert, err := os.ReadFile(*caFile) if err != nil { log.Fatal(err) } caCertPool := x509.NewCertPool() caCertPool.AppendCertsFromPEM(caCert) t = &tls.Config{ Certificates: []tls.Certificate{cert}, RootCAs: caCertPool, InsecureSkipVerify: *tlsSkipVerify, } } return t } func main() { flag.Parse() if *brokers == "" { log.Fatalln("at least one broker is required") } splitBrokers := strings.Split(*brokers, ",") version, err := sarama.ParseKafkaVersion(*version) if err != nil { log.Panicf("Error parsing Kafka version: %v", err) } if *userName == "" { log.Fatalln("SASL username is required") } if *passwd == "" { log.Fatalln("SASL password is required") } conf := sarama.NewConfig() conf.Producer.Retry.Max = 1 conf.Producer.RequiredAcks = sarama.WaitForAll conf.Producer.Return.Successes = true conf.Version = version conf.ClientID = "sasl_scram_client" conf.Metadata.Full = true conf.Net.SASL.Enable = true conf.Net.SASL.User = *userName conf.Net.SASL.Password = *passwd conf.Net.SASL.Handshake = true if *algorithm == "sha512" { conf.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: SHA512} } conf.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA512 } else if *algorithm == "sha256" { 
conf.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: SHA256} } conf.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA256 } else { log.Fatalf("invalid SHA algorithm \"%s\": can be either \"sha256\" or \"sha512\"", *algorithm) } if *useTLS { conf.Net.TLS.Enable = true conf.Net.TLS.Config = createTLSConfiguration() } if *mode == "consume" { consumer, err := sarama.NewConsumer(splitBrokers, conf) if err != nil { panic(err) } log.Println("consumer created") defer func() { if err := consumer.Close(); err != nil { log.Fatalln(err) } }() log.Println("commence consuming") partitionConsumer, err := consumer.ConsumePartition(*topic, 0, sarama.OffsetOldest) if err != nil { panic(err) } defer func() { if err := partitionConsumer.Close(); err != nil { log.Fatalln(err) } }() // Trap SIGINT to trigger a shutdown. signals := make(chan os.Signal, 1) signal.Notify(signals, os.Interrupt) consumed := 0 ConsumerLoop: for { log.Println("in the for") select { case msg := <-partitionConsumer.Messages(): log.Printf("Consumed message offset %d\n", msg.Offset) if *logMsg { log.Printf("KEY: %s VALUE: %s", msg.Key, msg.Value) } consumed++ case <-signals: break ConsumerLoop } } log.Printf("Consumed: %d\n", consumed) } else { syncProducer, err := sarama.NewSyncProducer(splitBrokers, conf) if err != nil { logger.Fatalln("failed to create producer: ", err) } partition, offset, err := syncProducer.SendMessage(&sarama.ProducerMessage{ Topic: *topic, Value: sarama.StringEncoder("test_message"), }) if err != nil { logger.Fatalln("failed to send message to ", *topic, err) } logger.Printf("wrote message at partition: %d, offset: %d", partition, offset) _ = syncProducer.Close() } logger.Println("Bye now !") } golang-github-ibm-sarama-1.43.2/examples/sasl_scram_client/scram_client.go000066400000000000000000000013671461256741300266540ustar00rootroot00000000000000package main import ( "crypto/sha256" "crypto/sha512" "github.com/xdg-go/scram" ) var ( SHA256 scram.HashGeneratorFcn = sha256.New SHA512 scram.HashGeneratorFcn = sha512.New ) type XDGSCRAMClient struct { *scram.Client *scram.ClientConversation scram.HashGeneratorFcn } func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) { x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID) if err != nil { return err } x.ClientConversation = x.Client.NewConversation() return nil } func (x *XDGSCRAMClient) Step(challenge string) (response string, err error) { response, err = x.ClientConversation.Step(challenge) return } func (x *XDGSCRAMClient) Done() bool { return x.ClientConversation.Done() } golang-github-ibm-sarama-1.43.2/examples/txn_producer/000077500000000000000000000000001461256741300227025ustar00rootroot00000000000000golang-github-ibm-sarama-1.43.2/examples/txn_producer/.gitignore000066400000000000000000000000151461256741300246660ustar00rootroot00000000000000txn_producer golang-github-ibm-sarama-1.43.2/examples/txn_producer/README.md000066400000000000000000000010011461256741300241510ustar00rootroot00000000000000# Transactional producer example This example shows you how to use the Sarama transactional producer. The example simply starts some goroutine that produce endlesslly on associated topic. ```bash $ go run main.go -brokers="127.0.0.1:9092" -topic "sarama" -producers 10 -records-number 10000 ``` To ensure transactional-id uniqueness it implement some ***ProducerProvider*** that build a producer appending an integer that grow when producer is created. 
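In outline, each producer goroutine runs a loop like the one below (a simplified sketch of `produceTestRecord` in `main.go`; `producer` is a transactional `AsyncProducer` borrowed from the provider, and `topic` / `recordsNumber` are the command-line flags):

```go
if err := producer.BeginTxn(); err != nil {
	log.Printf("unable to start txn %s\n", err)
	return
}
for i := int64(0); i < recordsNumber; i++ {
	producer.Input() <- &sarama.ProducerMessage{Topic: topic, Value: sarama.StringEncoder("test")}
}
if err := producer.CommitTxn(); err != nil {
	// On failure, inspect producer.TxnStatus(): retry CommitTxn, call AbortTxn,
	// or recreate the producer on a fatal error (see produceTestRecord for the full handling).
	log.Printf("unable to commit txn %s\n", err)
}
```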
You can also see record-rate each 5s printing on stdout. golang-github-ibm-sarama-1.43.2/examples/txn_producer/go.mod000066400000000000000000000020701461256741300240070ustar00rootroot00000000000000module github.com/IBM/sarama/examples/txn_producer go 1.19 require ( github.com/IBM/sarama v1.43.1 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/eapache/go-resiliency v1.6.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect github.com/eapache/queue v1.1.0 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect github.com/jcmturner/gofork v1.7.6 // indirect github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/klauspost/compress v1.17.8 // indirect github.com/pierrec/lz4/v4 v4.1.21 // indirect golang.org/x/crypto v0.22.0 // indirect golang.org/x/net v0.24.0 // indirect ) replace github.com/IBM/sarama => ../../ golang-github-ibm-sarama-1.43.2/examples/txn_producer/go.sum000066400000000000000000000200521461256741300240340ustar00rootroot00000000000000github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/eapache/go-resiliency v1.6.0 h1:CqGDTLtpwuWKn6Nj3uNUdflaq+/kIPsg0gfNzHton30= github.com/eapache/go-resiliency v1.6.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= github.com/jcmturner/dnsutils/v2 v2.0.0 
h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod 
h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= golang-github-ibm-sarama-1.43.2/examples/txn_producer/main.go000066400000000000000000000134001461256741300241530ustar00rootroot00000000000000package main // SIGUSR1 toggle the pause/resume consumption import ( "context" "flag" "fmt" "log" "os" "os/signal" "strings" "sync" "syscall" "time" _ "net/http/pprof" "github.com/rcrowley/go-metrics" "github.com/IBM/sarama" ) // Sarama configuration options var ( brokers = "" version = "" topic = "" producers = 1 verbose = false recordsNumber int64 = 1 recordsRate = metrics.GetOrRegisterMeter("records.rate", nil) ) func init() { flag.StringVar(&brokers, "brokers", "", "Kafka bootstrap brokers to connect to, as a comma separated list") flag.StringVar(&version, "version", sarama.DefaultVersion.String(), "Kafka cluster version") flag.StringVar(&topic, "topic", "", "Kafka topics where records will be copied from topics.") flag.IntVar(&producers, "producers", 10, "Number of concurrent producers") flag.Int64Var(&recordsNumber, 
"records-number", 10000, "Number of records sent per loop") flag.BoolVar(&verbose, "verbose", false, "Sarama logging") flag.Parse() if len(brokers) == 0 { panic("no Kafka bootstrap brokers defined, please set the -brokers flag") } if len(topic) == 0 { panic("no topic given to be consumed, please set the -topic flag") } } func main() { keepRunning := true log.Println("Starting a new Sarama producer") if verbose { sarama.Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags) } version, err := sarama.ParseKafkaVersion(version) if err != nil { log.Panicf("Error parsing Kafka version: %v", err) } producerProvider := newProducerProvider(strings.Split(brokers, ","), func() *sarama.Config { config := sarama.NewConfig() config.Version = version config.Producer.Idempotent = true config.Producer.Return.Errors = false config.Producer.RequiredAcks = sarama.WaitForAll config.Producer.Partitioner = sarama.NewRoundRobinPartitioner config.Producer.Transaction.Retry.Backoff = 10 config.Producer.Transaction.ID = "txn_producer" config.Net.MaxOpenRequests = 1 return config }) go metrics.Log(metrics.DefaultRegistry, 5*time.Second, log.New(os.Stderr, "metrics: ", log.LstdFlags)) ctx, cancel := context.WithCancel(context.Background()) var wg sync.WaitGroup for i := 0; i < producers; i++ { wg.Add(1) go func() { defer wg.Done() for { select { case <-ctx.Done(): return default: produceTestRecord(producerProvider) } } }() } sigterm := make(chan os.Signal, 1) signal.Notify(sigterm, syscall.SIGINT, syscall.SIGTERM) for keepRunning { <-sigterm log.Println("terminating: via signal") keepRunning = false } cancel() wg.Wait() producerProvider.clear() } func produceTestRecord(producerProvider *producerProvider) { producer := producerProvider.borrow() defer producerProvider.release(producer) // Start kafka transaction err := producer.BeginTxn() if err != nil { log.Printf("unable to start txn %s\n", err) return } // Produce some records in transaction var i int64 for i = 0; i < recordsNumber; i++ { producer.Input() <- &sarama.ProducerMessage{Topic: topic, Key: nil, Value: sarama.StringEncoder("test")} } // commit transaction err = producer.CommitTxn() if err != nil { log.Printf("Producer: unable to commit txn %s\n", err) for { if producer.TxnStatus()&sarama.ProducerTxnFlagFatalError != 0 { // fatal error. need to recreate producer. log.Printf("Producer: producer is in a fatal state, need to recreate it") break } // If producer is in abortable state, try to abort current transaction. if producer.TxnStatus()&sarama.ProducerTxnFlagAbortableError != 0 { err = producer.AbortTxn() if err != nil { // If an error occured just retry it. log.Printf("Producer: unable to abort transaction: %+v", err) continue } break } // if not you can retry err = producer.CommitTxn() if err != nil { log.Printf("Producer: unable to commit txn %s\n", err) continue } } return } recordsRate.Mark(recordsNumber) } // pool of producers that ensure transactional-id is unique. type producerProvider struct { transactionIdGenerator int32 producersLock sync.Mutex producers []sarama.AsyncProducer producerProvider func() sarama.AsyncProducer } func newProducerProvider(brokers []string, producerConfigurationProvider func() *sarama.Config) *producerProvider { provider := &producerProvider{} provider.producerProvider = func() sarama.AsyncProducer { config := producerConfigurationProvider() suffix := provider.transactionIdGenerator // Append transactionIdGenerator to current config.Producer.Transaction.ID to ensure transaction-id uniqueness. 
if config.Producer.Transaction.ID != "" { provider.transactionIdGenerator++ config.Producer.Transaction.ID = config.Producer.Transaction.ID + "-" + fmt.Sprint(suffix) } producer, err := sarama.NewAsyncProducer(brokers, config) if err != nil { return nil } return producer } return provider } func (p *producerProvider) borrow() (producer sarama.AsyncProducer) { p.producersLock.Lock() defer p.producersLock.Unlock() if len(p.producers) == 0 { for { producer = p.producerProvider() if producer != nil { return } } } index := len(p.producers) - 1 producer = p.producers[index] p.producers = p.producers[:index] return } func (p *producerProvider) release(producer sarama.AsyncProducer) { p.producersLock.Lock() defer p.producersLock.Unlock() // If released producer is erroneous close it and don't return it to the producer pool. if producer.TxnStatus()&sarama.ProducerTxnFlagInError != 0 { // Try to close it _ = producer.Close() return } p.producers = append(p.producers, producer) } func (p *producerProvider) clear() { p.producersLock.Lock() defer p.producersLock.Unlock() for _, producer := range p.producers { producer.Close() } p.producers = p.producers[:0] } golang-github-ibm-sarama-1.43.2/fetch_request.go000066400000000000000000000176461461256741300215560ustar00rootroot00000000000000package sarama import "fmt" type fetchRequestBlock struct { Version int16 // currentLeaderEpoch contains the current leader epoch of the partition. currentLeaderEpoch int32 // fetchOffset contains the message offset. fetchOffset int64 // logStartOffset contains the earliest available offset of the follower // replica. The field is only used when the request is sent by the // follower. logStartOffset int64 // maxBytes contains the maximum bytes to fetch from this partition. See // KIP-74 for cases where this limit may not be honored. maxBytes int32 } func (b *fetchRequestBlock) encode(pe packetEncoder, version int16) error { b.Version = version if b.Version >= 9 { pe.putInt32(b.currentLeaderEpoch) } pe.putInt64(b.fetchOffset) if b.Version >= 5 { pe.putInt64(b.logStartOffset) } pe.putInt32(b.maxBytes) return nil } func (b *fetchRequestBlock) decode(pd packetDecoder, version int16) (err error) { b.Version = version if b.Version >= 9 { if b.currentLeaderEpoch, err = pd.getInt32(); err != nil { return err } } if b.fetchOffset, err = pd.getInt64(); err != nil { return err } if b.Version >= 5 { if b.logStartOffset, err = pd.getInt64(); err != nil { return err } } if b.maxBytes, err = pd.getInt32(); err != nil { return err } return nil } // FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See // https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. The KIP is at // https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes type FetchRequest struct { // Version defines the protocol version to use for encode and decode Version int16 // ReplicaID contains the broker ID of the follower, of -1 if this request // is from a consumer. // ReplicaID int32 // MaxWaitTime contains the maximum time in milliseconds to wait for the response. MaxWaitTime int32 // MinBytes contains the minimum bytes to accumulate in the response. MinBytes int32 // MaxBytes contains the maximum bytes to fetch. See KIP-74 for cases // where this limit may not be honored. MaxBytes int32 // Isolation contains a This setting controls the visibility of // transactional records. 
Using READ_UNCOMMITTED (isolation_level = 0) // makes all records visible. With READ_COMMITTED (isolation_level = 1), // non-transactional and COMMITTED transactional records are visible. To be // more concrete, READ_COMMITTED returns all data from offsets smaller than // the current LSO (last stable offset), and enables the inclusion of the // list of aborted transactions in the result, which allows consumers to // discard ABORTED transactional records Isolation IsolationLevel // SessionID contains the fetch session ID. SessionID int32 // SessionEpoch contains the epoch of the partition leader as known to the // follower replica or a consumer. SessionEpoch int32 // blocks contains the topics to fetch. blocks map[string]map[int32]*fetchRequestBlock // forgotten contains in an incremental fetch request, the partitions to remove. forgotten map[string][]int32 // RackID contains a Rack ID of the consumer making this request RackID string } type IsolationLevel int8 const ( ReadUncommitted IsolationLevel = iota ReadCommitted ) func (r *FetchRequest) encode(pe packetEncoder) (err error) { metricRegistry := pe.metricRegistry() pe.putInt32(-1) // ReplicaID is always -1 for clients pe.putInt32(r.MaxWaitTime) pe.putInt32(r.MinBytes) if r.Version >= 3 { pe.putInt32(r.MaxBytes) } if r.Version >= 4 { pe.putInt8(int8(r.Isolation)) } if r.Version >= 7 { pe.putInt32(r.SessionID) pe.putInt32(r.SessionEpoch) } err = pe.putArrayLength(len(r.blocks)) if err != nil { return err } for topic, blocks := range r.blocks { err = pe.putString(topic) if err != nil { return err } err = pe.putArrayLength(len(blocks)) if err != nil { return err } for partition, block := range blocks { pe.putInt32(partition) err = block.encode(pe, r.Version) if err != nil { return err } } getOrRegisterTopicMeter("consumer-fetch-rate", topic, metricRegistry).Mark(1) } if r.Version >= 7 { err = pe.putArrayLength(len(r.forgotten)) if err != nil { return err } for topic, partitions := range r.forgotten { err = pe.putString(topic) if err != nil { return err } err = pe.putArrayLength(len(partitions)) if err != nil { return err } for _, partition := range partitions { pe.putInt32(partition) } } } if r.Version >= 11 { err = pe.putString(r.RackID) if err != nil { return err } } return nil } func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) { r.Version = version if _, err = pd.getInt32(); err != nil { return err } if r.MaxWaitTime, err = pd.getInt32(); err != nil { return err } if r.MinBytes, err = pd.getInt32(); err != nil { return err } if r.Version >= 3 { if r.MaxBytes, err = pd.getInt32(); err != nil { return err } } if r.Version >= 4 { isolation, err := pd.getInt8() if err != nil { return err } r.Isolation = IsolationLevel(isolation) } if r.Version >= 7 { r.SessionID, err = pd.getInt32() if err != nil { return err } r.SessionEpoch, err = pd.getInt32() if err != nil { return err } } topicCount, err := pd.getArrayLength() if err != nil { return err } if topicCount == 0 { return nil } r.blocks = make(map[string]map[int32]*fetchRequestBlock) for i := 0; i < topicCount; i++ { topic, err := pd.getString() if err != nil { return err } partitionCount, err := pd.getArrayLength() if err != nil { return err } r.blocks[topic] = make(map[int32]*fetchRequestBlock) for j := 0; j < partitionCount; j++ { partition, err := pd.getInt32() if err != nil { return err } fetchBlock := &fetchRequestBlock{} if err = fetchBlock.decode(pd, r.Version); err != nil { return err } r.blocks[topic][partition] = fetchBlock } } if r.Version >= 7 { 
forgottenCount, err := pd.getArrayLength() if err != nil { return err } r.forgotten = make(map[string][]int32) for i := 0; i < forgottenCount; i++ { topic, err := pd.getString() if err != nil { return err } partitionCount, err := pd.getArrayLength() if err != nil { return err } if partitionCount < 0 { return fmt.Errorf("partitionCount %d is invalid", partitionCount) } r.forgotten[topic] = make([]int32, partitionCount) for j := 0; j < partitionCount; j++ { partition, err := pd.getInt32() if err != nil { return err } r.forgotten[topic][j] = partition } } } if r.Version >= 11 { r.RackID, err = pd.getString() if err != nil { return err } } return nil } func (r *FetchRequest) key() int16 { return 1 } func (r *FetchRequest) version() int16 { return r.Version } func (r *FetchRequest) headerVersion() int16 { return 1 } func (r *FetchRequest) isValidVersion() bool { return r.Version >= 0 && r.Version <= 11 } func (r *FetchRequest) requiredVersion() KafkaVersion { switch r.Version { case 11: return V2_3_0_0 case 9, 10: return V2_1_0_0 case 8: return V2_0_0_0 case 7: return V1_1_0_0 case 6: return V1_0_0_0 case 4, 5: return V0_11_0_0 case 3: return V0_10_1_0 case 2: return V0_10_0_0 case 1: return V0_9_0_0 case 0: return V0_8_2_0 default: return V2_3_0_0 } } func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32, leaderEpoch int32) { if r.blocks == nil { r.blocks = make(map[string]map[int32]*fetchRequestBlock) } if r.Version >= 7 && r.forgotten == nil { r.forgotten = make(map[string][]int32) } if r.blocks[topic] == nil { r.blocks[topic] = make(map[int32]*fetchRequestBlock) } tmp := new(fetchRequestBlock) tmp.Version = r.Version tmp.maxBytes = maxBytes tmp.fetchOffset = fetchOffset if r.Version >= 9 { tmp.currentLeaderEpoch = leaderEpoch } r.blocks[topic][partitionID] = tmp } golang-github-ibm-sarama-1.43.2/fetch_request_test.go000066400000000000000000000055501461256741300226040ustar00rootroot00000000000000package sarama import "testing" var ( fetchRequestNoBlocks = []byte{ 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, } fetchRequestWithProperties = []byte{ 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0xEF, 0x00, 0x00, 0x00, 0x00, } fetchRequestOneBlock = []byte{ 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x05, 't', 'o', 'p', 'i', 'c', 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x56, } fetchRequestOneBlockV4 = []byte{ 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x05, 't', 'o', 'p', 'i', 'c', 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x56, } fetchRequestOneBlockV11 = []byte{ 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0x01, 0x00, 0x00, 0x00, 0xAA, // sessionID 0x00, 0x00, 0x00, 0xEE, // sessionEpoch 0x00, 0x00, 0x00, 0x01, 0x00, 0x05, 't', 'o', 'p', 'i', 'c', 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x12, // partitionID 0x00, 0x00, 0x00, 0x66, // currentLeaderEpoch 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, // fetchOffset 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // logStartOffset 0x00, 0x00, 0x00, 0x56, // maxBytes 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 'r', 'a', 'c', 'k', '0', '1', // rackID } ) func TestFetchRequest(t *testing.T) { t.Run("no 
blocks", func(t *testing.T) { request := new(FetchRequest) testRequest(t, "no blocks", request, fetchRequestNoBlocks) }) t.Run("with properties", func(t *testing.T) { request := new(FetchRequest) request.MaxWaitTime = 0x20 request.MinBytes = 0xEF testRequest(t, "with properties", request, fetchRequestWithProperties) }) t.Run("one block", func(t *testing.T) { request := new(FetchRequest) request.MaxWaitTime = 0 request.MinBytes = 0 request.AddBlock("topic", 0x12, 0x34, 0x56, -1) testRequest(t, "one block", request, fetchRequestOneBlock) }) t.Run("one block v4", func(t *testing.T) { request := new(FetchRequest) request.Version = 4 request.MaxBytes = 0xFF request.Isolation = ReadCommitted request.AddBlock("topic", 0x12, 0x34, 0x56, -1) testRequest(t, "one block v4", request, fetchRequestOneBlockV4) }) t.Run("one block v11 rackid and leader epoch", func(t *testing.T) { request := new(FetchRequest) request.Version = 11 request.MaxBytes = 0xFF request.Isolation = ReadCommitted request.SessionID = 0xAA request.SessionEpoch = 0xEE request.AddBlock("topic", 0x12, 0x34, 0x56, 0x66) request.RackID = "rack01" testRequest(t, "one block v11 rackid", request, fetchRequestOneBlockV11) }) } golang-github-ibm-sarama-1.43.2/fetch_response.go000066400000000000000000000345441461256741300217200ustar00rootroot00000000000000package sarama import ( "errors" "sort" "time" "github.com/rcrowley/go-metrics" ) const ( invalidLeaderEpoch = -1 invalidPreferredReplicaID = -1 ) type AbortedTransaction struct { // ProducerID contains the producer id associated with the aborted transaction. ProducerID int64 // FirstOffset contains the first offset in the aborted transaction. FirstOffset int64 } func (t *AbortedTransaction) decode(pd packetDecoder) (err error) { if t.ProducerID, err = pd.getInt64(); err != nil { return err } if t.FirstOffset, err = pd.getInt64(); err != nil { return err } return nil } func (t *AbortedTransaction) encode(pe packetEncoder) (err error) { pe.putInt64(t.ProducerID) pe.putInt64(t.FirstOffset) return nil } type FetchResponseBlock struct { // Err contains the error code, or 0 if there was no fetch error. Err KError // HighWatermarkOffset contains the current high water mark. HighWaterMarkOffset int64 // LastStableOffset contains the last stable offset (or LSO) of the // partition. This is the last offset such that the state of all // transactional records prior to this offset have been decided (ABORTED or // COMMITTED) LastStableOffset int64 LastRecordsBatchOffset *int64 // LogStartOffset contains the current log start offset. LogStartOffset int64 // AbortedTransactions contains the aborted transactions. AbortedTransactions []*AbortedTransaction // PreferredReadReplica contains the preferred read replica for the // consumer to use on its next fetch request PreferredReadReplica int32 // RecordsSet contains the record data. 
RecordsSet []*Records Partial bool Records *Records // deprecated: use FetchResponseBlock.RecordsSet } func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) { metricRegistry := pd.metricRegistry() var sizeMetric metrics.Histogram if metricRegistry != nil { sizeMetric = getOrRegisterHistogram("consumer-fetch-response-size", metricRegistry) } tmp, err := pd.getInt16() if err != nil { return err } b.Err = KError(tmp) b.HighWaterMarkOffset, err = pd.getInt64() if err != nil { return err } if version >= 4 { b.LastStableOffset, err = pd.getInt64() if err != nil { return err } if version >= 5 { b.LogStartOffset, err = pd.getInt64() if err != nil { return err } } numTransact, err := pd.getArrayLength() if err != nil { return err } if numTransact >= 0 { b.AbortedTransactions = make([]*AbortedTransaction, numTransact) } for i := 0; i < numTransact; i++ { transact := new(AbortedTransaction) if err = transact.decode(pd); err != nil { return err } b.AbortedTransactions[i] = transact } } if version >= 11 { b.PreferredReadReplica, err = pd.getInt32() if err != nil { return err } } else { b.PreferredReadReplica = -1 } recordsSize, err := pd.getInt32() if err != nil { return err } if sizeMetric != nil { sizeMetric.Update(int64(recordsSize)) } recordsDecoder, err := pd.getSubset(int(recordsSize)) if err != nil { return err } b.RecordsSet = []*Records{} for recordsDecoder.remaining() > 0 { records := &Records{} if err := records.decode(recordsDecoder); err != nil { // If we have at least one decoded records, this is not an error if errors.Is(err, ErrInsufficientData) { if len(b.RecordsSet) == 0 { b.Partial = true } break } return err } b.LastRecordsBatchOffset, err = records.recordsOffset() if err != nil { return err } partial, err := records.isPartial() if err != nil { return err } n, err := records.numRecords() if err != nil { return err } if n > 0 || (partial && len(b.RecordsSet) == 0) { b.RecordsSet = append(b.RecordsSet, records) if b.Records == nil { b.Records = records } } overflow, err := records.isOverflow() if err != nil { return err } if partial || overflow { break } } return nil } func (b *FetchResponseBlock) numRecords() (int, error) { sum := 0 for _, records := range b.RecordsSet { count, err := records.numRecords() if err != nil { return 0, err } sum += count } return sum, nil } func (b *FetchResponseBlock) isPartial() (bool, error) { if b.Partial { return true, nil } if len(b.RecordsSet) == 1 { return b.RecordsSet[0].isPartial() } return false, nil } func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) { pe.putInt16(int16(b.Err)) pe.putInt64(b.HighWaterMarkOffset) if version >= 4 { pe.putInt64(b.LastStableOffset) if version >= 5 { pe.putInt64(b.LogStartOffset) } if err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil { return err } for _, transact := range b.AbortedTransactions { if err = transact.encode(pe); err != nil { return err } } } if version >= 11 { pe.putInt32(b.PreferredReadReplica) } pe.push(&lengthField{}) for _, records := range b.RecordsSet { err = records.encode(pe) if err != nil { return err } } return pe.pop() } func (b *FetchResponseBlock) getAbortedTransactions() []*AbortedTransaction { // I can't find any doc that guarantee the field `fetchResponse.AbortedTransactions` is ordered // plus Java implementation use a PriorityQueue based on `FirstOffset`. 
I guess we have to order it ourself at := b.AbortedTransactions sort.Slice( at, func(i, j int) bool { return at[i].FirstOffset < at[j].FirstOffset }, ) return at } type FetchResponse struct { // Version defines the protocol version to use for encode and decode Version int16 // ThrottleTime contains the duration in milliseconds for which the request // was throttled due to a quota violation, or zero if the request did not // violate any quota. ThrottleTime time.Duration // ErrorCode contains the top level response error code. ErrorCode int16 // SessionID contains the fetch session ID, or 0 if this is not part of a fetch session. SessionID int32 // Blocks contains the response topics. Blocks map[string]map[int32]*FetchResponseBlock LogAppendTime bool Timestamp time.Time } func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) { r.Version = version if r.Version >= 1 { throttle, err := pd.getInt32() if err != nil { return err } r.ThrottleTime = time.Duration(throttle) * time.Millisecond } if r.Version >= 7 { r.ErrorCode, err = pd.getInt16() if err != nil { return err } r.SessionID, err = pd.getInt32() if err != nil { return err } } numTopics, err := pd.getArrayLength() if err != nil { return err } r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics) for i := 0; i < numTopics; i++ { name, err := pd.getString() if err != nil { return err } numBlocks, err := pd.getArrayLength() if err != nil { return err } r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks) for j := 0; j < numBlocks; j++ { id, err := pd.getInt32() if err != nil { return err } block := new(FetchResponseBlock) err = block.decode(pd, version) if err != nil { return err } r.Blocks[name][id] = block } } return nil } func (r *FetchResponse) encode(pe packetEncoder) (err error) { if r.Version >= 1 { pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) } if r.Version >= 7 { pe.putInt16(r.ErrorCode) pe.putInt32(r.SessionID) } err = pe.putArrayLength(len(r.Blocks)) if err != nil { return err } for topic, partitions := range r.Blocks { err = pe.putString(topic) if err != nil { return err } err = pe.putArrayLength(len(partitions)) if err != nil { return err } for id, block := range partitions { pe.putInt32(id) err = block.encode(pe, r.Version) if err != nil { return err } } } return nil } func (r *FetchResponse) key() int16 { return 1 } func (r *FetchResponse) version() int16 { return r.Version } func (r *FetchResponse) headerVersion() int16 { return 0 } func (r *FetchResponse) isValidVersion() bool { return r.Version >= 0 && r.Version <= 11 } func (r *FetchResponse) requiredVersion() KafkaVersion { switch r.Version { case 11: return V2_3_0_0 case 9, 10: return V2_1_0_0 case 8: return V2_0_0_0 case 7: return V1_1_0_0 case 6: return V1_0_0_0 case 4, 5: return V0_11_0_0 case 3: return V0_10_1_0 case 2: return V0_10_0_0 case 1: return V0_9_0_0 case 0: return V0_8_2_0 default: return V2_3_0_0 } } func (r *FetchResponse) throttleTime() time.Duration { return r.ThrottleTime } func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock { if r.Blocks == nil { return nil } if r.Blocks[topic] == nil { return nil } return r.Blocks[topic][partition] } func (r *FetchResponse) AddError(topic string, partition int32, err KError) { if r.Blocks == nil { r.Blocks = make(map[string]map[int32]*FetchResponseBlock) } partitions, ok := r.Blocks[topic] if !ok { partitions = make(map[int32]*FetchResponseBlock) r.Blocks[topic] = partitions } frb, ok := partitions[partition] if !ok { frb = 
new(FetchResponseBlock) partitions[partition] = frb } frb.Err = err } func (r *FetchResponse) getOrCreateBlock(topic string, partition int32) *FetchResponseBlock { if r.Blocks == nil { r.Blocks = make(map[string]map[int32]*FetchResponseBlock) } partitions, ok := r.Blocks[topic] if !ok { partitions = make(map[int32]*FetchResponseBlock) r.Blocks[topic] = partitions } frb, ok := partitions[partition] if !ok { frb = new(FetchResponseBlock) partitions[partition] = frb } return frb } func encodeKV(key, value Encoder) ([]byte, []byte) { var kb []byte var vb []byte if key != nil { kb, _ = key.Encode() } if value != nil { vb, _ = value.Encode() } return kb, vb } func (r *FetchResponse) AddMessageWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time, version int8) { frb := r.getOrCreateBlock(topic, partition) kb, vb := encodeKV(key, value) if r.LogAppendTime { timestamp = r.Timestamp } msg := &Message{Key: kb, Value: vb, LogAppendTime: r.LogAppendTime, Timestamp: timestamp, Version: version} msgBlock := &MessageBlock{Msg: msg, Offset: offset} if len(frb.RecordsSet) == 0 { records := newLegacyRecords(&MessageSet{}) frb.RecordsSet = []*Records{&records} } set := frb.RecordsSet[0].MsgSet set.Messages = append(set.Messages, msgBlock) } func (r *FetchResponse) AddRecordWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time) { frb := r.getOrCreateBlock(topic, partition) kb, vb := encodeKV(key, value) if len(frb.RecordsSet) == 0 { records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp}) frb.RecordsSet = []*Records{&records} } batch := frb.RecordsSet[0].RecordBatch rec := &Record{Key: kb, Value: vb, OffsetDelta: offset, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)} batch.addRecord(rec) } // AddRecordBatchWithTimestamp is similar to AddRecordWithTimestamp, // but instead of appending one record to an existing batch, it appends a new batch containing one record to the FetchResponse. // Since transactions are handled at the batch level (the whole batch is either committed or aborted), use this to test transactions. func (r *FetchResponse) AddRecordBatchWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool, timestamp time.Time) { frb := r.getOrCreateBlock(topic, partition) kb, vb := encodeKV(key, value) records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp}) batch := &RecordBatch{ Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp, FirstOffset: offset, LastOffsetDelta: 0, ProducerID: producerID, IsTransactional: isTransactional, } rec := &Record{Key: kb, Value: vb, OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)} batch.addRecord(rec) records.RecordBatch = batch frb.RecordsSet = append(frb.RecordsSet, &records) } func (r *FetchResponse) AddControlRecordWithTimestamp(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType, timestamp time.Time) { frb := r.getOrCreateBlock(topic, partition) // batch batch := &RecordBatch{ Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp, FirstOffset: offset, LastOffsetDelta: 0, ProducerID: producerID, IsTransactional: true, Control: true, } // records records := newDefaultRecords(nil) records.RecordBatch = batch // record crAbort :=
ControlRecord{ Version: 0, Type: recordType, } crKey := &realEncoder{raw: make([]byte, 4)} crValue := &realEncoder{raw: make([]byte, 6)} crAbort.encode(crKey, crValue) rec := &Record{Key: ByteEncoder(crKey.raw), Value: ByteEncoder(crValue.raw), OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)} batch.addRecord(rec) frb.RecordsSet = append(frb.RecordsSet, &records) } func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) { r.AddMessageWithTimestamp(topic, partition, key, value, offset, time.Time{}, 0) } func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) { r.AddRecordWithTimestamp(topic, partition, key, value, offset, time.Time{}) } func (r *FetchResponse) AddRecordBatch(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool) { r.AddRecordBatchWithTimestamp(topic, partition, key, value, offset, producerID, isTransactional, time.Time{}) } func (r *FetchResponse) AddControlRecord(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType) { // define controlRecord key and value r.AddControlRecordWithTimestamp(topic, partition, offset, producerID, recordType, time.Time{}) } func (r *FetchResponse) SetLastOffsetDelta(topic string, partition int32, offset int32) { frb := r.getOrCreateBlock(topic, partition) if len(frb.RecordsSet) == 0 { records := newDefaultRecords(&RecordBatch{Version: 2}) frb.RecordsSet = []*Records{&records} } batch := frb.RecordsSet[0].RecordBatch batch.LastOffsetDelta = offset } func (r *FetchResponse) SetLastStableOffset(topic string, partition int32, offset int64) { frb := r.getOrCreateBlock(topic, partition) frb.LastStableOffset = offset } golang-github-ibm-sarama-1.43.2/fetch_response_test.go000066400000000000000000000423201461256741300227460ustar00rootroot00000000000000package sarama import ( "bytes" "errors" "testing" ) var ( emptyFetchResponse = []byte{ 0x00, 0x00, 0x00, 0x00, } oneMessageFetchResponse = []byte{ 0x00, 0x00, 0x00, 0x01, 0x00, 0x05, 't', 'o', 'p', 'i', 'c', 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x05, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, 0x00, 0x00, 0x00, 0x1C, // messageSet 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, // message 0x23, 0x96, 0x4a, 0xf7, // CRC 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x02, 0x00, 0xEE, } overflowMessageFetchResponse = []byte{ 0x00, 0x00, 0x00, 0x01, 0x00, 0x05, 't', 'o', 'p', 'i', 'c', 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x05, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, 0x00, 0x00, 0x00, 0x30, // messageSet 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, // message 0x23, 0x96, 0x4a, 0xf7, // CRC 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x02, 0x00, 0xEE, // overflow messageSet 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0xFF, // overflow bytes 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, } oneRecordFetchResponse = []byte{ 0x00, 0x00, 0x00, 0x00, // ThrottleTime 0x00, 0x00, 0x00, 0x01, // Number of Topics 0x00, 0x05, 't', 'o', 'p', 'i', 'c', // Topic 0x00, 0x00, 0x00, 0x01, // Number of Partitions 0x00, 0x00, 0x00, 0x05, // Partition 0x00, 0x01, // Error 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, // High Watermark Offset 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, // Last Stable Offset 0x00, 0x00, 0x00, 0x00, // Number of Aborted Transactions 0x00, 0x00, 0x00, 0x52, // Records 
length // recordBatch 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46, 0x00, 0x00, 0x00, 0x00, 0x02, 0xDB, 0x47, 0x14, 0xC9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // record 0x28, 0x00, 0x0A, 0x00, 0x08, 0x01, 0x02, 0x03, 0x04, 0x06, 0x05, 0x06, 0x07, 0x02, 0x06, 0x08, 0x09, 0x0A, 0x04, 0x0B, 0x0C, } partialFetchResponse = []byte{ 0x00, 0x00, 0x00, 0x00, // ThrottleTime 0x00, 0x00, 0x00, 0x01, // Number of Topics 0x00, 0x05, 't', 'o', 'p', 'i', 'c', // Topic 0x00, 0x00, 0x00, 0x01, // Number of Partitions 0x00, 0x00, 0x00, 0x05, // Partition 0x00, 0x00, // Error 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, // High Watermark Offset 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, // Last Stable Offset 0x00, 0x00, 0x00, 0x00, // Number of Aborted Transactions 0x00, 0x00, 0x00, 0x40, // Records length 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46, 0x00, 0x00, 0x00, 0x00, 0x02, 0xDB, 0x47, 0x14, 0xC9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // record 0x28, 0x00, 0x00, } emptyRecordsFetchResponsev11 = []byte{ 0x00, 0x00, 0x00, 0x00, // ThrottleTime 0x00, 0x00, // Error 0x00, 0x00, 0x00, 0x00, // Fetch session 0x00, 0x00, 0x00, 0x01, // Num topic 0x00, 0x05, 't', 'o', 'p', 'i', 'c', // Topic 0x00, 0x00, 0x00, 0x01, // Num partition 0x00, 0x00, 0x00, 0x05, // Partition 0x00, 0x00, // Error 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, // High Watermark Offset 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, // Last Stable Offset 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Log start offset 0x00, 0x00, 0x00, 0x00, // Number of Aborted Transactions 0xff, 0xff, 0xff, 0xff, // Replica id 0x00, 0x00, 0x00, 0x3D, // Batch size // recordBatch 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Offset 0x00, 0x00, 0x00, 0x31, // Message size 0x00, 0x00, 0x00, 0x00, // Leader epoch 0x02, // Magic byte 0x14, 0xE0, 0x7A, 0x62, // CRC 0x00, 0x00, // Flags 0x00, 0x00, 0x00, 0x00, // Last offset delta 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0A, // First timestamp 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0B, // Last timestamp 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // Producer id 0x00, 0x00, // Producer epoch 0x00, 0x00, 0x00, 0x3d, // Base sequence 0x00, 0x00, 0x00, 0x00, // Records size } oneMessageFetchResponseV4 = []byte{ 0x00, 0x00, 0x00, 0x00, // ThrottleTime 0x00, 0x00, 0x00, 0x01, // Number of Topics 0x00, 0x05, 't', 'o', 'p', 'i', 'c', // Topic 0x00, 0x00, 0x00, 0x01, // Number of Partitions 0x00, 0x00, 0x00, 0x05, // Partition 0x00, 0x01, // Error 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, // High Watermark Offset 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, // Last Stable Offset 0x00, 0x00, 0x00, 0x00, // Number of Aborted Transactions 0x00, 0x00, 0x00, 0x1C, // messageSet 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, // message 0x23, 0x96, 0x4a, 0xf7, // CRC 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x02, 0x00, 0xEE, } preferredReplicaFetchResponseV11 = []byte{ 0x00, 0x00, 0x00, 0x00, // ThrottleTime 0x00, 0x02, // ErrorCode 0x00, 0x00, 0x00, 0xAC, // SessionID 0x00, 0x00, 0x00, 0x01, // 
Number of Topics 0x00, 0x05, 't', 'o', 'p', 'i', 'c', // Topic 0x00, 0x00, 0x00, 0x01, // Number of Partitions 0x00, 0x00, 0x00, 0x05, // Partition 0x00, 0x01, // Error 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, // High Watermark Offset 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x09, // Last Stable Offset 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, // Log Start Offset 0x00, 0x00, 0x00, 0x00, // Number of Aborted Transactions 0x00, 0x00, 0x00, 0x03, // Preferred Read Replica 0x00, 0x00, 0x00, 0x1C, // messageSet 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, // message 0x23, 0x96, 0x4a, 0xf7, // CRC 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x02, 0x00, 0xEE, } ) func TestEmptyFetchResponse(t *testing.T) { response := FetchResponse{} testVersionDecodable(t, "empty", &response, emptyFetchResponse, 0) if len(response.Blocks) != 0 { t.Error("Decoding produced topic blocks where there were none.") } } func TestOneMessageFetchResponse(t *testing.T) { response := FetchResponse{} testVersionDecodable(t, "one message", &response, oneMessageFetchResponse, 0) if len(response.Blocks) != 1 { t.Fatal("Decoding produced incorrect number of topic blocks.") } if len(response.Blocks["topic"]) != 1 { t.Fatal("Decoding produced incorrect number of partition blocks for topic.") } block := response.GetBlock("topic", 5) if block == nil { t.Fatal("GetBlock didn't return block.") } if !errors.Is(block.Err, ErrOffsetOutOfRange) { t.Error("Decoding didn't produce correct error code.") } if block.HighWaterMarkOffset != 0x10101010 { t.Error("Decoding didn't produce correct high water mark offset.") } if block.PreferredReadReplica != -1 { t.Error("Decoding didn't produce correct preferred read replica.") } partial, err := block.isPartial() if err != nil { t.Fatalf("Unexpected error: %v", err) } if partial { t.Error("Decoding detected a partial trailing message where there wasn't one.") } n, err := block.numRecords() if err != nil { t.Fatalf("Unexpected error: %v", err) } if n != 1 { t.Fatal("Decoding produced incorrect number of messages.") } msgBlock := block.RecordsSet[0].MsgSet.Messages[0] if msgBlock.Offset != 0x550000 { t.Error("Decoding produced incorrect message offset.") } msg := msgBlock.Msg if msg.Codec != CompressionNone { t.Error("Decoding produced incorrect message compression.") } if msg.Key != nil { t.Error("Decoding produced message key where there was none.") } if !bytes.Equal(msg.Value, []byte{0x00, 0xEE}) { t.Error("Decoding produced incorrect message value.") } } func TestOverflowMessageFetchResponse(t *testing.T) { response := FetchResponse{} testVersionDecodable(t, "overflow message", &response, overflowMessageFetchResponse, 0) if len(response.Blocks) != 1 { t.Fatal("Decoding produced incorrect number of topic blocks.") } if len(response.Blocks["topic"]) != 1 { t.Fatal("Decoding produced incorrect number of partition blocks for topic.") } block := response.GetBlock("topic", 5) if block == nil { t.Fatal("GetBlock didn't return block.") } if !errors.Is(block.Err, ErrOffsetOutOfRange) { t.Error("Decoding didn't produce correct error code.") } if block.HighWaterMarkOffset != 0x10101010 { t.Error("Decoding didn't produce correct high water mark offset.") } partial, err := block.Records.isPartial() if err != nil { t.Fatalf("Unexpected error: %v", err) } if partial { t.Error("Decoding detected a partial trailing message where there wasn't one.") } overflow, err := block.Records.isOverflow() if err != nil { t.Fatalf("Unexpected error: %v", err) } if !overflow { 
t.Error("Decoding detected a partial trailing message where there wasn't one.") } n, err := block.Records.numRecords() if err != nil { t.Fatalf("Unexpected error: %v", err) } if n != 1 { t.Fatal("Decoding produced incorrect number of messages.") } msgBlock := block.Records.MsgSet.Messages[0] if msgBlock.Offset != 0x550000 { t.Error("Decoding produced incorrect message offset.") } msg := msgBlock.Msg if msg.Codec != CompressionNone { t.Error("Decoding produced incorrect message compression.") } if msg.Key != nil { t.Error("Decoding produced message key where there was none.") } if !bytes.Equal(msg.Value, []byte{0x00, 0xEE}) { t.Error("Decoding produced incorrect message value.") } } func TestOneRecordFetchResponse(t *testing.T) { response := FetchResponse{} testVersionDecodable(t, "one record", &response, oneRecordFetchResponse, 4) if len(response.Blocks) != 1 { t.Fatal("Decoding produced incorrect number of topic blocks.") } if len(response.Blocks["topic"]) != 1 { t.Fatal("Decoding produced incorrect number of partition blocks for topic.") } block := response.GetBlock("topic", 5) if block == nil { t.Fatal("GetBlock didn't return block.") } if !errors.Is(block.Err, ErrOffsetOutOfRange) { t.Error("Decoding didn't produce correct error code.") } if block.HighWaterMarkOffset != 0x10101010 { t.Error("Decoding didn't produce correct high water mark offset.") } if block.PreferredReadReplica != -1 { t.Error("Decoding didn't produce correct preferred read replica.") } partial, err := block.isPartial() if err != nil { t.Fatalf("Unexpected error: %v", err) } if partial { t.Error("Decoding detected a partial trailing record where there wasn't one.") } n, err := block.numRecords() if err != nil { t.Fatalf("Unexpected error: %v", err) } if n != 1 { t.Fatal("Decoding produced incorrect number of records.") } rec := block.RecordsSet[0].RecordBatch.Records[0] if !bytes.Equal(rec.Key, []byte{0x01, 0x02, 0x03, 0x04}) { t.Error("Decoding produced incorrect record key.") } if !bytes.Equal(rec.Value, []byte{0x05, 0x06, 0x07}) { t.Error("Decoding produced incorrect record value.") } } func TestPartailFetchResponse(t *testing.T) { response := FetchResponse{} testVersionDecodable(t, "partial record", &response, partialFetchResponse, 4) if len(response.Blocks) != 1 { t.Fatal("Decoding produced incorrect number of topic blocks.") } if len(response.Blocks["topic"]) != 1 { t.Fatal("Decoding produced incorrect number of partition blocks for topic.") } block := response.GetBlock("topic", 5) if block == nil { t.Fatal("GetBlock didn't return block.") } if !errors.Is(block.Err, ErrNoError) { t.Error("Decoding didn't produce correct error code.") } if block.HighWaterMarkOffset != 0x10101010 { t.Error("Decoding didn't produce correct high water mark offset.") } if block.PreferredReadReplica != -1 { t.Error("Decoding didn't produce correct preferred read replica.") } partial, err := block.isPartial() if err != nil { t.Fatalf("Unexpected error: %v", err) } if !partial { t.Error("Decoding not a partial trailing record") } n, err := block.numRecords() if err != nil { t.Fatalf("Unexpected error: %v", err) } if n != 0 { t.Fatal("Decoding produced incorrect number of records.") } } func TestEmptyRecordsFetchResponse(t *testing.T) { response := FetchResponse{} testVersionDecodable(t, "empty record", &response, emptyRecordsFetchResponsev11, 11) if len(response.Blocks) != 1 { t.Fatal("Decoding produced incorrect number of topic blocks.") } if len(response.Blocks["topic"]) != 1 { t.Fatal("Decoding produced incorrect number of partition 
blocks for topic.") } block := response.GetBlock("topic", 5) if block == nil { t.Fatal("GetBlock didn't return block.") } if !errors.Is(block.Err, ErrNoError) { t.Error("Decoding didn't produce correct error code.") } if block.HighWaterMarkOffset != 0x10101010 { t.Error("Decoding didn't produce correct high water mark offset.") } if block.PreferredReadReplica != -1 { t.Error("Decoding didn't produce correct preferred read replica.") } partial, err := block.isPartial() if err != nil { t.Fatalf("Unexpected error: %v", err) } if partial { t.Error("Decoding a partial trailing record") } n, err := block.numRecords() if err != nil { t.Fatalf("Unexpected error: %v", err) } if n != 0 { t.Fatal("Decoding produced incorrect number of records.") } if *block.LastRecordsBatchOffset != 0 { t.Fatal("Last records batch offset is incorrect.") } } func TestOneMessageFetchResponseV4(t *testing.T) { response := FetchResponse{} testVersionDecodable(t, "one message v4", &response, oneMessageFetchResponseV4, 4) if len(response.Blocks) != 1 { t.Fatal("Decoding produced incorrect number of topic blocks.") } if len(response.Blocks["topic"]) != 1 { t.Fatal("Decoding produced incorrect number of partition blocks for topic.") } block := response.GetBlock("topic", 5) if block == nil { t.Fatal("GetBlock didn't return block.") } if !errors.Is(block.Err, ErrOffsetOutOfRange) { t.Error("Decoding didn't produce correct error code.") } if block.HighWaterMarkOffset != 0x10101010 { t.Error("Decoding didn't produce correct high water mark offset.") } if block.PreferredReadReplica != -1 { t.Error("Decoding didn't produce correct preferred read replica.") } partial, err := block.isPartial() if err != nil { t.Fatalf("Unexpected error: %v", err) } if partial { t.Error("Decoding detected a partial trailing record where there wasn't one.") } n, err := block.numRecords() if err != nil { t.Fatalf("Unexpected error: %v", err) } if n != 1 { t.Fatal("Decoding produced incorrect number of records.") } msgBlock := block.RecordsSet[0].MsgSet.Messages[0] if msgBlock.Offset != 0x550000 { t.Error("Decoding produced incorrect message offset.") } msg := msgBlock.Msg if msg.Codec != CompressionNone { t.Error("Decoding produced incorrect message compression.") } if msg.Key != nil { t.Error("Decoding produced message key where there was none.") } if !bytes.Equal(msg.Value, []byte{0x00, 0xEE}) { t.Error("Decoding produced incorrect message value.") } } func TestPreferredReplicaFetchResponseV11(t *testing.T) { response := FetchResponse{} testVersionDecodable( t, "preferred replica fetch response v11", &response, preferredReplicaFetchResponseV11, 11) if response.ErrorCode != 0x0002 { t.Fatal("Decoding produced incorrect error code.") } if response.SessionID != 0x000000AC { t.Fatal("Decoding produced incorrect session ID.") } if len(response.Blocks) != 1 { t.Fatal("Decoding produced incorrect number of topic blocks.") } if len(response.Blocks["topic"]) != 1 { t.Fatal("Decoding produced incorrect number of partition blocks for topic.") } block := response.GetBlock("topic", 5) if block == nil { t.Fatal("GetBlock didn't return block.") } if !errors.Is(block.Err, ErrOffsetOutOfRange) { t.Error("Decoding didn't produce correct error code.") } if block.HighWaterMarkOffset != 0x10101010 { t.Error("Decoding didn't produce correct high water mark offset.") } if block.LastStableOffset != 0x10101009 { t.Error("Decoding didn't produce correct last stable offset.") } if block.LogStartOffset != 0x01010101 { t.Error("Decoding didn't produce correct log start offset.") 
} if block.PreferredReadReplica != 0x0003 { t.Error("Decoding didn't produce correct preferred read replica.") } partial, err := block.isPartial() if err != nil { t.Fatalf("Unexpected error: %v", err) } if partial { t.Error("Decoding detected a partial trailing record where there wasn't one.") } n, err := block.numRecords() if err != nil { t.Fatalf("Unexpected error: %v", err) } if n != 1 { t.Fatal("Decoding produced incorrect number of records.") } msgBlock := block.RecordsSet[0].MsgSet.Messages[0] if msgBlock.Offset != 0x550000 { t.Error("Decoding produced incorrect message offset.") } msg := msgBlock.Msg if msg.Codec != CompressionNone { t.Error("Decoding produced incorrect message compression.") } if msg.Key != nil { t.Error("Decoding produced message key where there was none.") } if !bytes.Equal(msg.Value, []byte{0x00, 0xEE}) { t.Error("Decoding produced incorrect message value.") } } golang-github-ibm-sarama-1.43.2/find_coordinator_request.go000066400000000000000000000024061461256741300237740ustar00rootroot00000000000000package sarama type CoordinatorType int8 const ( CoordinatorGroup CoordinatorType = iota CoordinatorTransaction ) type FindCoordinatorRequest struct { Version int16 CoordinatorKey string CoordinatorType CoordinatorType } func (f *FindCoordinatorRequest) encode(pe packetEncoder) error { if err := pe.putString(f.CoordinatorKey); err != nil { return err } if f.Version >= 1 { pe.putInt8(int8(f.CoordinatorType)) } return nil } func (f *FindCoordinatorRequest) decode(pd packetDecoder, version int16) (err error) { if f.CoordinatorKey, err = pd.getString(); err != nil { return err } if version >= 1 { f.Version = version coordinatorType, err := pd.getInt8() if err != nil { return err } f.CoordinatorType = CoordinatorType(coordinatorType) } return nil } func (f *FindCoordinatorRequest) key() int16 { return 10 } func (f *FindCoordinatorRequest) version() int16 { return f.Version } func (r *FindCoordinatorRequest) headerVersion() int16 { return 1 } func (f *FindCoordinatorRequest) isValidVersion() bool { return f.Version >= 0 && f.Version <= 2 } func (f *FindCoordinatorRequest) requiredVersion() KafkaVersion { switch f.Version { case 2: return V2_0_0_0 case 1: return V0_11_0_0 default: return V0_8_2_0 } } golang-github-ibm-sarama-1.43.2/find_coordinator_request_test.go000066400000000000000000000013351461256741300250330ustar00rootroot00000000000000package sarama import "testing" var ( findCoordinatorRequestConsumerGroup = []byte{ 0, 5, 'g', 'r', 'o', 'u', 'p', 0, } findCoordinatorRequestTransaction = []byte{ 0, 13, 't', 'r', 'a', 'n', 's', 'a', 'c', 't', 'i', 'o', 'n', 'i', 'd', 1, } ) func TestFindCoordinatorRequest(t *testing.T) { req := &FindCoordinatorRequest{ Version: 1, CoordinatorKey: "group", CoordinatorType: CoordinatorGroup, } testRequest(t, "version 1 - group", req, findCoordinatorRequestConsumerGroup) req = &FindCoordinatorRequest{ Version: 1, CoordinatorKey: "transactionid", CoordinatorType: CoordinatorTransaction, } testRequest(t, "version 1 - transaction", req, findCoordinatorRequestTransaction) } golang-github-ibm-sarama-1.43.2/find_coordinator_response.go000066400000000000000000000037651461256741300241530ustar00rootroot00000000000000package sarama import ( "time" ) var NoNode = &Broker{id: -1, addr: ":-1"} type FindCoordinatorResponse struct { Version int16 ThrottleTime time.Duration Err KError ErrMsg *string Coordinator *Broker } func (f *FindCoordinatorResponse) decode(pd packetDecoder, version int16) (err error) { if version >= 1 { f.Version = version 
throttleTime, err := pd.getInt32() if err != nil { return err } f.ThrottleTime = time.Duration(throttleTime) * time.Millisecond } tmp, err := pd.getInt16() if err != nil { return err } f.Err = KError(tmp) if version >= 1 { if f.ErrMsg, err = pd.getNullableString(); err != nil { return err } } coordinator := new(Broker) // The version is hardcoded to 0, as version 1 of the Broker-decode // contains the rack-field which is not present in the FindCoordinatorResponse. if err := coordinator.decode(pd, 0); err != nil { return err } if coordinator.addr == ":0" { return nil } f.Coordinator = coordinator return nil } func (f *FindCoordinatorResponse) encode(pe packetEncoder) error { if f.Version >= 1 { pe.putInt32(int32(f.ThrottleTime / time.Millisecond)) } pe.putInt16(int16(f.Err)) if f.Version >= 1 { if err := pe.putNullableString(f.ErrMsg); err != nil { return err } } coordinator := f.Coordinator if coordinator == nil { coordinator = NoNode } if err := coordinator.encode(pe, 0); err != nil { return err } return nil } func (f *FindCoordinatorResponse) key() int16 { return 10 } func (f *FindCoordinatorResponse) version() int16 { return f.Version } func (r *FindCoordinatorResponse) headerVersion() int16 { return 0 } func (f *FindCoordinatorResponse) isValidVersion() bool { return f.Version >= 0 && f.Version <= 2 } func (f *FindCoordinatorResponse) requiredVersion() KafkaVersion { switch f.Version { case 2: return V2_0_0_0 case 1: return V0_11_0_0 default: return V0_8_2_0 } } func (r *FindCoordinatorResponse) throttleTime() time.Duration { return r.ThrottleTime } golang-github-ibm-sarama-1.43.2/find_coordinator_response_test.go000066400000000000000000000036001461256741300251760ustar00rootroot00000000000000package sarama import ( "testing" "time" ) func TestFindCoordinatorResponse(t *testing.T) { errMsg := "kaboom" for _, tc := range []struct { desc string response *FindCoordinatorResponse encoded []byte }{{ desc: "version 0 - no error", response: &FindCoordinatorResponse{ Version: 0, Err: ErrNoError, Coordinator: &Broker{ id: 7, addr: "host:9092", }, }, encoded: []byte{ 0, 0, // Err 0, 0, 0, 7, // Coordinator.ID 0, 4, 'h', 'o', 's', 't', // Coordinator.Host 0, 0, 35, 132, // Coordinator.Port }, }, { desc: "version 1 - no error", response: &FindCoordinatorResponse{ Version: 1, ThrottleTime: 100 * time.Millisecond, Err: ErrNoError, Coordinator: &Broker{ id: 7, addr: "host:9092", }, }, encoded: []byte{ 0, 0, 0, 100, // ThrottleTime 0, 0, // Err 255, 255, // ErrMsg: empty 0, 0, 0, 7, // Coordinator.ID 0, 4, 'h', 'o', 's', 't', // Coordinator.Host 0, 0, 35, 132, // Coordinator.Port }, }, { desc: "version 0 - error", response: &FindCoordinatorResponse{ Version: 0, Err: ErrConsumerCoordinatorNotAvailable, Coordinator: NoNode, }, encoded: []byte{ 0, 15, // Err 255, 255, 255, 255, // Coordinator.ID: -1 0, 0, // Coordinator.Host: "" 255, 255, 255, 255, // Coordinator.Port: -1 }, }, { desc: "version 1 - error", response: &FindCoordinatorResponse{ Version: 1, ThrottleTime: 100 * time.Millisecond, Err: ErrConsumerCoordinatorNotAvailable, ErrMsg: &errMsg, Coordinator: NoNode, }, encoded: []byte{ 0, 0, 0, 100, // ThrottleTime 0, 15, // Err 0, 6, 'k', 'a', 'b', 'o', 'o', 'm', // ErrMsg 255, 255, 255, 255, // Coordinator.ID: -1 0, 0, // Coordinator.Host: "" 255, 255, 255, 255, // Coordinator.Port: -1 }, }} { testResponse(t, tc.desc, tc.response, tc.encoded) } } golang-github-ibm-sarama-1.43.2/functional_admin_test.go000066400000000000000000000112431461256741300232510ustar00rootroot00000000000000//go:build functional 
package sarama import ( "testing" ) func TestFuncAdminQuotas(t *testing.T) { checkKafkaVersion(t, "2.6.0.0") setupFunctionalTest(t) defer teardownFunctionalTest(t) kafkaVersion, err := ParseKafkaVersion(FunctionalTestEnv.KafkaVersion) if err != nil { t.Fatal(err) } config := NewFunctionalTestConfig() config.Version = kafkaVersion adminClient, err := NewClusterAdmin(FunctionalTestEnv.KafkaBrokerAddrs, config) if err != nil { t.Fatal(err) } defer safeClose(t, adminClient) // Check that we can read the quotas, and that they are empty quotas, err := adminClient.DescribeClientQuotas(nil, false) if err != nil { t.Fatal(err) } if len(quotas) != 0 { t.Fatalf("Expected quotas to be empty at start, found: %v", quotas) } // Put a quota on default user // /config/users/ defaultUser := []QuotaEntityComponent{{ EntityType: QuotaEntityUser, MatchType: QuotaMatchDefault, }} produceOp := ClientQuotasOp{ Key: "producer_byte_rate", Value: 1024000, } if err = adminClient.AlterClientQuotas(defaultUser, produceOp, false); err != nil { t.Fatal(err) } // Check that we now have a quota entry quotas, err = adminClient.DescribeClientQuotas(nil, false) if err != nil { t.Fatal(err) } if len(quotas) == 0 { t.Fatal("Expected not empty quotas") } if len(quotas) > 1 { t.Fatalf("Expected one quota entry, found: %v", quotas) } // Put a quota on specific client-id for a specific user // /config/users//clients/ specificUserClientID := []QuotaEntityComponent{ { EntityType: QuotaEntityUser, MatchType: QuotaMatchExact, Name: "sarama", }, { EntityType: QuotaEntityClientID, MatchType: QuotaMatchExact, Name: "sarama-consumer", }, } consumeOp := ClientQuotasOp{ Key: "consumer_byte_rate", Value: 2048000, } if err = adminClient.AlterClientQuotas(specificUserClientID, consumeOp, false); err != nil { t.Fatal(err) } // Check that we can query a specific quota entry userFilter := QuotaFilterComponent{ EntityType: QuotaEntityUser, MatchType: QuotaMatchExact, Match: "sarama", } clientFilter := QuotaFilterComponent{ EntityType: QuotaEntityClientID, MatchType: QuotaMatchExact, Match: "sarama-consumer", } quotas, err = adminClient.DescribeClientQuotas([]QuotaFilterComponent{userFilter, clientFilter}, true) if err != nil { t.Fatal(err) } if len(quotas) == 0 { t.Fatal("Expected not empty quotas") } if len(quotas) > 1 { t.Fatalf("Expected one quota entry, found: %v", quotas) } if quotas[0].Values[consumeOp.Key] != consumeOp.Value { t.Fatalf("Expected specific quota value to be %f, found: %v", consumeOp.Value, quotas[0].Values[consumeOp.Key]) } // Remove quota entries deleteProduceOp := ClientQuotasOp{ Key: produceOp.Key, Remove: true, } if err = adminClient.AlterClientQuotas(defaultUser, deleteProduceOp, false); err != nil { t.Fatal(err) } deleteConsumeOp := ClientQuotasOp{ Key: consumeOp.Key, Remove: true, } if err = adminClient.AlterClientQuotas(specificUserClientID, deleteConsumeOp, false); err != nil { t.Fatal(err) } } func TestFuncAdminDescribeGroups(t *testing.T) { checkKafkaVersion(t, "2.3.0.0") setupFunctionalTest(t) defer teardownFunctionalTest(t) group1 := testFuncConsumerGroupID(t) group2 := testFuncConsumerGroupID(t) kafkaVersion, err := ParseKafkaVersion(FunctionalTestEnv.KafkaVersion) if err != nil { t.Fatal(err) } config := NewFunctionalTestConfig() config.Version = kafkaVersion adminClient, err := NewClusterAdmin(FunctionalTestEnv.KafkaBrokerAddrs, config) if err != nil { t.Fatal(err) } defer safeClose(t, adminClient) config1 := NewFunctionalTestConfig() config1.ClientID = "M1" config1.Version = V2_3_0_0 
config1.Consumer.Offsets.Initial = OffsetNewest m1 := runTestFuncConsumerGroupMemberWithConfig(t, config1, group1, 100, nil, "test.4") defer m1.Close() config2 := NewFunctionalTestConfig() config2.ClientID = "M2" config2.Version = V2_3_0_0 config2.Consumer.Offsets.Initial = OffsetNewest config2.Consumer.Group.InstanceId = "Instance2" m2 := runTestFuncConsumerGroupMemberWithConfig(t, config2, group2, 100, nil, "test.4") defer m2.Close() m1.WaitForState(2) m2.WaitForState(2) res, err := adminClient.DescribeConsumerGroups([]string{group1, group2}) if err != nil { t.Fatal(err) } if len(res) != 2 { t.Errorf("group description should be 2, got %v\n", len(res)) } if len(res[0].Members) != 1 { t.Errorf("should have 1 members in group , got %v\n", len(res[0].Members)) } if len(res[1].Members) != 1 { t.Errorf("should have 1 members in group , got %v\n", len(res[1].Members)) } m1.AssertCleanShutdown() m2.AssertCleanShutdown() } golang-github-ibm-sarama-1.43.2/functional_client_test.go000066400000000000000000000045601461256741300234430ustar00rootroot00000000000000//go:build functional package sarama import ( "errors" "fmt" "testing" "time" ) func TestFuncConnectionFailure(t *testing.T) { setupFunctionalTest(t) defer teardownFunctionalTest(t) FunctionalTestEnv.Proxies["kafka1"].Enabled = false SaveProxy(t, "kafka1") config := NewFunctionalTestConfig() config.Metadata.Retry.Max = 1 _, err := NewClient([]string{FunctionalTestEnv.KafkaBrokerAddrs[0]}, config) if !errors.Is(err, ErrOutOfBrokers) { t.Fatal("Expected returned error to be ErrOutOfBrokers, but was: ", err) } } func TestFuncClientMetadata(t *testing.T) { setupFunctionalTest(t) defer teardownFunctionalTest(t) config := NewFunctionalTestConfig() config.Metadata.Retry.Max = 1 config.Metadata.Retry.Backoff = 10 * time.Millisecond client, err := NewClient(FunctionalTestEnv.KafkaBrokerAddrs, config) if err != nil { t.Fatal(err) } if err := client.RefreshMetadata("unknown_topic"); !errors.Is(err, ErrUnknownTopicOrPartition) { t.Error("Expected ErrUnknownTopicOrPartition, got", err) } if _, err := client.Leader("unknown_topic", 0); !errors.Is(err, ErrUnknownTopicOrPartition) { t.Error("Expected ErrUnknownTopicOrPartition, got", err) } if _, err := client.Replicas("invalid/topic", 0); !errors.Is(err, ErrUnknownTopicOrPartition) && !errors.Is(err, ErrInvalidTopic) { t.Error("Expected ErrUnknownTopicOrPartition or ErrInvalidTopic, got", err) } partitions, err := client.Partitions("test.4") if err != nil { t.Error(err) } if len(partitions) != 4 { t.Errorf("Expected test.4 topic to have 4 partitions, found %v", partitions) } partitions, err = client.Partitions("test.1") if err != nil { t.Error(err) } if len(partitions) != 1 { t.Errorf("Expected test.1 topic to have 1 partitions, found %v", partitions) } safeClose(t, client) } func TestFuncClientCoordinator(t *testing.T) { checkKafkaVersion(t, "0.8.2") setupFunctionalTest(t) defer teardownFunctionalTest(t) client, err := NewClient(FunctionalTestEnv.KafkaBrokerAddrs, NewFunctionalTestConfig()) if err != nil { t.Fatal(err) } for i := 0; i < 10; i++ { broker, err := client.Coordinator(fmt.Sprintf("another_new_consumer_group_%d", i)) if err != nil { t.Fatal(err) } if connected, err := broker.Connected(); !connected || err != nil { t.Errorf("Expected to coordinator %s broker to be properly connected.", broker.Addr()) } } safeClose(t, client) } golang-github-ibm-sarama-1.43.2/functional_consumer_follower_fetch_test.go000066400000000000000000000050261461256741300271000ustar00rootroot00000000000000//go:build 
functional package sarama import ( "context" "fmt" "strconv" "sync" "testing" "time" ) func TestConsumerFetchFollowerFailover(t *testing.T) { const ( topic = "test.1" numMsg = 1000 ) newConfig := func() *Config { config := NewFunctionalTestConfig() config.ClientID = t.Name() config.Producer.Return.Successes = true return config } config := newConfig() // pick a partition and find the ID for one of the follower brokers admin, err := NewClusterAdmin(FunctionalTestEnv.KafkaBrokerAddrs, config) if err != nil { t.Fatal(err) } defer admin.Close() metadata, err := admin.DescribeTopics([]string{topic}) if err != nil { t.Fatal(err) } partition := metadata[0].Partitions[0] leader := metadata[0].Partitions[0].Leader follower := int32(-1) for _, replica := range partition.Replicas { if replica == leader { continue } follower = replica break } t.Logf("topic %s has leader kafka-%d and our chosen follower is kafka-%d", topic, leader, follower) // match our clientID to the given broker so our requests should end up fetching from that follower config.RackID = strconv.FormatInt(int64(follower), 10) consumer, err := NewConsumer(FunctionalTestEnv.KafkaBrokerAddrs, config) if err != nil { t.Fatal(err) } pc, err := consumer.ConsumePartition(topic, partition.ID, OffsetOldest) if err != nil { t.Fatal(err) } defer func() { pc.Close() consumer.Close() }() producer, err := NewSyncProducer(FunctionalTestEnv.KafkaBrokerAddrs, config) if err != nil { t.Fatal(err) } defer producer.Close() var wg sync.WaitGroup wg.Add(numMsg) go func() { for i := 0; i < numMsg; i++ { msg := &ProducerMessage{ Topic: topic, Key: nil, Value: StringEncoder(fmt.Sprintf("%s %-3d", t.Name(), i)), } if _, offset, err := producer.SendMessage(msg); err != nil { t.Error(i, err) } else if offset%50 == 0 { t.Logf("sent: %d\n", offset) } wg.Done() time.Sleep(time.Millisecond * 25) } }() i := 0 for ; i < numMsg/8; i++ { msg := <-pc.Messages() if msg.Offset%50 == 0 { t.Logf("recv: %d\n", msg.Offset) } } if err := stopDockerTestBroker(context.Background(), follower); err != nil { t.Fatal(err) } for ; i < numMsg/3; i++ { msg := <-pc.Messages() if msg.Offset%50 == 0 { t.Logf("recv: %d\n", msg.Offset) } } if err := startDockerTestBroker(context.Background(), follower); err != nil { t.Fatal(err) } for ; i < numMsg; i++ { msg := <-pc.Messages() if msg.Offset%50 == 0 { t.Logf("recv: %d\n", msg.Offset) } } wg.Wait() } golang-github-ibm-sarama-1.43.2/functional_consumer_group_test.go000066400000000000000000000411541461256741300252340ustar00rootroot00000000000000//go:build functional package sarama import ( "context" "errors" "fmt" "log" "reflect" "strings" "sync" "sync/atomic" "testing" "time" ) func TestFuncConsumerGroupPartitioning(t *testing.T) { checkKafkaVersion(t, "0.10.2") setupFunctionalTest(t) defer teardownFunctionalTest(t) groupID := testFuncConsumerGroupID(t) // start M1 m1 := runTestFuncConsumerGroupMember(t, groupID, "M1", 0, nil) defer m1.Stop() m1.WaitForState(2) m1.WaitForClaims(map[string]int{"test.4": 4}) m1.WaitForHandlers(4) // start M2 m2 := runTestFuncConsumerGroupMember(t, groupID, "M2", 0, nil, "test.1", "test.4") defer m2.Stop() m2.WaitForState(2) // assert that claims are shared among both members m1.WaitForClaims(map[string]int{"test.4": 2}) m1.WaitForHandlers(2) m2.WaitForClaims(map[string]int{"test.1": 1, "test.4": 2}) m2.WaitForHandlers(3) // shutdown M1, wait for M2 to take over m1.AssertCleanShutdown() m2.WaitForClaims(map[string]int{"test.1": 1, "test.4": 4}) m2.WaitForHandlers(5) // shutdown M2 m2.AssertCleanShutdown() } func 
TestFuncConsumerGroupPartitioningStateful(t *testing.T) { checkKafkaVersion(t, "0.10.2") setupFunctionalTest(t) defer teardownFunctionalTest(t) groupID := testFuncConsumerGroupID(t) m1s := newTestStatefulStrategy(t) config := defaultConfig("M1") config.Consumer.Group.Rebalance.GroupStrategies = []BalanceStrategy{m1s} config.Consumer.Group.Member.UserData = []byte(config.ClientID) // start M1 m1 := runTestFuncConsumerGroupMemberWithConfig(t, config, groupID, 0, nil) defer m1.Stop() m1.WaitForState(2) m1.WaitForClaims(map[string]int{"test.4": 4}) m1.WaitForHandlers(4) m1s.AssertInitialValues(1) m2s := newTestStatefulStrategy(t) config = defaultConfig("M2") config.Consumer.Group.Rebalance.GroupStrategies = []BalanceStrategy{m2s} config.Consumer.Group.Member.UserData = []byte(config.ClientID) // start M2 m2 := runTestFuncConsumerGroupMemberWithConfig(t, config, groupID, 0, nil, "test.1", "test.4") defer m2.Stop() m2.WaitForState(2) m1s.AssertInitialValues(2) m2s.AssertNoInitialValues() // assert that claims are shared among both members m1.WaitForClaims(map[string]int{"test.4": 2}) m1.WaitForHandlers(2) m2.WaitForClaims(map[string]int{"test.1": 1, "test.4": 2}) m2.WaitForHandlers(3) // shutdown M1, wait for M2 to take over m1.AssertCleanShutdown() m2.WaitForClaims(map[string]int{"test.1": 1, "test.4": 4}) m2.WaitForHandlers(5) m2s.AssertNoInitialValues() } func TestFuncConsumerGroupExcessConsumers(t *testing.T) { checkKafkaVersion(t, "0.10.2") setupFunctionalTest(t) defer teardownFunctionalTest(t) groupID := testFuncConsumerGroupID(t) // start members m1 := runTestFuncConsumerGroupMember(t, groupID, "M1", 0, nil) defer m1.Stop() m2 := runTestFuncConsumerGroupMember(t, groupID, "M2", 0, nil) defer m2.Stop() m3 := runTestFuncConsumerGroupMember(t, groupID, "M3", 0, nil) defer m3.Stop() m4 := runTestFuncConsumerGroupMember(t, groupID, "M4", 0, nil) defer m4.Stop() m1.WaitForClaims(map[string]int{"test.4": 1}) m2.WaitForClaims(map[string]int{"test.4": 1}) m3.WaitForClaims(map[string]int{"test.4": 1}) m4.WaitForClaims(map[string]int{"test.4": 1}) // start M5 m5 := runTestFuncConsumerGroupMember(t, groupID, "M5", 0, nil) defer m5.Stop() m5.WaitForState(1) m5.AssertNoErrs() // assert that claims are shared among both members m4.AssertCleanShutdown() m5.WaitForState(2) m5.WaitForClaims(map[string]int{"test.4": 1}) // shutdown everything m1.AssertCleanShutdown() m2.AssertCleanShutdown() m3.AssertCleanShutdown() m5.AssertCleanShutdown() } func TestFuncConsumerGroupRebalanceAfterAddingPartitions(t *testing.T) { checkKafkaVersion(t, "0.10.2") setupFunctionalTest(t) defer teardownFunctionalTest(t) config := NewFunctionalTestConfig() admin, err := NewClusterAdmin(FunctionalTestEnv.KafkaBrokerAddrs, config) if err != nil { t.Fatal(err) } defer func() { _ = admin.Close() }() groupID := testFuncConsumerGroupID(t) // start M1 m1 := runTestFuncConsumerGroupMember(t, groupID, "M1", 0, nil, "test.1") defer m1.Stop() m1.WaitForClaims(map[string]int{"test.1": 1}) m1.WaitForHandlers(1) // start M2 m2 := runTestFuncConsumerGroupMember(t, groupID, "M2", 0, nil, "test.1_to_2") defer m2.Stop() m2.WaitForClaims(map[string]int{"test.1_to_2": 1}) m1.WaitForHandlers(1) // add a new partition to topic "test.1_to_2" err = admin.CreatePartitions("test.1_to_2", 2, nil, false) if err != nil { t.Fatal(err) } // assert that claims are shared among both members m2.WaitForClaims(map[string]int{"test.1_to_2": 2}) m2.WaitForHandlers(2) m1.WaitForClaims(map[string]int{"test.1": 1}) m1.WaitForHandlers(1) m1.AssertCleanShutdown() 
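// m2 still holds both partitions of test.1_to_2 at this point (asserted above), so its shutdown
// should be clean as well.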
m2.AssertCleanShutdown() } func TestFuncConsumerGroupFuzzy(t *testing.T) { checkKafkaVersion(t, "0.10.2") setupFunctionalTest(t) defer teardownFunctionalTest(t) if err := testFuncConsumerGroupFuzzySeed("test.4"); err != nil { t.Fatal(err) } groupID := testFuncConsumerGroupID(t) sink := &testFuncConsumerGroupSink{msgs: make(chan testFuncConsumerGroupMessage, 20000)} waitForMessages := func(t *testing.T, n int) { t.Helper() for i := 0; i < 600; i++ { if sink.Len() >= n { break } time.Sleep(100 * time.Millisecond) } if sz := sink.Len(); sz < n { log.Fatalf("expected to consume %d messages, but consumed %d", n, sz) } } defer runTestFuncConsumerGroupMember(t, groupID, "M1", 1500, sink).Stop() defer runTestFuncConsumerGroupMember(t, groupID, "M2", 3000, sink).Stop() defer runTestFuncConsumerGroupMember(t, groupID, "M3", 1500, sink).Stop() defer runTestFuncConsumerGroupMember(t, groupID, "M4", 200, sink).Stop() defer runTestFuncConsumerGroupMember(t, groupID, "M5", 100, sink).Stop() waitForMessages(t, 3000) defer runTestFuncConsumerGroupMember(t, groupID, "M6", 300, sink).Stop() defer runTestFuncConsumerGroupMember(t, groupID, "M7", 400, sink).Stop() defer runTestFuncConsumerGroupMember(t, groupID, "M8", 500, sink).Stop() defer runTestFuncConsumerGroupMember(t, groupID, "M9", 2000, sink).Stop() waitForMessages(t, 8000) defer runTestFuncConsumerGroupMember(t, groupID, "M10", 1000, sink).Stop() waitForMessages(t, 10000) defer runTestFuncConsumerGroupMember(t, groupID, "M11", 1000, sink).Stop() defer runTestFuncConsumerGroupMember(t, groupID, "M12", 2500, sink).Stop() waitForMessages(t, 12000) defer runTestFuncConsumerGroupMember(t, groupID, "M13", 1000, sink).Stop() waitForMessages(t, 15000) if umap := sink.Close(); len(umap) != 15000 { dupes := make(map[string][]string) for k, v := range umap { if len(v) > 1 { dupes[k] = v } } t.Fatalf("expected %d unique messages to be consumed but got %d, including %d duplicates:\n%v", 15000, len(umap), len(dupes), dupes) } } func TestFuncConsumerGroupOffsetDeletion(t *testing.T) { checkKafkaVersion(t, "2.4.0") setupFunctionalTest(t) defer teardownFunctionalTest(t) config := NewFunctionalTestConfig() client, err := NewClient(FunctionalTestEnv.KafkaBrokerAddrs, config) defer safeClose(t, client) if err != nil { t.Fatal(err) } // create a consumer group with offsets on // - topic test.1 partition 0 // - topic test.4 partition 0 groupID := testFuncConsumerGroupID(t) consumerGroup, err := NewConsumerGroupFromClient(groupID, client) if err != nil { t.Fatal(err) } defer safeClose(t, consumerGroup) offsetMgr, _ := NewOffsetManagerFromClient(groupID, client) defer safeClose(t, offsetMgr) markOffset(t, offsetMgr, "test.1", 0, 1) markOffset(t, offsetMgr, "test.4", 0, 2) offsetMgr.Commit() admin, err := NewClusterAdminFromClient(client) if err != nil { t.Fatal(err) } offsetFetch, err := admin.ListConsumerGroupOffsets(groupID, nil) if err != nil { t.Fatal(err) } if len(offsetFetch.Blocks) != 2 { t.Fatal("Expected offsets on two topics. Found offsets on ", len(offsetFetch.Blocks), "topics.") } // Delete offset for partition topic test.4 partition 0 err = admin.DeleteConsumerGroupOffset(groupID, "test.4", 0) if err != nil { t.Fatal(err) } offsetFetch, err = admin.ListConsumerGroupOffsets(groupID, nil) if err != nil { t.Fatal(err) } if len(offsetFetch.Blocks) != 1 { t.Fatal("Expected offsets on one topic. Found offsets on ", len(offsetFetch.Blocks), "topics.") } if offsetFetch.Blocks["test.4"] != nil { t.Fatal("Offset still exists for topic 'topic.4'. 
It should have been deleted.") } } // -------------------------------------------------------------------- func testFuncConsumerGroupID(t *testing.T) string { return fmt.Sprintf("sarama.%s%d", t.Name(), time.Now().UnixNano()) } func markOffset(t *testing.T, offsetMgr OffsetManager, topic string, partition int32, offset int64) { partitionOffsetManager, err := offsetMgr.ManagePartition(topic, partition) defer safeClose(t, partitionOffsetManager) if err != nil { t.Fatal(err) } partitionOffsetManager.MarkOffset(offset, "") } func testFuncConsumerGroupFuzzySeed(topic string) error { client, err := NewClient(FunctionalTestEnv.KafkaBrokerAddrs, NewFunctionalTestConfig()) if err != nil { return err } defer func() { _ = client.Close() }() total := int64(0) for pn := int32(0); pn < 4; pn++ { newest, err := client.GetOffset(topic, pn, OffsetNewest) if err != nil { return err } oldest, err := client.GetOffset(topic, pn, OffsetOldest) if err != nil { return err } total = total + newest - oldest } if total >= 21000 { return nil } producer, err := NewAsyncProducerFromClient(client) if err != nil { return err } for i := total; i < 21000; i++ { producer.Input() <- &ProducerMessage{Topic: topic, Value: ByteEncoder([]byte("testdata"))} } return producer.Close() } type testFuncConsumerGroupMessage struct { ClientID string *ConsumerMessage } type testFuncConsumerGroupSink struct { msgs chan testFuncConsumerGroupMessage count int32 } func (s *testFuncConsumerGroupSink) Len() int { if s == nil { return -1 } return int(atomic.LoadInt32(&s.count)) } func (s *testFuncConsumerGroupSink) Push(clientID string, m *ConsumerMessage) { if s != nil { s.msgs <- testFuncConsumerGroupMessage{ClientID: clientID, ConsumerMessage: m} atomic.AddInt32(&s.count, 1) } } func (s *testFuncConsumerGroupSink) Close() map[string][]string { close(s.msgs) res := make(map[string][]string) for msg := range s.msgs { key := fmt.Sprintf("%s-%d:%d", msg.Topic, msg.Partition, msg.Offset) res[key] = append(res[key], msg.ClientID) } return res } type testFuncConsumerGroupMember struct { ConsumerGroup clientID string claims map[string]int generationId int32 state int32 handlers int32 errs []error maxMessages int32 isCapped bool sink *testFuncConsumerGroupSink t *testing.T mu sync.RWMutex } func defaultConfig(clientID string) *Config { config := NewFunctionalTestConfig() config.ClientID = clientID config.Consumer.Return.Errors = true config.Consumer.Offsets.Initial = OffsetOldest config.Consumer.Group.Rebalance.Timeout = 10 * time.Second config.Metadata.Full = false config.Metadata.RefreshFrequency = 5 * time.Second return config } func runTestFuncConsumerGroupMember(t *testing.T, groupID, clientID string, maxMessages int32, sink *testFuncConsumerGroupSink, topics ...string) *testFuncConsumerGroupMember { t.Helper() config := defaultConfig(clientID) return runTestFuncConsumerGroupMemberWithConfig(t, config, groupID, maxMessages, sink, topics...) 
} func runTestFuncConsumerGroupMemberWithConfig(t *testing.T, config *Config, groupID string, maxMessages int32, sink *testFuncConsumerGroupSink, topics ...string) *testFuncConsumerGroupMember { t.Helper() group, err := NewConsumerGroup(FunctionalTestEnv.KafkaBrokerAddrs, groupID, config) if err != nil { t.Fatal(err) return nil } if len(topics) == 0 { topics = []string{"test.4"} } member := &testFuncConsumerGroupMember{ ConsumerGroup: group, clientID: config.ClientID, claims: make(map[string]int), maxMessages: maxMessages, isCapped: maxMessages != 0, sink: sink, t: t, } go member.loop(topics) return member } func (m *testFuncConsumerGroupMember) AssertCleanShutdown() { m.t.Helper() if err := m.Close(); err != nil { m.t.Fatalf("unexpected error on Close(): %v", err) } m.WaitForState(4) m.WaitForHandlers(0) m.AssertNoErrs() } func (m *testFuncConsumerGroupMember) AssertNoErrs() { m.t.Helper() var errs []error m.mu.RLock() errs = append(errs, m.errs...) m.mu.RUnlock() if len(errs) != 0 { m.t.Fatalf("unexpected consumer errors: %v", errs) } } func (m *testFuncConsumerGroupMember) WaitForState(expected int32) { m.t.Helper() m.waitFor("state", expected, func() (interface{}, error) { return atomic.LoadInt32(&m.state), nil }) } func (m *testFuncConsumerGroupMember) WaitForHandlers(expected int) { m.t.Helper() m.waitFor("handlers", expected, func() (interface{}, error) { return int(atomic.LoadInt32(&m.handlers)), nil }) } func (m *testFuncConsumerGroupMember) WaitForClaims(expected map[string]int) { m.t.Helper() m.waitFor("claims", expected, func() (interface{}, error) { m.mu.RLock() claims := m.claims m.mu.RUnlock() return claims, nil }) } func (m *testFuncConsumerGroupMember) Stop() { _ = m.Close() } func (m *testFuncConsumerGroupMember) Setup(s ConsumerGroupSession) error { // store claims claims := make(map[string]int) for topic, partitions := range s.Claims() { claims[topic] = len(partitions) } m.mu.Lock() m.claims = claims m.mu.Unlock() // store generationID atomic.StoreInt32(&m.generationId, s.GenerationID()) // enter post-setup state atomic.StoreInt32(&m.state, 2) return nil } func (m *testFuncConsumerGroupMember) Cleanup(s ConsumerGroupSession) error { // enter post-cleanup state atomic.StoreInt32(&m.state, 3) return nil } func (m *testFuncConsumerGroupMember) ConsumeClaim(s ConsumerGroupSession, c ConsumerGroupClaim) error { atomic.AddInt32(&m.handlers, 1) defer atomic.AddInt32(&m.handlers, -1) for msg := range c.Messages() { if n := atomic.AddInt32(&m.maxMessages, -1); m.isCapped && n < 0 { break } s.MarkMessage(msg, "") m.sink.Push(m.clientID, msg) } return nil } func (m *testFuncConsumerGroupMember) waitFor(kind string, expected interface{}, factory func() (interface{}, error)) { m.t.Helper() deadline := time.NewTimer(60 * time.Second) defer deadline.Stop() ticker := time.NewTicker(100 * time.Millisecond) defer ticker.Stop() var actual interface{} for { var err error if actual, err = factory(); err != nil { m.t.Errorf("failed retrieve value, expected %s %#v but received error %v", kind, expected, err) } if reflect.DeepEqual(expected, actual) { return } select { case <-deadline.C: m.t.Fatalf("ttl exceeded, expected %s %#v but got %#v", kind, expected, actual) return case <-ticker.C: } } } func (m *testFuncConsumerGroupMember) loop(topics []string) { defer atomic.StoreInt32(&m.state, 4) go func() { for err := range m.Errors() { _ = m.Close() m.mu.Lock() m.errs = append(m.errs, err) m.mu.Unlock() } }() ctx := context.Background() for { // set state to pre-consume 
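// (state 1 = about to enter Consume; Setup bumps it to 2 once claims are assigned, Cleanup sets
// 3, and the deferred store at the top of loop() sets 4 when the loop exits for good.)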
atomic.StoreInt32(&m.state, 1) if err := m.Consume(ctx, topics, m); errors.Is(err, ErrClosedConsumerGroup) { return } else if err != nil { m.mu.Lock() m.errs = append(m.errs, err) m.mu.Unlock() return } // return if capped if n := atomic.LoadInt32(&m.maxMessages); m.isCapped && n < 0 { return } } } func newTestStatefulStrategy(t *testing.T) *testStatefulStrategy { return &testStatefulStrategy{ BalanceStrategy: NewBalanceStrategyRange(), t: t, } } type testStatefulStrategy struct { BalanceStrategy t *testing.T initial int32 state sync.Map } func (h *testStatefulStrategy) Name() string { return "TestStatefulStrategy" } func (h *testStatefulStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) { h.state = sync.Map{} for memberID, metadata := range members { if !strings.HasSuffix(string(metadata.UserData), "-stateful") { metadata.UserData = []byte(string(metadata.UserData) + "-stateful") atomic.AddInt32(&h.initial, 1) } h.state.Store(memberID, metadata.UserData) } return h.BalanceStrategy.Plan(members, topics) } func (h *testStatefulStrategy) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) { if obj, ok := h.state.Load(memberID); ok { return obj.([]byte), nil } return nil, nil } func (h *testStatefulStrategy) AssertInitialValues(count int32) { h.t.Helper() actual := atomic.LoadInt32(&h.initial) if actual != count { h.t.Fatalf("unexpected count of initial values: %d, expected: %d", actual, count) } } func (h *testStatefulStrategy) AssertNoInitialValues() { h.t.Helper() h.AssertInitialValues(0) } golang-github-ibm-sarama-1.43.2/functional_consumer_staticmembership_test.go000066400000000000000000000150711461256741300274420ustar00rootroot00000000000000//go:build functional package sarama import ( "encoding/json" "errors" "math" "reflect" "sync/atomic" "testing" ) func TestFuncConsumerGroupStaticMembership_Basic(t *testing.T) { checkKafkaVersion(t, "2.3.0") setupFunctionalTest(t) defer teardownFunctionalTest(t) groupID := testFuncConsumerGroupID(t) t.Helper() config1 := NewFunctionalTestConfig() config1.ClientID = "M1" config1.Consumer.Offsets.Initial = OffsetNewest config1.Consumer.Group.InstanceId = "Instance1" m1 := runTestFuncConsumerGroupMemberWithConfig(t, config1, groupID, 100, nil, "test.4") defer m1.Close() config2 := NewFunctionalTestConfig() config2.ClientID = "M2" config2.Consumer.Offsets.Initial = OffsetNewest config2.Consumer.Group.InstanceId = "Instance2" m2 := runTestFuncConsumerGroupMemberWithConfig(t, config2, groupID, 100, nil, "test.4") defer m2.Close() m1.WaitForState(2) m2.WaitForState(2) err := testFuncConsumerGroupProduceMessage("test.4", 1000) if err != nil { t.Fatal(err) } admin, err := NewClusterAdmin(FunctionalTestEnv.KafkaBrokerAddrs, config1) if err != nil { t.Fatal(err) } defer safeClose(t, admin) res, err := admin.DescribeConsumerGroups([]string{groupID}) if err != nil { t.Fatal(err) } if len(res) != 1 { t.Errorf("group description should be only 1, got %v\n", len(res)) } if len(res[0].Members) != 2 { t.Errorf("should have 2 members in group , got %v\n", len(res[0].Members)) } m1.WaitForState(4) m2.WaitForState(4) m1.AssertCleanShutdown() m2.AssertCleanShutdown() } func TestFuncConsumerGroupStaticMembership_RejoinAndLeave(t *testing.T) { checkKafkaVersion(t, "2.4.0") setupFunctionalTest(t) defer teardownFunctionalTest(t) groupID := testFuncConsumerGroupID(t) t.Helper() config1 := NewFunctionalTestConfig() config1.ClientID = "M1" 
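// Both members use a fixed Consumer.Group.InstanceId below; with static membership a member that
// drops out and rejoins under the same instance ID keeps its assignment, which is why the group
// generation is expected to stay unchanged until the member is explicitly removed.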
config1.Consumer.Offsets.Initial = OffsetNewest config1.Consumer.Group.InstanceId = "Instance1" m1 := runTestFuncConsumerGroupMemberWithConfig(t, config1, groupID, math.MaxInt32, nil, "test.4") defer m1.Close() config2 := NewFunctionalTestConfig() config2.ClientID = "M2" config2.Consumer.Offsets.Initial = OffsetNewest config2.Consumer.Group.InstanceId = "Instance2" m2 := runTestFuncConsumerGroupMemberWithConfig(t, config2, groupID, math.MaxInt32, nil, "test.4") defer m2.Close() m1.WaitForState(2) m2.WaitForState(2) admin, err := NewClusterAdmin(FunctionalTestEnv.KafkaBrokerAddrs, config1) if err != nil { t.Fatal(err) } defer safeClose(t, admin) res1, err := admin.DescribeConsumerGroups([]string{groupID}) if err != nil { t.Fatal(err) } if len(res1) != 1 { t.Errorf("group description should be only 1, got %v\n", len(res1)) } if len(res1[0].Members) != 2 { t.Errorf("should have 2 members in group , got %v\n", len(res1[0].Members)) } generationId1 := m1.generationId // shut down m2, membership should not change (we didn't leave group when close) m2.AssertCleanShutdown() res2, err := admin.DescribeConsumerGroups([]string{groupID}) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(res1, res2) { res1Bytes, _ := json.Marshal(res1) res2Bytes, _ := json.Marshal(res2) t.Errorf("group description be the same before %s, after %s", res1Bytes, res2Bytes) } generationId2 := atomic.LoadInt32(&m1.generationId) if generationId2 != generationId1 { t.Errorf("m1 generation should not increase expect %v, actual %v", generationId1, generationId2) } // m2 rejoin, should generate a new memberId, no re-balance happens m2 = runTestFuncConsumerGroupMemberWithConfig(t, config2, groupID, math.MaxInt32, nil, "test.4") m2.WaitForState(2) m1.WaitForState(2) res3, err := admin.DescribeConsumerGroups([]string{groupID}) if err != nil { t.Fatal(err) } if err != nil { t.Fatal(err) } if len(res3) != 1 { t.Errorf("group description should be only 1, got %v\n", len(res3)) } if len(res3[0].Members) != 2 { t.Errorf("should have 2 members in group , got %v\n", len(res3[0].Members)) } generationId3 := atomic.LoadInt32(&m1.generationId) if generationId3 != generationId1 { t.Errorf("m1 generation should not increase expect %v, actual %v", generationId1, generationId2) } m2.AssertCleanShutdown() removeResp, err := admin.RemoveMemberFromConsumerGroup(groupID, []string{config2.Consumer.Group.InstanceId}) if err != nil { t.Fatal(err) } if removeResp.Err != ErrNoError { t.Errorf("remove %s from consumer group failed %v", config2.Consumer.Group.InstanceId, removeResp.Err) } m1.WaitForHandlers(4) generationId4 := atomic.LoadInt32(&m1.generationId) if generationId4 == generationId1 { t.Errorf("m1 generation should increase expect %v, actual %v", generationId1, generationId2) } } func TestFuncConsumerGroupStaticMembership_Fenced(t *testing.T) { checkKafkaVersion(t, "2.3.0") setupFunctionalTest(t) defer teardownFunctionalTest(t) groupID := testFuncConsumerGroupID(t) t.Helper() config1 := NewFunctionalTestConfig() config1.ClientID = "M1" config1.Consumer.Offsets.Initial = OffsetNewest config1.Consumer.Group.InstanceId = "Instance1" m1 := runTestFuncConsumerGroupMemberWithConfig(t, config1, groupID, math.MaxInt32, nil, "test.4") defer m1.Close() config2 := NewFunctionalTestConfig() config2.ClientID = "M2" config2.Consumer.Offsets.Initial = OffsetNewest config2.Consumer.Group.InstanceId = "Instance2" m2 := runTestFuncConsumerGroupMemberWithConfig(t, config2, groupID, math.MaxInt32, nil, "test.4") defer m2.Close() m1.WaitForState(2) 
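// Both static members are in the post-setup state (2) before m3 joins below with a duplicate
// InstanceId, so the ErrFencedInstancedId observed on m2 is attributable to that collision.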
m2.WaitForState(2) config3 := NewFunctionalTestConfig() config3.ClientID = "M3" config3.Consumer.Offsets.Initial = OffsetNewest config3.Consumer.Group.InstanceId = "Instance2" // same instance id as config2 m3 := runTestFuncConsumerGroupMemberWithConfig(t, config3, groupID, math.MaxInt32, nil, "test.4") defer m3.Close() m3.WaitForState(2) m2.WaitForState(4) if len(m2.errs) < 1 { t.Errorf("expect m2 to be fenced by group instanced id, but got no err") } if !errors.Is(m2.errs[0], ErrFencedInstancedId) { t.Errorf("expect m2 to be fenced by group instanced id, but got wrong err %v", m2.errs[0]) } m1.AssertCleanShutdown() m3.AssertCleanShutdown() } // -------------------------------------------------------------------- func testFuncConsumerGroupProduceMessage(topic string, count int) error { client, err := NewClient(FunctionalTestEnv.KafkaBrokerAddrs, NewFunctionalTestConfig()) if err != nil { return err } defer func() { _ = client.Close() }() producer, err := NewAsyncProducerFromClient(client) if err != nil { return err } for i := 0; i < count; i++ { producer.Input() <- &ProducerMessage{Topic: topic, Value: ByteEncoder([]byte("testdata"))} } return producer.Close() } golang-github-ibm-sarama-1.43.2/functional_consumer_test.go000066400000000000000000000421271461256741300240210ustar00rootroot00000000000000//go:build functional package sarama import ( "context" "errors" "fmt" "math" "os" "sort" "strconv" "strings" "sync" "testing" "time" "golang.org/x/sync/errgroup" "github.com/rcrowley/go-metrics" assert "github.com/stretchr/testify/require" ) func TestFuncConsumerOffsetOutOfRange(t *testing.T) { setupFunctionalTest(t) defer teardownFunctionalTest(t) consumer, err := NewConsumer(FunctionalTestEnv.KafkaBrokerAddrs, NewFunctionalTestConfig()) if err != nil { t.Fatal(err) } if _, err := consumer.ConsumePartition("test.1", 0, -10); !errors.Is(err, ErrOffsetOutOfRange) { t.Error("Expected ErrOffsetOutOfRange, got:", err) } if _, err := consumer.ConsumePartition("test.1", 0, math.MaxInt64); !errors.Is(err, ErrOffsetOutOfRange) { t.Error("Expected ErrOffsetOutOfRange, got:", err) } safeClose(t, consumer) } func TestConsumerHighWaterMarkOffset(t *testing.T) { setupFunctionalTest(t) defer teardownFunctionalTest(t) config := NewFunctionalTestConfig() config.Producer.Return.Successes = true p, err := NewSyncProducer(FunctionalTestEnv.KafkaBrokerAddrs, config) if err != nil { t.Fatal(err) } defer safeClose(t, p) _, offset, err := p.SendMessage(&ProducerMessage{Topic: "test.1", Value: StringEncoder("Test")}) if err != nil { t.Fatal(err) } c, err := NewConsumer(FunctionalTestEnv.KafkaBrokerAddrs, config) if err != nil { t.Fatal(err) } defer safeClose(t, c) pc, err := c.ConsumePartition("test.1", 0, offset) if err != nil { t.Fatal(err) } <-pc.Messages() if hwmo := pc.HighWaterMarkOffset(); hwmo != offset+1 { t.Logf("Last produced offset %d; high water mark should be one higher but found %d.", offset, hwmo) } safeClose(t, pc) } // Makes sure that messages produced by all supported client versions/ // compression codecs (except LZ4) combinations can be consumed by all // supported consumer versions. It relies on the KAFKA_VERSION environment // variable to provide the version of the test Kafka cluster. // // Note that LZ4 codec was introduced in v0.10.0.0 and therefore is excluded // from this test case. It has a similar version matrix test case below that // only checks versions from v0.10.0.0 until KAFKA_VERSION. 
func TestVersionMatrix(t *testing.T) { metrics.UseNilMetrics = true // disable Sarama's go-metrics library t.Cleanup(func() { metrics.UseNilMetrics = false }) setupFunctionalTest(t) defer teardownFunctionalTest(t) // Produce lot's of message with all possible combinations of supported // protocol versions and compressions for the except of LZ4. testVersions := versionRange(V0_8_2_0) allCodecsButLZ4 := []CompressionCodec{CompressionNone, CompressionGZIP, CompressionSnappy} producedMessages := produceMsgs(t, testVersions, allCodecsButLZ4, 17, 100, false) // When/Then consumeMsgs(t, testVersions, producedMessages) } // Support for LZ4 codec was introduced in v0.10.0.0 so a version matrix to // test LZ4 should start with v0.10.0.0. func TestVersionMatrixLZ4(t *testing.T) { metrics.UseNilMetrics = true // disable Sarama's go-metrics library t.Cleanup(func() { metrics.UseNilMetrics = false }) setupFunctionalTest(t) defer teardownFunctionalTest(t) // Produce lot's of message with all possible combinations of supported // protocol versions starting with v0.10 (first where LZ4 was supported) // and all possible compressions. testVersions := versionRange(V0_10_0_0) allCodecs := []CompressionCodec{CompressionNone, CompressionGZIP, CompressionSnappy, CompressionLZ4} producedMessages := produceMsgs(t, testVersions, allCodecs, 17, 100, false) // When/Then consumeMsgs(t, testVersions, producedMessages) } // Support for zstd codec was introduced in v2.1.0.0 func TestVersionMatrixZstd(t *testing.T) { checkKafkaVersion(t, "2.1.0") metrics.UseNilMetrics = true // disable Sarama's go-metrics library t.Cleanup(func() { metrics.UseNilMetrics = false }) setupFunctionalTest(t) defer teardownFunctionalTest(t) // Produce lot's of message with all possible combinations of supported // protocol versions starting with v2.1.0.0 (first where zstd was supported) testVersions := versionRange(V2_1_0_0) allCodecs := []CompressionCodec{CompressionZSTD} producedMessages := produceMsgs(t, testVersions, allCodecs, 17, 100, false) // When/Then consumeMsgs(t, testVersions, producedMessages) } func TestVersionMatrixIdempotent(t *testing.T) { metrics.UseNilMetrics = true // disable Sarama's go-metrics library t.Cleanup(func() { metrics.UseNilMetrics = false }) setupFunctionalTest(t) defer teardownFunctionalTest(t) // Produce lot's of message with all possible combinations of supported // protocol versions starting with v0.11 (first where idempotent was supported) testVersions := versionRange(V0_11_0_0) producedMessages := produceMsgs(t, testVersions, []CompressionCodec{CompressionNone}, 17, 100, true) // When/Then consumeMsgs(t, testVersions, producedMessages) } func TestReadOnlyAndAllCommittedMessages(t *testing.T) { t.Skip("TODO: TestReadOnlyAndAllCommittedMessages is periodically failing inexplicably.") checkKafkaVersion(t, "0.11.0") setupFunctionalTest(t) defer teardownFunctionalTest(t) config := NewFunctionalTestConfig() config.ClientID = t.Name() config.Net.MaxOpenRequests = 1 config.Consumer.IsolationLevel = ReadCommitted config.Producer.Idempotent = true config.Producer.Return.Successes = true config.Producer.RequiredAcks = WaitForAll client, err := NewClient(FunctionalTestEnv.KafkaBrokerAddrs, config) if err != nil { t.Fatal(err) } defer client.Close() controller, err := client.Controller() if err != nil { t.Fatal(err) } defer controller.Close() transactionalID := strconv.FormatInt(time.Now().UnixNano()/(1<<22), 10) var coordinator *Broker // find the transaction coordinator for { coordRes, err := 
controller.FindCoordinator(&FindCoordinatorRequest{ Version: 2, CoordinatorKey: transactionalID, CoordinatorType: CoordinatorTransaction, }) if err != nil { t.Fatal(err) } if coordRes.Err != ErrNoError { continue } if err := coordRes.Coordinator.Open(client.Config()); err != nil { t.Fatal(err) } coordinator = coordRes.Coordinator break } // produce some uncommitted messages to the topic pidRes, err := coordinator.InitProducerID(&InitProducerIDRequest{ TransactionalID: &transactionalID, TransactionTimeout: 10 * time.Second, }) if err != nil { t.Fatal(err) } _, _ = coordinator.AddPartitionsToTxn(&AddPartitionsToTxnRequest{ TransactionalID: transactionalID, ProducerID: pidRes.ProducerID, ProducerEpoch: pidRes.ProducerEpoch, TopicPartitions: map[string][]int32{ uncommittedTopic: {0}, }, }) if err != nil { t.Fatal(err) } ps := &produceSet{ msgs: make(map[string]map[int32]*partitionSet), parent: &asyncProducer{ conf: config, txnmgr: &transactionManager{}, }, producerID: pidRes.ProducerID, producerEpoch: pidRes.ProducerEpoch, } _ = ps.add(&ProducerMessage{ Topic: uncommittedTopic, Partition: 0, Value: StringEncoder("uncommitted message 1"), }) _ = ps.add(&ProducerMessage{ Topic: uncommittedTopic, Partition: 0, Value: StringEncoder("uncommitted message 2"), }) produceReq := ps.buildRequest() produceReq.TransactionalID = &transactionalID if resp, err := coordinator.Produce(produceReq); err != nil { t.Fatal(err) } else { b := resp.GetBlock(uncommittedTopic, 0) if b != nil { t.Logf("uncommitted message 1 to %s-%d at offset %d", uncommittedTopic, 0, b.Offset) t.Logf("uncommitted message 2 to %s-%d at offset %d", uncommittedTopic, 0, b.Offset+1) } } // now produce some committed messages to the topic producer, err := NewAsyncProducer(FunctionalTestEnv.KafkaBrokerAddrs, config) if err != nil { t.Fatal(err) } defer producer.Close() for i := 1; i <= 6; i++ { producer.Input() <- &ProducerMessage{ Topic: uncommittedTopic, Partition: 0, Value: StringEncoder(fmt.Sprintf("Committed %v", i)), } msg := <-producer.Successes() t.Logf("Committed %v to %s-%d at offset %d", i, msg.Topic, msg.Partition, msg.Offset) } // now abort the uncommitted transaction if _, err := coordinator.EndTxn(&EndTxnRequest{ TransactionalID: transactionalID, ProducerID: pidRes.ProducerID, ProducerEpoch: pidRes.ProducerEpoch, TransactionResult: false, // aborted }); err != nil { t.Fatal(err) } consumer, err := NewConsumer(FunctionalTestEnv.KafkaBrokerAddrs, config) if err != nil { t.Fatal(err) } defer consumer.Close() pc, err := consumer.ConsumePartition(uncommittedTopic, 0, OffsetOldest) assert.NoError(t, err) msgChannel := pc.Messages() for i := 1; i <= 6; i++ { msg := <-msgChannel t.Logf("Received %s from %s-%d at offset %d", msg.Value, msg.Topic, msg.Partition, msg.Offset) assert.Equal(t, fmt.Sprintf("Committed %v", i), string(msg.Value)) } } func TestConsumerGroupDeadlock(t *testing.T) { checkKafkaVersion(t, "1.1.0") setupFunctionalTest(t) defer teardownFunctionalTest(t) const topic = "test_consumer_group_rebalance_test_topic" const msgQty = 50 partitionsQty := len(FunctionalTestEnv.KafkaBrokerAddrs) * 3 ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) config := NewFunctionalTestConfig() config.Version = V1_1_0_0 config.ClientID = t.Name() config.Producer.Return.Successes = true config.ChannelBufferSize = 2 * msgQty client, err := NewClient(FunctionalTestEnv.KafkaBrokerAddrs, config) assert.NoError(t, err) admin, err := NewClusterAdminFromClient(client) assert.NoError(t, err) cgName := 
"test_consumer_group_rebalance_consumer_group" err = admin.DeleteConsumerGroup(cgName) if err != nil { t.Logf("failed to delete topic: %s", err) } err = admin.DeleteTopic(topic) if err != nil { t.Logf("failed to delete topic: %s", err) } // it takes time to delete topic, the API is not sync for i := 0; i < 5; i++ { err = admin.CreateTopic(topic, &TopicDetail{NumPartitions: int32(partitionsQty), ReplicationFactor: 1}, false) if err == nil { break } if errors.Is(err, ErrTopicAlreadyExists) || strings.Contains(err.Error(), "is marked for deletion") { time.Sleep(500 * time.Millisecond) continue } break } assert.NoError(t, err) defer func() { _ = admin.DeleteTopic(topic) }() var wg sync.WaitGroup consumer, err := NewConsumerFromClient(client) assert.NoError(t, err) ch := make(chan string, msgQty) for i := 0; i < partitionsQty; i++ { time.Sleep(250 * time.Millisecond) // ensure delays between the "claims" wg.Add(1) go func(i int) { defer wg.Done() pConsumer, err := consumer.ConsumePartition(topic, int32(i), OffsetOldest) assert.NoError(t, err) defer pConsumer.Close() for { select { case <-ctx.Done(): return case msg, ok := <-pConsumer.Messages(): if !ok { return } // t.Logf("consumer-group %d consumed: %v from %s/%d/%d", i, msg.Value, msg.Topic, msg.Partition, msg.Offset) ch <- string(msg.Value) } } }(i) } producer, err := NewSyncProducerFromClient(client) assert.NoError(t, err) for i := 0; i < msgQty; i++ { msg := &ProducerMessage{ Topic: topic, Value: StringEncoder(strconv.FormatInt(int64(i), 10)), } _, _, err := producer.SendMessage(msg) assert.NoError(t, err) } var received []string func() { for len(received) < msgQty { select { case <-ctx.Done(): return case msg := <-ch: received = append(received, msg) // t.Logf("received: %s, count: %d", msg, len(received)) } } }() cancel() assert.Equal(t, msgQty, len(received)) err = producer.Close() assert.NoError(t, err) err = consumer.Close() assert.NoError(t, err) err = client.Close() assert.NoError(t, err) wg.Wait() } func prodMsg2Str(prodMsg *ProducerMessage) string { return fmt.Sprintf("{offset: %d, value: %s}", prodMsg.Offset, string(prodMsg.Value.(StringEncoder))) } func consMsg2Str(consMsg *ConsumerMessage) string { return fmt.Sprintf("{offset: %d, value: %s}", consMsg.Offset, string(consMsg.Value)) } func versionRange(lower KafkaVersion) []KafkaVersion { // Get the test cluster version from the environment. If there is nothing // there then assume the highest. 
upper, err := ParseKafkaVersion(os.Getenv("KAFKA_VERSION")) if err != nil { upper = MaxVersion } versions := make([]KafkaVersion, 0, len(fvtRangeVersions)) for _, v := range fvtRangeVersions { if !v.IsAtLeast(lower) { continue } if !upper.IsAtLeast(v) { return versions } versions = append(versions, v) } return versions } func produceMsgs(t *testing.T, clientVersions []KafkaVersion, codecs []CompressionCodec, flush int, countPerVerCodec int, idempotent bool) []*ProducerMessage { var ( producers []SyncProducer producedMessagesMu sync.Mutex producedMessages []*ProducerMessage ) g := errgroup.Group{} for _, prodVer := range clientVersions { for _, codec := range codecs { prodCfg := NewFunctionalTestConfig() prodCfg.ClientID = t.Name() + "-Producer-" + prodVer.String() if idempotent { prodCfg.ClientID += "-idempotent" } if codec > 0 { prodCfg.ClientID += "-" + codec.String() } prodCfg.Metadata.Full = false prodCfg.Version = prodVer prodCfg.Producer.Return.Successes = true prodCfg.Producer.Return.Errors = true prodCfg.Producer.Flush.MaxMessages = flush prodCfg.Producer.Compression = codec prodCfg.Producer.Idempotent = idempotent if idempotent { prodCfg.Producer.RequiredAcks = WaitForAll prodCfg.Net.MaxOpenRequests = 1 } p, err := NewSyncProducer(FunctionalTestEnv.KafkaBrokerAddrs, prodCfg) if err != nil { t.Fatalf("Failed to create producer: version=%s, compression=%s, err=%v", prodVer, codec, err) } producers = append(producers, p) prodVer := prodVer codec := codec g.Go(func() error { t.Logf("*** Producing with client version %s codec %s\n", prodVer, codec) var wg sync.WaitGroup for i := 0; i < countPerVerCodec; i++ { msg := &ProducerMessage{ Topic: "test.1", Value: StringEncoder(fmt.Sprintf("msg:%s:%s:%d", prodVer, codec, i)), } wg.Add(1) go func() { defer wg.Done() _, _, err := p.SendMessage(msg) if err != nil { t.Errorf("Failed to produce message: %s, err=%v", msg.Value, err) } producedMessagesMu.Lock() producedMessages = append(producedMessages, msg) producedMessagesMu.Unlock() }() } wg.Wait() return nil }) } } if err := g.Wait(); err != nil { t.Fatal(err) } for _, p := range producers { safeClose(t, p) } // Sort produced message in ascending offset order. sort.Slice(producedMessages, func(i, j int) bool { return producedMessages[i].Offset < producedMessages[j].Offset }) assert.NotEmpty(t, producedMessages, "should have produced >0 messages") t.Logf("*** Total produced %d, firstOffset=%d, lastOffset=%d\n", len(producedMessages), producedMessages[0].Offset, producedMessages[len(producedMessages)-1].Offset) return producedMessages } func consumeMsgs(t *testing.T, clientVersions []KafkaVersion, producedMessages []*ProducerMessage) { // Consume all produced messages with all client versions supported by the // cluster. g := errgroup.Group{} for _, consVer := range clientVersions { // Create a partition consumer that should start from the first produced // message. 
consCfg := NewFunctionalTestConfig() consCfg.ClientID = t.Name() + "-Consumer-" + consVer.String() consCfg.Consumer.MaxProcessingTime = time.Second consCfg.Metadata.Full = false consCfg.Version = consVer c, err := NewConsumer(FunctionalTestEnv.KafkaBrokerAddrs, consCfg) if err != nil { t.Fatal(err) } defer safeClose(t, c) //nolint: gocritic // the close intentionally happens outside the loop pc, err := c.ConsumePartition("test.1", 0, producedMessages[0].Offset) if err != nil { t.Fatal(err) } defer safeClose(t, pc) //nolint: gocritic // the close intentionally happens outside the loop var wg sync.WaitGroup wg.Add(1) consVer := consVer g.Go(func() error { // Consume as many messages as there have been produced and make sure that // order is preserved. t.Logf("*** Consuming with client version %s\n", consVer) for i, prodMsg := range producedMessages { select { case consMsg := <-pc.Messages(): if consMsg.Offset != prodMsg.Offset { t.Fatalf("Consumed unexpected offset: version=%s, index=%d, want=%s, got=%s", consVer, i, prodMsg2Str(prodMsg), consMsg2Str(consMsg)) } if string(consMsg.Value) != string(prodMsg.Value.(StringEncoder)) { t.Fatalf("Consumed unexpected msg: version=%s, index=%d, want=%s, got=%s", consVer, i, prodMsg2Str(prodMsg), consMsg2Str(consMsg)) } if i == 0 { t.Logf("Consumed first msg: version=%s, index=%d, got=%s", consVer, i, consMsg2Str(consMsg)) wg.Done() } if i%1000 == 0 { t.Logf("Consumed messages: version=%s, index=%d, got=%s", consVer, i, consMsg2Str(consMsg)) } case <-time.After(15 * time.Second): t.Fatalf("Timeout %s waiting for: index=%d, offset=%d, msg=%s", consCfg.ClientID, i, prodMsg.Offset, prodMsg.Value) } } return nil }) wg.Wait() // wait for first message to be consumed before starting next consumer } if err := g.Wait(); err != nil { t.Fatal(err) } } golang-github-ibm-sarama-1.43.2/functional_offset_manager_test.go000066400000000000000000000021101461256741300251320ustar00rootroot00000000000000//go:build functional package sarama import ( "testing" ) func TestFuncOffsetManager(t *testing.T) { checkKafkaVersion(t, "0.8.2") setupFunctionalTest(t) defer teardownFunctionalTest(t) client, err := NewClient(FunctionalTestEnv.KafkaBrokerAddrs, NewFunctionalTestConfig()) if err != nil { t.Fatal(err) } offsetManager, err := NewOffsetManagerFromClient("sarama.TestFuncOffsetManager", client) if err != nil { t.Fatal(err) } pom1, err := offsetManager.ManagePartition("test.1", 0) if err != nil { t.Fatal(err) } pom1.MarkOffset(10, "test metadata") safeClose(t, pom1) // Avoid flaky test: submit offset & let om cleanup removed poms offsetManager.Commit() pom2, err := offsetManager.ManagePartition("test.1", 0) if err != nil { t.Fatal(err) } offset, metadata := pom2.NextOffset() if offset != 10 { t.Errorf("Expected the next offset to be 10, found %d.", offset) } if metadata != "test metadata" { t.Errorf("Expected metadata to be 'test metadata', found %s.", metadata) } safeClose(t, pom2) safeClose(t, offsetManager) safeClose(t, client) } golang-github-ibm-sarama-1.43.2/functional_producer_test.go000066400000000000000000001062761461256741300240170ustar00rootroot00000000000000//go:build functional package sarama import ( "context" "errors" "fmt" "os" "strconv" "strings" "sync" "testing" "time" "github.com/rcrowley/go-metrics" "github.com/stretchr/testify/require" "github.com/IBM/sarama/internal/toxiproxy" ) const TestBatchSize = 1000 func TestFuncProducing(t *testing.T) { config := NewFunctionalTestConfig() testProducingMessages(t, config, MinVersion) } func TestFuncProducingGzip(t 
*testing.T) { config := NewFunctionalTestConfig() config.Producer.Compression = CompressionGZIP testProducingMessages(t, config, MinVersion) } func TestFuncProducingSnappy(t *testing.T) { config := NewFunctionalTestConfig() config.Producer.Compression = CompressionSnappy testProducingMessages(t, config, MinVersion) } func TestFuncProducingZstd(t *testing.T) { config := NewFunctionalTestConfig() config.Producer.Compression = CompressionZSTD testProducingMessages(t, config, V2_1_0_0) // must be at least 2.1.0.0 for zstd } func TestFuncProducingNoResponse(t *testing.T) { config := NewFunctionalTestConfig() config.Producer.RequiredAcks = NoResponse testProducingMessages(t, config, MinVersion) } func TestFuncProducingFlushing(t *testing.T) { config := NewFunctionalTestConfig() config.Producer.Flush.Messages = TestBatchSize / 8 config.Producer.Flush.Frequency = 250 * time.Millisecond testProducingMessages(t, config, MinVersion) } func TestFuncMultiPartitionProduce(t *testing.T) { setupFunctionalTest(t) defer teardownFunctionalTest(t) config := NewFunctionalTestConfig() config.ChannelBufferSize = 20 config.Producer.Flush.Frequency = 50 * time.Millisecond config.Producer.Flush.Messages = 200 config.Producer.Return.Successes = true producer, err := NewSyncProducer(FunctionalTestEnv.KafkaBrokerAddrs, config) if err != nil { t.Fatal(err) } var wg sync.WaitGroup wg.Add(TestBatchSize) for i := 1; i <= TestBatchSize; i++ { go func(i int) { defer wg.Done() msg := &ProducerMessage{Topic: "test.64", Key: nil, Value: StringEncoder(fmt.Sprintf("hur %d", i))} if _, _, err := producer.SendMessage(msg); err != nil { t.Error(i, err) } }(i) } wg.Wait() if err := producer.Close(); err != nil { t.Error(err) } } func TestFuncTxnProduceNoBegin(t *testing.T) { checkKafkaVersion(t, "0.11.0.0") setupFunctionalTest(t) defer teardownFunctionalTest(t) config := NewFunctionalTestConfig() config.ChannelBufferSize = 20 config.Producer.Flush.Frequency = 50 * time.Millisecond config.Producer.Flush.Messages = 200 config.Producer.Idempotent = true config.Producer.Transaction.ID = "TestFuncTxnProduceNoBegin" config.Producer.RequiredAcks = WaitForAll config.Producer.Retry.Max = 50 config.Consumer.IsolationLevel = ReadCommitted config.Producer.Return.Errors = true config.Producer.Transaction.Retry.Max = 200 config.Net.MaxOpenRequests = 1 producer, err := NewAsyncProducer(FunctionalTestEnv.KafkaBrokerAddrs, config) require.NoError(t, err) defer producer.Close() producer.Input() <- &ProducerMessage{Topic: "test.1", Key: nil, Value: StringEncoder("test")} producerError := <-producer.Errors() require.Error(t, producerError) } func TestFuncTxnCommitNoMessages(t *testing.T) { checkKafkaVersion(t, "0.11.0.0") setupFunctionalTest(t) defer teardownFunctionalTest(t) config := NewFunctionalTestConfig() config.ChannelBufferSize = 20 config.Producer.Flush.Frequency = 50 * time.Millisecond config.Producer.Flush.Messages = 200 config.Producer.Idempotent = true config.Producer.Transaction.ID = "TestFuncTxnCommitNoMessages" config.Producer.RequiredAcks = WaitForAll config.Producer.Retry.Max = 50 config.Consumer.IsolationLevel = ReadCommitted config.Producer.Return.Errors = true config.Producer.Transaction.Retry.Max = 200 config.Net.MaxOpenRequests = 1 producer, err := NewAsyncProducer(FunctionalTestEnv.KafkaBrokerAddrs, config) require.NoError(t, err) defer producer.Close() err = producer.BeginTxn() require.NoError(t, err) err = producer.AbortTxn() require.NoError(t, err) err = producer.BeginTxn() require.NoError(t, err) err = producer.CommitTxn() 
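// Committing (or, above, aborting) a transaction in which nothing was produced is still expected
// to succeed rather than return an error.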
require.NoError(t, err) } func TestFuncTxnProduce(t *testing.T) { checkKafkaVersion(t, "0.11.0.0") setupFunctionalTest(t) defer teardownFunctionalTest(t) config := NewFunctionalTestConfig() config.ChannelBufferSize = 20 config.Producer.Flush.Frequency = 50 * time.Millisecond config.Producer.Flush.Messages = 200 config.Producer.Idempotent = true config.Producer.Transaction.ID = "TestFuncTxnProduce" config.Producer.RequiredAcks = WaitForAll config.Producer.Transaction.Retry.Max = 200 config.Consumer.IsolationLevel = ReadCommitted config.Net.MaxOpenRequests = 1 consumer, err := NewConsumer(FunctionalTestEnv.KafkaBrokerAddrs, config) require.NoError(t, err) defer consumer.Close() pc, err := consumer.ConsumePartition("test.1", 0, OffsetNewest) msgChannel := pc.Messages() require.NoError(t, err) defer pc.Close() nonTransactionalProducer, err := NewAsyncProducer(FunctionalTestEnv.KafkaBrokerAddrs, NewFunctionalTestConfig()) require.NoError(t, err) defer nonTransactionalProducer.Close() // Ensure consumer is started nonTransactionalProducer.Input() <- &ProducerMessage{Topic: "test.1", Key: nil, Value: StringEncoder("test")} <-msgChannel producer, err := NewAsyncProducer(FunctionalTestEnv.KafkaBrokerAddrs, config) require.NoError(t, err) defer producer.Close() err = producer.BeginTxn() require.NoError(t, err) for i := 0; i < 1; i++ { producer.Input() <- &ProducerMessage{Topic: "test.1", Key: nil, Value: StringEncoder("test")} } err = producer.CommitTxn() require.NoError(t, err) for i := 0; i < 1; i++ { msg := <-msgChannel t.Logf("Received %s from %s-%d at offset %d", msg.Value, msg.Topic, msg.Partition, msg.Offset) } } func TestFuncTxnProduceWithBrokerFailure(t *testing.T) { checkKafkaVersion(t, "0.11.0.0") setupFunctionalTest(t) defer teardownFunctionalTest(t) config := NewFunctionalTestConfig() config.ChannelBufferSize = 20 config.Producer.Flush.Frequency = 50 * time.Millisecond config.Producer.Flush.Messages = 200 config.Producer.Idempotent = true config.Producer.Transaction.ID = "TestFuncTxnProduceWithBrokerFailure" config.Producer.RequiredAcks = WaitForAll config.Producer.Transaction.Retry.Max = 200 config.Consumer.IsolationLevel = ReadCommitted config.Net.MaxOpenRequests = 1 consumer, err := NewConsumer(FunctionalTestEnv.KafkaBrokerAddrs, config) require.NoError(t, err) defer consumer.Close() pc, err := consumer.ConsumePartition("test.1", 0, OffsetNewest) msgChannel := pc.Messages() require.NoError(t, err) defer pc.Close() nonTransactionalProducer, err := NewAsyncProducer(FunctionalTestEnv.KafkaBrokerAddrs, NewFunctionalTestConfig()) require.NoError(t, err) defer nonTransactionalProducer.Close() // Ensure consumer is started nonTransactionalProducer.Input() <- &ProducerMessage{Topic: "test.1", Key: nil, Value: StringEncoder("test")} <-msgChannel producer, err := NewAsyncProducer(FunctionalTestEnv.KafkaBrokerAddrs, config) require.NoError(t, err) defer producer.Close() txCoordinator, _ := producer.(*asyncProducer).client.TransactionCoordinator(config.Producer.Transaction.ID) err = producer.BeginTxn() require.NoError(t, err) if err := stopDockerTestBroker(context.Background(), txCoordinator.id); err != nil { t.Fatal(err) } defer func() { if err := startDockerTestBroker(context.Background(), txCoordinator.id); err != nil { t.Fatal(err) } t.Logf("\n") }() for i := 0; i < 1; i++ { producer.Input() <- &ProducerMessage{Topic: "test.1", Key: nil, Value: StringEncoder("test")} } err = producer.CommitTxn() require.NoError(t, err) for i := 0; i < 1; i++ { msg := <-msgChannel t.Logf("Received %s from 
%s-%d at offset %d", msg.Value, msg.Topic, msg.Partition, msg.Offset) } } func TestFuncTxnProduceEpochBump(t *testing.T) { checkKafkaVersion(t, "2.6.0") setupFunctionalTest(t) defer teardownFunctionalTest(t) config := NewFunctionalTestConfig() config.ChannelBufferSize = 20 config.Producer.Flush.Frequency = 50 * time.Millisecond config.Producer.Flush.Messages = 200 config.Producer.Idempotent = true config.Producer.Transaction.ID = "TestFuncTxnProduceEpochBump" config.Producer.RequiredAcks = WaitForAll config.Producer.Transaction.Retry.Max = 200 config.Consumer.IsolationLevel = ReadCommitted config.Net.MaxOpenRequests = 1 consumer, err := NewConsumer(FunctionalTestEnv.KafkaBrokerAddrs, config) require.NoError(t, err) defer consumer.Close() pc, err := consumer.ConsumePartition("test.1", 0, OffsetNewest) msgChannel := pc.Messages() require.NoError(t, err) defer pc.Close() nonTransactionalProducer, err := NewAsyncProducer(FunctionalTestEnv.KafkaBrokerAddrs, NewFunctionalTestConfig()) require.NoError(t, err) defer nonTransactionalProducer.Close() // Ensure consumer is started nonTransactionalProducer.Input() <- &ProducerMessage{Topic: "test.1", Key: nil, Value: StringEncoder("test")} <-msgChannel producer, err := NewAsyncProducer(FunctionalTestEnv.KafkaBrokerAddrs, config) require.NoError(t, err) defer producer.Close() err = producer.BeginTxn() require.NoError(t, err) for i := 0; i < 1; i++ { producer.Input() <- &ProducerMessage{Topic: "test.1", Key: nil, Value: StringEncoder("test")} } err = producer.CommitTxn() require.NoError(t, err) for i := 0; i < 1; i++ { msg := <-msgChannel t.Logf("Received %s from %s-%d at offset %d", msg.Value, msg.Topic, msg.Partition, msg.Offset) } err = producer.BeginTxn() require.NoError(t, err) for i := 0; i < 1; i++ { producer.Input() <- &ProducerMessage{Topic: "test.1", Key: nil, Value: StringEncoder("test")} } err = producer.CommitTxn() require.NoError(t, err) for i := 0; i < 1; i++ { msg := <-msgChannel t.Logf("Received %s from %s-%d at offset %d", msg.Value, msg.Topic, msg.Partition, msg.Offset) } } func TestFuncInitProducerId3(t *testing.T) { checkKafkaVersion(t, "2.6.0") setupFunctionalTest(t) defer teardownFunctionalTest(t) config := NewFunctionalTestConfig() config.ChannelBufferSize = 20 config.Producer.Flush.Frequency = 50 * time.Millisecond config.Producer.Flush.Messages = 200 config.Producer.Idempotent = true config.Producer.Transaction.ID = "TestFuncInitProducerId3" config.Producer.RequiredAcks = WaitForAll config.Producer.Retry.Max = 50 config.Consumer.IsolationLevel = ReadCommitted config.Net.MaxOpenRequests = 1 producer, err := NewAsyncProducer(FunctionalTestEnv.KafkaBrokerAddrs, config) require.NoError(t, err) defer producer.Close() require.Equal(t, true, producer.(*asyncProducer).txnmgr.coordinatorSupportsBumpingEpoch) } type messageHandler struct { *testing.T h func(*ConsumerMessage) started sync.WaitGroup } func (h *messageHandler) Setup(s ConsumerGroupSession) error { return nil } func (h *messageHandler) Cleanup(s ConsumerGroupSession) error { return nil } func (h *messageHandler) ConsumeClaim(sess ConsumerGroupSession, claim ConsumerGroupClaim) error { h.started.Done() for msg := range claim.Messages() { h.Logf("consumed msg %v", msg) h.h(msg) } return nil } func TestFuncTxnProduceAndCommitOffset(t *testing.T) { checkKafkaVersion(t, "0.11.0.0") setupFunctionalTest(t) defer teardownFunctionalTest(t) config := NewFunctionalTestConfig() config.ChannelBufferSize = 20 config.Producer.Flush.Frequency = 50 * time.Millisecond 
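// Flush a batch either once 200 messages have accumulated or every 50ms, whichever comes first.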
config.Producer.Flush.Messages = 200 config.Producer.Idempotent = true config.Producer.Transaction.ID = "TestFuncTxnProduceAndCommitOffset" config.Producer.RequiredAcks = WaitForAll config.Producer.Transaction.Retry.Max = 200 config.Consumer.IsolationLevel = ReadCommitted config.Consumer.Offsets.AutoCommit.Enable = false config.Net.MaxOpenRequests = 1 client, err := NewClient(FunctionalTestEnv.KafkaBrokerAddrs, config) require.NoError(t, err) defer client.Close() admin, err := NewClusterAdminFromClient(client) require.NoError(t, err) defer admin.Close() producer, err := NewAsyncProducerFromClient(client) require.NoError(t, err) defer producer.Close() cg, err := NewConsumerGroupFromClient("test-produce", client) require.NoError(t, err) defer cg.Close() ctx, cancel := context.WithCancel(context.Background()) defer cancel() handler := &messageHandler{} handler.T = t handler.h = func(msg *ConsumerMessage) { err := producer.BeginTxn() require.NoError(t, err) producer.Input() <- &ProducerMessage{Topic: "test.1", Value: StringEncoder("test-prod")} err = producer.AddMessageToTxn(msg, "test-produce", nil) require.NoError(t, err) err = producer.CommitTxn() require.NoError(t, err) } handler.started.Add(4) go func() { err = cg.Consume(ctx, []string{"test.4"}, handler) require.NoError(t, err) }() handler.started.Wait() nonTransactionalProducer, err := NewAsyncProducer(FunctionalTestEnv.KafkaBrokerAddrs, NewFunctionalTestConfig()) require.NoError(t, err) defer nonTransactionalProducer.Close() consumer, err := NewConsumerFromClient(client) require.NoError(t, err) defer consumer.Close() pc, err := consumer.ConsumePartition("test.1", 0, OffsetNewest) msgChannel := pc.Messages() require.NoError(t, err) defer pc.Close() // Ensure consumer is started nonTransactionalProducer.Input() <- &ProducerMessage{Topic: "test.1", Key: nil, Value: StringEncoder("test")} <-msgChannel for i := 0; i < 1; i++ { nonTransactionalProducer.Input() <- &ProducerMessage{Topic: "test.4", Key: nil, Value: StringEncoder("test")} } for i := 0; i < 1; i++ { msg := <-msgChannel t.Logf("Received %s from %s-%d at offset %d", msg.Value, msg.Topic, msg.Partition, msg.Offset) } topicPartitions := make(map[string][]int32) topicPartitions["test.4"] = []int32{0, 1, 2, 3} topicsDescription, err := admin.ListConsumerGroupOffsets("test-produce", topicPartitions) require.NoError(t, err) for _, partition := range topicPartitions["test.4"] { block := topicsDescription.GetBlock("test.4", partition) _ = client.RefreshMetadata("test.4") lastOffset, err := client.GetOffset("test.4", partition, OffsetNewest) require.NoError(t, err) if block.Offset > -1 { require.Equal(t, lastOffset, block.Offset) } } } func TestFuncTxnProduceMultiTxn(t *testing.T) { checkKafkaVersion(t, "0.11.0.0") setupFunctionalTest(t) defer teardownFunctionalTest(t) config := NewFunctionalTestConfig() config.ChannelBufferSize = 20 config.Producer.Flush.Frequency = 50 * time.Millisecond config.Producer.Flush.Messages = 200 config.Producer.Idempotent = true config.Producer.Transaction.ID = "TestFuncTxnProduceMultiTxn" config.Producer.RequiredAcks = WaitForAll config.Producer.Transaction.Retry.Max = 200 config.Consumer.IsolationLevel = ReadCommitted config.Net.MaxOpenRequests = 1 configSecond := NewFunctionalTestConfig() configSecond.ChannelBufferSize = 20 configSecond.Producer.Flush.Frequency = 50 * time.Millisecond configSecond.Producer.Flush.Messages = 200 configSecond.Producer.Idempotent = true configSecond.Producer.Transaction.ID = "TestFuncTxnProduceMultiTxn-second" 
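// The second producer gets its own transactional ID (and therefore its own producer ID/epoch), so
// aborting its transaction below must not affect the batch committed by the first producer.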
configSecond.Producer.RequiredAcks = WaitForAll configSecond.Producer.Retry.Max = 50 configSecond.Consumer.IsolationLevel = ReadCommitted configSecond.Net.MaxOpenRequests = 1 consumer, err := NewConsumer(FunctionalTestEnv.KafkaBrokerAddrs, config) require.NoError(t, err) defer consumer.Close() pc, err := consumer.ConsumePartition("test.1", 0, OffsetNewest) msgChannel := pc.Messages() require.NoError(t, err) defer pc.Close() nonTransactionalConfig := NewFunctionalTestConfig() nonTransactionalConfig.Producer.Return.Successes = true nonTransactionalConfig.Producer.Return.Errors = true nonTransactionalProducer, err := NewAsyncProducer(FunctionalTestEnv.KafkaBrokerAddrs, nonTransactionalConfig) require.NoError(t, err) defer nonTransactionalProducer.Close() // Ensure consumer is started nonTransactionalProducer.Input() <- &ProducerMessage{Topic: "test.1", Key: nil, Value: StringEncoder("test")} <-msgChannel producer, err := NewAsyncProducer(FunctionalTestEnv.KafkaBrokerAddrs, config) require.NoError(t, err) defer producer.Close() producerSecond, err := NewAsyncProducer(FunctionalTestEnv.KafkaBrokerAddrs, configSecond) require.NoError(t, err) defer producerSecond.Close() err = producer.BeginTxn() require.NoError(t, err) for i := 0; i < 2; i++ { producer.Input() <- &ProducerMessage{Topic: "test.1", Key: nil, Value: StringEncoder("test-committed")} } err = producerSecond.BeginTxn() require.NoError(t, err) for i := 0; i < 2; i++ { producerSecond.Input() <- &ProducerMessage{Topic: "test.1", Key: nil, Value: StringEncoder("test-aborted")} } err = producer.CommitTxn() require.NoError(t, err) err = producerSecond.AbortTxn() require.NoError(t, err) for i := 0; i < 2; i++ { msg := <-msgChannel t.Logf("Received %s from %s-%d at offset %d", msg.Value, msg.Topic, msg.Partition, msg.Offset) require.Equal(t, "test-committed", string(msg.Value)) } } func TestFuncTxnAbortedProduce(t *testing.T) { checkKafkaVersion(t, "0.11.0.0") setupFunctionalTest(t) defer teardownFunctionalTest(t) config := NewFunctionalTestConfig() config.ChannelBufferSize = 20 config.Producer.Flush.Frequency = 50 * time.Millisecond config.Producer.Flush.Messages = 200 config.Producer.Idempotent = true config.Producer.Transaction.ID = "TestFuncTxnAbortedProduce" config.Producer.RequiredAcks = WaitForAll config.Producer.Return.Successes = true config.Producer.Transaction.Retry.Max = 200 config.Consumer.IsolationLevel = ReadCommitted config.Net.MaxOpenRequests = 1 client, err := NewClient(FunctionalTestEnv.KafkaBrokerAddrs, config) require.NoError(t, err) defer client.Close() consumer, err := NewConsumerFromClient(client) require.NoError(t, err) defer consumer.Close() pc, err := consumer.ConsumePartition("test.1", 0, OffsetNewest) msgChannel := pc.Messages() require.NoError(t, err) defer pc.Close() nonTransactionalConfig := NewFunctionalTestConfig() nonTransactionalConfig.Producer.Return.Successes = true nonTransactionalConfig.Producer.Return.Errors = true nonTransactionalProducer, err := NewAsyncProducer(FunctionalTestEnv.KafkaBrokerAddrs, nonTransactionalConfig) require.NoError(t, err) defer nonTransactionalProducer.Close() // Ensure consumer is started nonTransactionalProducer.Input() <- &ProducerMessage{Topic: "test.1", Key: nil, Value: StringEncoder("test")} <-msgChannel producer, err := NewAsyncProducerFromClient(client) require.NoError(t, err) defer producer.Close() err = producer.BeginTxn() require.NoError(t, err) for i := 0; i < 2; i++ { producer.Input() <- &ProducerMessage{Topic: "test.1", Key: nil, Value: 
StringEncoder("transactional")} } for i := 0; i < 2; i++ { <-producer.Successes() } err = producer.AbortTxn() require.NoError(t, err) for i := 0; i < 2; i++ { nonTransactionalProducer.Input() <- &ProducerMessage{Topic: "test.1", Key: nil, Value: StringEncoder("non-transactional")} <-nonTransactionalProducer.Successes() } for i := 0; i < 2; i++ { msg := <-msgChannel t.Logf("Received %s from %s-%d at offset %d", msg.Value, msg.Topic, msg.Partition, msg.Offset) require.Equal(t, "non-transactional", string(msg.Value)) } } func TestFuncProducingToInvalidTopic(t *testing.T) { setupFunctionalTest(t) defer teardownFunctionalTest(t) config := NewFunctionalTestConfig() config.Producer.Return.Successes = true producer, err := NewSyncProducer(FunctionalTestEnv.KafkaBrokerAddrs, config) if err != nil { t.Fatal(err) } if _, _, err := producer.SendMessage(&ProducerMessage{Topic: "in/valid"}); !errors.Is(err, ErrUnknownTopicOrPartition) && !errors.Is(err, ErrInvalidTopic) { t.Error("Expected ErrUnknownTopicOrPartition, found", err) } safeClose(t, producer) } func TestFuncProducingIdempotentWithBrokerFailure(t *testing.T) { setupFunctionalTest(t) defer teardownFunctionalTest(t) config := NewFunctionalTestConfig() config.Producer.Flush.Frequency = 250 * time.Millisecond config.Producer.Idempotent = true config.Producer.Timeout = 500 * time.Millisecond config.Producer.Retry.Max = 1 config.Producer.Retry.Backoff = 500 * time.Millisecond config.Producer.Return.Successes = true config.Producer.Return.Errors = true config.Producer.RequiredAcks = WaitForAll config.Net.MaxOpenRequests = 1 producer, err := NewSyncProducer(FunctionalTestEnv.KafkaBrokerAddrs, config) if err != nil { t.Fatal(err) } defer safeClose(t, producer) // Successfully publish a few messages for i := 0; i < 10; i++ { _, _, err = producer.SendMessage(&ProducerMessage{ Topic: "test.1", Value: StringEncoder(fmt.Sprintf("%d message", i)), }) if err != nil { t.Fatal(err) } } // break the brokers. 
for proxyName, proxy := range FunctionalTestEnv.Proxies { if !strings.Contains(proxyName, "kafka") { continue } if err := proxy.Disable(); err != nil { t.Fatal(err) } } // This should fail hard now for i := 10; i < 20; i++ { _, _, err = producer.SendMessage(&ProducerMessage{ Topic: "test.1", Value: StringEncoder(fmt.Sprintf("%d message", i)), }) if err == nil { t.Fatal(err) } } // Now bring the proxy back up for proxyName, proxy := range FunctionalTestEnv.Proxies { if !strings.Contains(proxyName, "kafka") { continue } if err := proxy.Enable(); err != nil { t.Fatal(err) } } // We should be able to publish again (once everything calms down) // (otherwise it times out) for { _, _, err = producer.SendMessage(&ProducerMessage{ Topic: "test.1", Value: StringEncoder("comeback message"), }) if err == nil { break } } } func TestInterceptors(t *testing.T) { config := NewFunctionalTestConfig() setupFunctionalTest(t) defer teardownFunctionalTest(t) config.Producer.Return.Successes = true config.Consumer.Return.Errors = true config.Producer.Interceptors = []ProducerInterceptor{&appendInterceptor{i: 0}, &appendInterceptor{i: 100}} config.Consumer.Interceptors = []ConsumerInterceptor{&appendInterceptor{i: 20}} client, err := NewClient(FunctionalTestEnv.KafkaBrokerAddrs, config) if err != nil { t.Fatal(err) } defer safeClose(t, client) initialOffset, err := client.GetOffset("test.1", 0, OffsetNewest) if err != nil { t.Fatal(err) } producer, err := NewAsyncProducerFromClient(client) if err != nil { t.Fatal(err) } for i := 0; i < 10; i++ { producer.Input() <- &ProducerMessage{Topic: "test.1", Key: nil, Value: StringEncoder(TestMessage)} } for i := 0; i < 10; i++ { select { case msg := <-producer.Errors(): t.Error(msg.Err) case msg := <-producer.Successes(): v, _ := msg.Value.Encode() expected := TestMessage + strconv.Itoa(i) + strconv.Itoa(i+100) if string(v) != expected { t.Errorf("Interceptor should have incremented the value, got %s, expected %s", v, expected) } } } safeClose(t, producer) master, err := NewConsumerFromClient(client) if err != nil { t.Fatal(err) } consumer, err := master.ConsumePartition("test.1", 0, initialOffset) if err != nil { t.Fatal(err) } for i := 0; i < 10; i++ { select { case <-time.After(10 * time.Second): t.Fatal("Not received any more events in the last 10 seconds.") case err := <-consumer.Errors(): t.Error(err) case msg := <-consumer.Messages(): prodInteExpectation := strconv.Itoa(i) + strconv.Itoa(i+100) consInteExpectation := strconv.Itoa(i + 20) expected := TestMessage + prodInteExpectation + consInteExpectation v := string(msg.Value) if v != expected { t.Errorf("Interceptor should have incremented the value, got %s, expected %s", v, expected) } } } safeClose(t, consumer) } func testProducingMessages(t *testing.T, config *Config, minVersion KafkaVersion) { setupFunctionalTest(t) defer teardownFunctionalTest(t) // Configure some latency in order to properly validate the request latency metric for _, proxy := range FunctionalTestEnv.Proxies { if _, err := proxy.AddToxic("", "latency", "", 1, toxiproxy.Attributes{"latency": 10}); err != nil { t.Fatal("Unable to configure latency toxicity", err) } } config.Producer.Return.Successes = true config.Consumer.Return.Errors = true kafkaVersions := map[KafkaVersion]bool{} for _, v := range []KafkaVersion{MinVersion, V0_10_0_0, V0_11_0_0, V1_0_0_0, V2_0_0_0, V2_1_0_0} { if v.IsAtLeast(minVersion) { kafkaVersions[v] = true } } if upper, err := ParseKafkaVersion(os.Getenv("KAFKA_VERSION")); err != nil { kafkaVersions[upper] = true } for 
version := range kafkaVersions { name := t.Name() + "-v" + version.String() t.Run(name, func(t *testing.T) { config.ClientID = name config.MetricRegistry = metrics.NewRegistry() checkKafkaVersion(t, version.String()) config.Version = version client, err := NewClient(FunctionalTestEnv.KafkaBrokerAddrs, config) if err != nil { t.Fatal(err) } defer safeClose(t, client) // Keep in mind the current offset initialOffset, err := client.GetOffset("test.1", 0, OffsetNewest) if err != nil { t.Fatal(err) } producer, err := NewAsyncProducerFromClient(client) if err != nil { t.Fatal(err) } expectedResponses := TestBatchSize for i := 1; i <= TestBatchSize; { msg := &ProducerMessage{Topic: "test.1", Key: nil, Value: StringEncoder(fmt.Sprintf("testing %d", i))} select { case producer.Input() <- msg: i++ case ret := <-producer.Errors(): t.Fatal(ret.Err) case <-producer.Successes(): expectedResponses-- } } for expectedResponses > 0 { select { case ret := <-producer.Errors(): t.Fatal(ret.Err) case <-producer.Successes(): expectedResponses-- } } safeClose(t, producer) // Validate producer metrics before using the consumer minus the offset request validateProducerMetrics(t, client) master, err := NewConsumerFromClient(client) if err != nil { t.Fatal(err) } consumer, err := master.ConsumePartition("test.1", 0, initialOffset) if err != nil { t.Fatal(err) } for i := 1; i <= TestBatchSize; i++ { select { case <-time.After(10 * time.Second): t.Fatal("Not received any more events in the last 10 seconds.") case err := <-consumer.Errors(): t.Error(err) case message := <-consumer.Messages(): if string(message.Value) != fmt.Sprintf("testing %d", i) { t.Fatalf("Unexpected message with index %d: %s", i, message.Value) } } } validateConsumerMetrics(t, client) safeClose(t, consumer) }) } } // TestAsyncProducerRemoteBrokerClosed ensures that the async producer can // cleanly recover if network connectivity to the remote brokers is lost and // then subsequently resumed. 
// // https://github.com/IBM/sarama/issues/2129 func TestAsyncProducerRemoteBrokerClosed(t *testing.T) { setupFunctionalTest(t) defer teardownFunctionalTest(t) config := NewFunctionalTestConfig() config.ClientID = t.Name() config.Net.MaxOpenRequests = 1 config.Producer.Flush.MaxMessages = 1 config.Producer.Return.Successes = true config.Producer.Retry.Max = 1024 config.Producer.Retry.Backoff = time.Millisecond * 50 producer, err := NewAsyncProducer( FunctionalTestEnv.KafkaBrokerAddrs, config, ) if err != nil { t.Fatal(err) } // produce some more messages and ensure success for i := 0; i < 10; i++ { producer.Input() <- &ProducerMessage{Topic: "test.1", Key: nil, Value: StringEncoder(TestMessage)} <-producer.Successes() } // shutdown all the active tcp connections for _, proxy := range FunctionalTestEnv.Proxies { _ = proxy.Disable() } // produce some more messages for i := 10; i < 20; i++ { producer.Input() <- &ProducerMessage{Topic: "test.1", Key: nil, Value: StringEncoder(TestMessage)} } // re-open the proxies for _, proxy := range FunctionalTestEnv.Proxies { _ = proxy.Enable() } // ensure the previously produced messages succeed for i := 10; i < 20; i++ { <-producer.Successes() } // produce some more messages and ensure success for i := 20; i < 30; i++ { producer.Input() <- &ProducerMessage{Topic: "test.1", Key: nil, Value: StringEncoder(TestMessage)} <-producer.Successes() } closeProducer(t, producer) } func validateProducerMetrics(t *testing.T, client Client) { // Get the broker used by test1 topic var broker *Broker if partitions, err := client.Partitions("test.1"); err != nil { t.Error(err) } else { for _, partition := range partitions { if b, err := client.Leader("test.1", partition); err != nil { t.Error(err) } else { if broker != nil && b != broker { t.Fatal("Expected only one broker, got at least 2") } broker = b } } } metricValidators := newMetricValidators() noResponse := client.Config().Producer.RequiredAcks == NoResponse compressionEnabled := client.Config().Producer.Compression != CompressionNone // We are adding 10ms of latency to all requests with toxiproxy minRequestLatencyInMs := 10 if noResponse { // but when we do not wait for a response it can be less than 1ms minRequestLatencyInMs = 0 } // We read at least 1 byte from the broker metricValidators.registerForAllBrokers(broker, minCountMeterValidator("incoming-byte-rate", 1)) // in at least 3 global requests (1 for metadata request, 1 for offset request and N for produce request) metricValidators.register(minCountMeterValidator("request-rate", 3)) metricValidators.register(minCountHistogramValidator("request-size", 3)) metricValidators.register(minValHistogramValidator("request-size", 1)) // and at least 2 requests to the registered broker (offset + produces) metricValidators.registerForBroker(broker, minCountMeterValidator("request-rate", 2)) metricValidators.registerForBroker(broker, minCountHistogramValidator("request-size", 2)) metricValidators.registerForBroker(broker, minValHistogramValidator("request-size", 1)) metricValidators.registerForBroker(broker, minValHistogramValidator("request-latency-in-ms", minRequestLatencyInMs)) // We send at least 1 batch metricValidators.registerForGlobalAndTopic("test_1", minCountHistogramValidator("batch-size", 1)) metricValidators.registerForGlobalAndTopic("test_1", minValHistogramValidator("batch-size", 1)) if compressionEnabled { // We record compression ratios between [0.50,-10.00] (50-1000 with a histogram) for at least one "fake" record 
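// Editorial note (illustrative): the compression-ratio histogram stores the
// ratio scaled by 100, i.e.
//
//	recordedRatio = 100 * compressedSize / uncompressedSize // e.g. 100*256/512 = 50 for a 2:1 compression
//
// so "no compression" is recorded as exactly 100, and the bounds asserted below
// (lower bounds of 30 or 50, upper bound of 1000) correspond to ratios of
// roughly 0.30, 0.50 and 10.00 respectively.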
metricValidators.registerForGlobalAndTopic("test_1", minCountHistogramValidator("compression-ratio", 1)) if client.Config().Version.IsAtLeast(V0_11_0_0) { // slightly better compression with batching metricValidators.registerForGlobalAndTopic("test_1", minValHistogramValidator("compression-ratio", 30)) } else { metricValidators.registerForGlobalAndTopic("test_1", minValHistogramValidator("compression-ratio", 50)) } metricValidators.registerForGlobalAndTopic("test_1", maxValHistogramValidator("compression-ratio", 1000)) } else { // We record compression ratios of 1.00 (100 with a histogram) for every TestBatchSize record if client.Config().Version.IsAtLeast(V0_11_0_0) { // records will be grouped in batchSet rather than msgSet metricValidators.registerForGlobalAndTopic("test_1", minCountHistogramValidator("compression-ratio", 3)) } else { metricValidators.registerForGlobalAndTopic("test_1", countHistogramValidator("compression-ratio", TestBatchSize)) } metricValidators.registerForGlobalAndTopic("test_1", minValHistogramValidator("compression-ratio", 100)) metricValidators.registerForGlobalAndTopic("test_1", maxValHistogramValidator("compression-ratio", 100)) } // We send exactly TestBatchSize messages metricValidators.registerForGlobalAndTopic("test_1", countMeterValidator("record-send-rate", TestBatchSize)) // We send at least one record per request metricValidators.registerForGlobalAndTopic("test_1", minCountHistogramValidator("records-per-request", 1)) metricValidators.registerForGlobalAndTopic("test_1", minValHistogramValidator("records-per-request", 1)) // We receive at least 1 byte from the broker metricValidators.registerForAllBrokers(broker, minCountMeterValidator("outgoing-byte-rate", 1)) if noResponse { // in exactly 2 global responses (metadata + offset) metricValidators.register(countMeterValidator("response-rate", 2)) metricValidators.register(minCountHistogramValidator("response-size", 2)) // and exactly 1 offset response for the registered broker metricValidators.registerForBroker(broker, countMeterValidator("response-rate", 1)) metricValidators.registerForBroker(broker, minCountHistogramValidator("response-size", 1)) metricValidators.registerForBroker(broker, minValHistogramValidator("response-size", 1)) } else { // in at least 3 global responses (metadata + offset + produces) metricValidators.register(minCountMeterValidator("response-rate", 3)) metricValidators.register(minCountHistogramValidator("response-size", 3)) // and at least 2 for the registered broker metricValidators.registerForBroker(broker, minCountMeterValidator("response-rate", 2)) metricValidators.registerForBroker(broker, minCountHistogramValidator("response-size", 2)) metricValidators.registerForBroker(broker, minValHistogramValidator("response-size", 1)) } // There should be no requests in flight anymore metricValidators.registerForAllBrokers(broker, counterValidator("requests-in-flight", 0)) // Run the validators metricValidators.run(t, client.Config().MetricRegistry) } func validateConsumerMetrics(t *testing.T, client Client) { // Get the broker used by test1 topic var broker *Broker if partitions, err := client.Partitions("test.1"); err != nil { t.Error(err) } else { for _, partition := range partitions { if b, err := client.Leader("test.1", partition); err != nil { t.Error(err) } else { if broker != nil && b != broker { t.Fatal("Expected only one broker, got at least 2") } broker = b } } } metricValidators := newMetricValidators() // at least 1 global fetch request for the given topic 
metricValidators.registerForGlobalAndTopic("test_1", minCountMeterValidator("consumer-fetch-rate", 1)) // and at least 1 fetch request to the lead broker metricValidators.registerForBroker(broker, minCountMeterValidator("consumer-fetch-rate", 1)) // Run the validators metricValidators.run(t, client.Config().MetricRegistry) } // Benchmarks func BenchmarkProducerSmall(b *testing.B) { benchmarkProducer(b, nil, "test.64", ByteEncoder(make([]byte, 128))) } func BenchmarkProducerMedium(b *testing.B) { benchmarkProducer(b, nil, "test.64", ByteEncoder(make([]byte, 1024))) } func BenchmarkProducerLarge(b *testing.B) { benchmarkProducer(b, nil, "test.64", ByteEncoder(make([]byte, 8192))) } func BenchmarkProducerSmallSinglePartition(b *testing.B) { benchmarkProducer(b, nil, "test.1", ByteEncoder(make([]byte, 128))) } func BenchmarkProducerMediumSnappy(b *testing.B) { conf := NewFunctionalTestConfig() conf.Producer.Compression = CompressionSnappy benchmarkProducer(b, conf, "test.1", ByteEncoder(make([]byte, 1024))) } func benchmarkProducer(b *testing.B, conf *Config, topic string, value Encoder) { setupFunctionalTest(b) defer teardownFunctionalTest(b) metricsDisable := os.Getenv("METRICS_DISABLE") if metricsDisable != "" { previousUseNilMetrics := metrics.UseNilMetrics Logger.Println("Disabling metrics using no-op implementation") metrics.UseNilMetrics = true // Restore previous setting defer func() { metrics.UseNilMetrics = previousUseNilMetrics }() } producer, err := NewAsyncProducer(FunctionalTestEnv.KafkaBrokerAddrs, conf) if err != nil { b.Fatal(err) } b.ResetTimer() for i := 1; i <= b.N; { msg := &ProducerMessage{Topic: topic, Key: StringEncoder(fmt.Sprintf("%d", i)), Value: value} select { case producer.Input() <- msg: i++ case ret := <-producer.Errors(): b.Fatal(ret.Err) } } safeClose(b, producer) } golang-github-ibm-sarama-1.43.2/functional_test.go000066400000000000000000000352751461256741300221140ustar00rootroot00000000000000//go:build functional package sarama import ( "context" "errors" "fmt" "log" "net" "net/url" "os" "os/exec" "strconv" "strings" "testing" "time" "github.com/IBM/sarama/internal/toxiproxy" ) const uncommittedTopic = "uncommitted-topic-test-4" var ( testTopicDetails = map[string]*TopicDetail{ "test.1": { NumPartitions: 1, ReplicationFactor: 3, }, "test.4": { NumPartitions: 4, ReplicationFactor: 3, }, "test.64": { NumPartitions: 64, ReplicationFactor: 3, }, uncommittedTopic: { NumPartitions: 1, ReplicationFactor: 3, }, "test.1_to_2": { NumPartitions: 1, ReplicationFactor: 3, }, } FunctionalTestEnv *testEnvironment ) func TestMain(m *testing.M) { // Functional tests for Sarama // // You can either set TOXIPROXY_ADDR, which points at a toxiproxy address // already set up with 21801-21805 bound to zookeeper and 29091-29095 // bound to kafka. Alternatively, if TOXIPROXY_ADDR is not set, we'll try // and use Docker to bring up a 5-node zookeeper cluster & 5-node kafka // cluster, with toxiproxy configured as above. 
// // In either case, the following topics will be deleted (if they exist) and // then created/pre-seeded with data for the functional test run: // * uncommitted-topic-test-4 // * test.1 // * test.4 // * test.64 os.Exit(testMain(m)) } func testMain(m *testing.M) int { ctx := context.Background() var env testEnvironment if os.Getenv("DEBUG") == "true" { Logger = log.New(os.Stderr, "[DEBUG] ", log.Lmicroseconds|log.Ltime) } usingExisting, err := existingEnvironment(ctx, &env) if err != nil { panic(err) } if !usingExisting { err := prepareDockerTestEnvironment(ctx, &env) if err != nil { _ = tearDownDockerTestEnvironment(ctx, &env) panic(err) } defer tearDownDockerTestEnvironment(ctx, &env) // nolint:errcheck } if err := prepareTestTopics(ctx, &env); err != nil { panic(err) } FunctionalTestEnv = &env return m.Run() } // NewFunctionalTestConfig returns a config meant to be used by functional tests. func NewFunctionalTestConfig() *Config { config := NewConfig() // config.Consumer.Retry.Backoff = 0 // config.Producer.Retry.Backoff = 0 config.Version = MinVersion version, err := ParseKafkaVersion(os.Getenv("KAFKA_VERSION")) if err != nil { config.Version = DefaultVersion } else { config.Version = version } return config } type testEnvironment struct { ToxiproxyClient *toxiproxy.Client Proxies map[string]*toxiproxy.Proxy KafkaBrokerAddrs []string KafkaVersion string } // setupToxiProxies will configure the toxiproxy proxies with routes for the // kafka brokers if they don't already exist func setupToxiProxies(env *testEnvironment, endpoint string) error { env.ToxiproxyClient = toxiproxy.NewClient(endpoint) env.Proxies = map[string]*toxiproxy.Proxy{} env.KafkaBrokerAddrs = nil for i := 1; i <= 5; i++ { proxyName := fmt.Sprintf("kafka%d", i) proxy, err := env.ToxiproxyClient.Proxy(proxyName) if err != nil { proxy, err = env.ToxiproxyClient.CreateProxy( proxyName, fmt.Sprintf("0.0.0.0:%d", 29090+i), fmt.Sprintf("kafka-%d:%d", i, 29090+i), ) if err != nil { return fmt.Errorf("failed to create toxiproxy: %w", err) } } env.Proxies[proxyName] = proxy env.KafkaBrokerAddrs = append(env.KafkaBrokerAddrs, fmt.Sprintf("127.0.0.1:%d", 29090+i)) } return nil } func prepareDockerTestEnvironment(ctx context.Context, env *testEnvironment) error { const expectedBrokers = 5 Logger.Println("bringing up docker-based test environment") // Always (try to) tear down first. 
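// Editorial note (illustrative): the docker-compose invocation a few lines
// further down is roughly equivalent to running, from the repository root:
//
//	KAFKA_VERSION=3.5.1 docker-compose up -d --quiet-pull --timestamps --wait --wait-timeout 600
//
// where 3.5.1 is only the fallback used when KAFKA_VERSION is not set in the
// environment (docker-compose v2.17.0 or newer is needed for --wait-timeout).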
if err := tearDownDockerTestEnvironment(ctx, env); err != nil { return fmt.Errorf("failed to tear down existing env: %w", err) } if version, ok := os.LookupEnv("KAFKA_VERSION"); ok { env.KafkaVersion = version } else { env.KafkaVersion = "3.5.1" } // docker-compose v2.17.0 or newer required for `--wait-timeout` support c := exec.Command( "docker-compose", "up", "-d", "--quiet-pull", "--timestamps", "--wait", "--wait-timeout", "600", ) c.Stdout = os.Stdout c.Stderr = os.Stderr c.Env = append(os.Environ(), fmt.Sprintf("KAFKA_VERSION=%s", env.KafkaVersion)) err := c.Run() if err != nil { return fmt.Errorf("failed to run docker-compose to start test environment: %w", err) } if err := setupToxiProxies(env, "http://localhost:8474"); err != nil { return fmt.Errorf("failed to setup toxiproxies: %w", err) } dialCheck := func(addr string, timeout time.Duration) error { conn, err := net.DialTimeout("tcp", addr, timeout) if err != nil { return err } return conn.Close() } config := NewFunctionalTestConfig() config.Net.DialTimeout = 1 * time.Second config.Net.ReadTimeout = 1 * time.Second config.Net.WriteTimeout = 1 * time.Second config.ClientID = "sarama-tests" // wait for the kafka brokers to come up allBrokersUp := false Logger.Printf("waiting for kafka %s brokers to come up...\n", env.KafkaVersion) time.Sleep(10 * time.Second) mainLoop: for i := 0; i < 30 && !allBrokersUp; i++ { if i > 0 { Logger.Printf("still waiting for kafka %s brokers to come up...\n", env.KafkaVersion) } time.Sleep(3 * time.Second) brokersOk := make([]bool, len(env.KafkaBrokerAddrs)) // first check that all bootstrap brokers are TCP accessible for _, addr := range env.KafkaBrokerAddrs { if err := dialCheck(addr, time.Second); err != nil { continue mainLoop } } // now check we can bootstrap metadata from the cluster and all brokers // are known and accessible at their advertised address retryLoop: for j, addr := range env.KafkaBrokerAddrs { client, err := NewClient([]string{addr}, config) if err != nil { continue } err = client.RefreshMetadata() if err != nil { client.Close() continue } brokers := client.Brokers() if len(brokers) < expectedBrokers { client.Close() continue } for _, broker := range brokers { err := broker.Open(client.Config()) if err != nil && !errors.Is(err, ErrAlreadyConnected) { client.Close() continue retryLoop } connected, err := broker.Connected() if err != nil || !connected { broker.Close() client.Close() continue retryLoop } } client.Close() brokersOk[j] = true } allBrokersUp = true for _, u := range brokersOk { allBrokersUp = allBrokersUp && u } } if !allBrokersUp { c := exec.Command("docker-compose", "logs", "-t", "kafka-1", "kafka-2", "kafka-3", "kafka-4", "kafka-5") c.Stdout = os.Stdout c.Stderr = os.Stderr _ = c.Run() return fmt.Errorf("timed out waiting for one or more broker to come up") } return nil } func existingEnvironment(ctx context.Context, env *testEnvironment) (bool, error) { toxiproxyAddr, ok := os.LookupEnv("TOXIPROXY_ADDR") if !ok { return false, nil } toxiproxyURL, err := url.Parse(toxiproxyAddr) if err != nil { return false, fmt.Errorf("$TOXIPROXY_ADDR not parseable as url") } if err := setupToxiProxies(env, toxiproxyURL.String()); err != nil { return false, fmt.Errorf("failed to setup toxiproxies: %w", err) } env.KafkaVersion, ok = os.LookupEnv("KAFKA_VERSION") if !ok { return false, fmt.Errorf("KAFKA_VERSION needs to be provided with TOXIPROXY_ADDR") } return true, nil } func tearDownDockerTestEnvironment(ctx context.Context, env *testEnvironment) error { c := 
exec.Command("docker-compose", "down", "--volumes") c.Stdout = os.Stdout c.Stderr = os.Stderr downErr := c.Run() c = exec.Command("docker-compose", "rm", "-v", "--force", "--stop") c.Stdout = os.Stdout c.Stderr = os.Stderr rmErr := c.Run() if downErr != nil { return fmt.Errorf("failed to run docker-compose to stop test environment: %w", downErr) } if rmErr != nil { return fmt.Errorf("failed to run docker-compose to rm test environment: %w", rmErr) } return nil } func startDockerTestBroker(ctx context.Context, brokerID int32) error { service := fmt.Sprintf("kafka-%d", brokerID) c := exec.Command("docker-compose", "start", service) c.Stdout = os.Stdout c.Stderr = os.Stderr if err := c.Run(); err != nil { return fmt.Errorf("failed to run docker-compose to start test broker kafka-%d: %w", brokerID, err) } return nil } func stopDockerTestBroker(ctx context.Context, brokerID int32) error { service := fmt.Sprintf("kafka-%d", brokerID) c := exec.Command("docker-compose", "stop", service) c.Stdout = os.Stdout c.Stderr = os.Stderr if err := c.Run(); err != nil { return fmt.Errorf("failed to run docker-compose to stop test broker kafka-%d: %w", brokerID, err) } return nil } func prepareTestTopics(ctx context.Context, env *testEnvironment) error { Logger.Println("creating test topics") var testTopicNames []string for topic := range testTopicDetails { testTopicNames = append(testTopicNames, topic) } Logger.Println("Creating topics") config := NewFunctionalTestConfig() config.Metadata.Retry.Max = 5 config.Metadata.Retry.Backoff = 10 * time.Second config.ClientID = "sarama-prepareTestTopics" client, err := NewClient(env.KafkaBrokerAddrs, config) if err != nil { return fmt.Errorf("failed to connect to kafka: %w", err) } defer client.Close() controller, err := client.Controller() if err != nil { return fmt.Errorf("failed to connect to kafka controller: %w", err) } defer controller.Close() // Start by deleting the test topics (if they already exist) deleteRes, err := controller.DeleteTopics(&DeleteTopicsRequest{ Topics: testTopicNames, Timeout: time.Minute, }) if err != nil { return fmt.Errorf("failed to delete test topics: %w", err) } for topic, topicErr := range deleteRes.TopicErrorCodes { if !isTopicNotExistsErrorOrOk(topicErr) { return fmt.Errorf("failed to delete topic %s: %w", topic, topicErr) } } // wait for the topics to _actually_ be gone - the delete is not guaranteed to be processed // synchronously { var topicsOk bool for i := 0; i < 60 && !topicsOk; i++ { time.Sleep(1 * time.Second) md, err := controller.GetMetadata(&MetadataRequest{ Topics: testTopicNames, }) if err != nil { return fmt.Errorf("failed to get metadata for test topics: %w", err) } if len(md.Topics) == len(testTopicNames) { topicsOk = true for _, topicsMd := range md.Topics { if !isTopicNotExistsErrorOrOk(topicsMd.Err) { topicsOk = false } } } } if !topicsOk { return fmt.Errorf("timed out waiting for test topics to be gone") } } // now create the topics empty createRes, err := controller.CreateTopics(&CreateTopicsRequest{ TopicDetails: testTopicDetails, Timeout: time.Minute, }) if err != nil { return fmt.Errorf("failed to create test topics: %w", err) } for topic, topicErr := range createRes.TopicErrors { if !isTopicExistsErrorOrOk(topicErr.Err) { return fmt.Errorf("failed to create test topic %s: %w", topic, topicErr) } } // wait for the topics to _actually_ exist - the creates are not guaranteed to be processed // synchronously { var topicsOk bool for i := 0; i < 60 && !topicsOk; i++ { time.Sleep(1 * time.Second) md, err := 
controller.GetMetadata(&MetadataRequest{ Topics: testTopicNames, }) if err != nil { return fmt.Errorf("failed to get metadata for test topics: %w", err) } if len(md.Topics) == len(testTopicNames) { topicsOk = true for _, topicsMd := range md.Topics { if topicsMd.Err != ErrNoError { topicsOk = false } } } } if !topicsOk { return fmt.Errorf("timed out waiting for test topics to be created") } } return nil } func isTopicNotExistsErrorOrOk(err KError) bool { return errors.Is(err, ErrUnknownTopicOrPartition) || errors.Is(err, ErrInvalidTopic) || errors.Is(err, ErrNoError) } func isTopicExistsErrorOrOk(err KError) bool { return errors.Is(err, ErrTopicAlreadyExists) || errors.Is(err, ErrNoError) } func checkKafkaVersion(t testing.TB, requiredVersion string) { kafkaVersion := FunctionalTestEnv.KafkaVersion if kafkaVersion == "" { t.Skipf("No KAFKA_VERSION set. This test requires Kafka version %s or higher. Continuing...", requiredVersion) } else { available := parseKafkaVersion(kafkaVersion) required := parseKafkaVersion(requiredVersion) if !available.satisfies(required) { t.Skipf("Kafka version %s is required for this test; you have %s. Skipping...", requiredVersion, kafkaVersion) } } } func resetProxies(t testing.TB) { if err := FunctionalTestEnv.ToxiproxyClient.ResetState(); err != nil { t.Error(err) } } func SaveProxy(t *testing.T, px string) { if _, err := FunctionalTestEnv.Proxies[px].Save(); err != nil { t.Fatal(err) } } func setupFunctionalTest(t testing.TB) { resetProxies(t) ensureFullyReplicated(t, 60*time.Second, 5*time.Second) } func teardownFunctionalTest(t testing.TB) { resetProxies(t) } func ensureFullyReplicated(t testing.TB, timeout time.Duration, retry time.Duration) { config := NewFunctionalTestConfig() config.Metadata.Full = false config.Metadata.RefreshFrequency = 0 config.Metadata.Retry.Max = 5 config.Metadata.Retry.Backoff = 10 * time.Second config.ClientID = "sarama-ensureFullyReplicated" config.ApiVersionsRequest = false var testTopicNames []string for topic := range testTopicDetails { testTopicNames = append(testTopicNames, topic) } timer := time.NewTimer(timeout) defer timer.Stop() tick := time.NewTicker(retry) defer tick.Stop() for { resp, err := func() (*MetadataResponse, error) { client, err := NewClient(FunctionalTestEnv.KafkaBrokerAddrs, config) if err != nil { return nil, fmt.Errorf("failed to connect to kafka: %w", err) } defer client.Close() broker := client.LeastLoadedBroker() defer broker.Close() request := NewMetadataRequest(config.Version, testTopicNames) return broker.GetMetadata(request) }() if err != nil { Logger.Printf("failed to get metadata during test setup: %v\n", err) } else { ok := true for _, topic := range resp.Topics { for _, partition := range topic.Partitions { if len(partition.Isr) != 3 { ok = false Logger.Printf("topic %s/%d is not fully-replicated Isr=%v Offline=%v\n", topic.Name, partition.ID, partition.Isr, partition.OfflineReplicas) } } } if ok { return } } select { case <-timer.C: t.Fatalf("timeout waiting for test topics to be fully replicated") case <-tick.C: } } } type kafkaVersion []int func (kv kafkaVersion) satisfies(other kafkaVersion) bool { var ov int for index, v := range kv { if len(other) <= index { ov = 0 } else { ov = other[index] } if v < ov { return false } else if v > ov { return true } } return true } func parseKafkaVersion(version string) kafkaVersion { numbers := strings.Split(version, ".") result := make(kafkaVersion, 0, len(numbers)) for _, number := range numbers { nr, _ := strconv.Atoi(number) result = 
append(result, nr) } return result } golang-github-ibm-sarama-1.43.2/go.mod000066400000000000000000000027641461256741300174670ustar00rootroot00000000000000module github.com/IBM/sarama go 1.19 require ( github.com/davecgh/go-spew v1.1.1 github.com/eapache/go-resiliency v1.6.0 github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 github.com/eapache/queue v1.1.0 github.com/fortytw2/leaktest v1.3.0 github.com/hashicorp/go-multierror v1.1.1 github.com/jcmturner/gofork v1.7.6 github.com/jcmturner/gokrb5/v8 v8.4.4 github.com/klauspost/compress v1.17.8 github.com/pierrec/lz4/v4 v4.1.21 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 github.com/stretchr/testify v1.9.0 golang.org/x/net v0.24.0 golang.org/x/sync v0.7.0 ) require ( github.com/golang/snappy v0.0.4 // indirect github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect golang.org/x/crypto v0.22.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) retract ( v1.32.0 // producer hangs on retry https://github.com/IBM/sarama/issues/2150 [v1.31.0, v1.31.1] // producer deadlock https://github.com/IBM/sarama/issues/2129 [v1.26.0, v1.26.1] // consumer fetch session allocation https://github.com/IBM/sarama/pull/1644 [v1.24.1, v1.25.0] // consumer group metadata reqs https://github.com/IBM/sarama/issues/1544 ) golang-github-ibm-sarama-1.43.2/go.sum000066400000000000000000000231061461256741300175050ustar00rootroot00000000000000github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/eapache/go-resiliency v1.6.0 h1:CqGDTLtpwuWKn6Nj3uNUdflaq+/kIPsg0gfNzHton30= github.com/eapache/go-resiliency v1.6.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod 
h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod 
h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors 
v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= golang-github-ibm-sarama-1.43.2/gssapi_kerberos.go000066400000000000000000000163661461256741300220750ustar00rootroot00000000000000package sarama import ( "encoding/binary" "errors" "fmt" "io" "math" "strings" "time" "github.com/jcmturner/gofork/encoding/asn1" "github.com/jcmturner/gokrb5/v8/asn1tools" "github.com/jcmturner/gokrb5/v8/gssapi" "github.com/jcmturner/gokrb5/v8/iana/chksumtype" "github.com/jcmturner/gokrb5/v8/iana/keyusage" "github.com/jcmturner/gokrb5/v8/messages" "github.com/jcmturner/gokrb5/v8/types" ) const ( TOK_ID_KRB_AP_REQ = 256 GSS_API_GENERIC_TAG = 0x60 KRB5_USER_AUTH = 1 KRB5_KEYTAB_AUTH = 2 KRB5_CCACHE_AUTH = 3 GSS_API_INITIAL = 1 GSS_API_VERIFY = 2 GSS_API_FINISH = 3 ) type GSSAPIConfig struct { AuthType int KeyTabPath string CCachePath string KerberosConfigPath string ServiceName string Username string Password string Realm string DisablePAFXFAST bool BuildSpn BuildSpnFunc } type GSSAPIKerberosAuth struct { Config *GSSAPIConfig ticket messages.Ticket encKey types.EncryptionKey NewKerberosClientFunc func(config *GSSAPIConfig) (KerberosClient, error) step int } type KerberosClient interface { Login() error GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error) Domain() string CName() types.PrincipalName Destroy() } type BuildSpnFunc func(serviceName, host string) string // writePackage appends length in big endian before the payload, and sends it to kafka func (krbAuth *GSSAPIKerberosAuth) writePackage(broker *Broker, payload []byte) (int, error) { length := uint64(len(payload)) size := length + 4 // 4 byte length header + payload if size > math.MaxInt32 { return 0, errors.New("payload too large, will overflow int32") } finalPackage := make([]byte, size) copy(finalPackage[4:], payload) binary.BigEndian.PutUint32(finalPackage, uint32(length)) bytes, err := broker.conn.Write(finalPackage) if err != nil { return bytes, err } return bytes, nil } // readPackage reads payload length (4 bytes) and then reads the payload into []byte func (krbAuth *GSSAPIKerberosAuth) readPackage(broker *Broker) ([]byte, int, error) { bytesRead := 0 lengthInBytes := make([]byte, 4) bytes, err := io.ReadFull(broker.conn, lengthInBytes) if err != nil { return nil, bytesRead, err } bytesRead += bytes payloadLength := binary.BigEndian.Uint32(lengthInBytes) payloadBytes := make([]byte, payloadLength) // buffer for read.. 
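// Editorial note (illustrative): writePackage above and the rest of this
// function implement a simple length-prefixed framing for the SASL/GSSAPI
// handshake: a 4-byte big-endian payload length followed by the payload
// itself. A simplified equivalent of the sending side is:
//
//	var header [4]byte
//	binary.BigEndian.PutUint32(header[:], uint32(len(payload)))
//	framed := append(header[:], payload...) // this is what writePackage writes to broker.conn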
bytes, err = io.ReadFull(broker.conn, payloadBytes) // read bytes if err != nil { return payloadBytes, bytesRead, err } bytesRead += bytes return payloadBytes, bytesRead, nil } func (krbAuth *GSSAPIKerberosAuth) newAuthenticatorChecksum() []byte { a := make([]byte, 24) flags := []int{gssapi.ContextFlagInteg, gssapi.ContextFlagConf} binary.LittleEndian.PutUint32(a[:4], 16) for _, i := range flags { f := binary.LittleEndian.Uint32(a[20:24]) f |= uint32(i) binary.LittleEndian.PutUint32(a[20:24], f) } return a } /* * * Construct Kerberos AP_REQ package, conforming to RFC-4120 * https://tools.ietf.org/html/rfc4120#page-84 * */ func (krbAuth *GSSAPIKerberosAuth) createKrb5Token( domain string, cname types.PrincipalName, ticket messages.Ticket, sessionKey types.EncryptionKey) ([]byte, error) { auth, err := types.NewAuthenticator(domain, cname) if err != nil { return nil, err } auth.Cksum = types.Checksum{ CksumType: chksumtype.GSSAPI, Checksum: krbAuth.newAuthenticatorChecksum(), } APReq, err := messages.NewAPReq( ticket, sessionKey, auth, ) if err != nil { return nil, err } aprBytes := make([]byte, 2) binary.BigEndian.PutUint16(aprBytes, TOK_ID_KRB_AP_REQ) tb, err := APReq.Marshal() if err != nil { return nil, err } aprBytes = append(aprBytes, tb...) return aprBytes, nil } /* * * Append the GSS-API header to the payload, conforming to RFC-2743 * Section 3.1, Mechanism-Independent Token Format * * https://tools.ietf.org/html/rfc2743#page-81 * * GSSAPIHeader + * */ func (krbAuth *GSSAPIKerberosAuth) appendGSSAPIHeader(payload []byte) ([]byte, error) { oidBytes, err := asn1.Marshal(gssapi.OIDKRB5.OID()) if err != nil { return nil, err } tkoLengthBytes := asn1tools.MarshalLengthBytes(len(oidBytes) + len(payload)) GSSHeader := append([]byte{GSS_API_GENERIC_TAG}, tkoLengthBytes...) GSSHeader = append(GSSHeader, oidBytes...) GSSPackage := append(GSSHeader, payload...) return GSSPackage, nil } func (krbAuth *GSSAPIKerberosAuth) initSecContext(bytes []byte, kerberosClient KerberosClient) ([]byte, error) { switch krbAuth.step { case GSS_API_INITIAL: aprBytes, err := krbAuth.createKrb5Token( kerberosClient.Domain(), kerberosClient.CName(), krbAuth.ticket, krbAuth.encKey) if err != nil { return nil, err } krbAuth.step = GSS_API_VERIFY return krbAuth.appendGSSAPIHeader(aprBytes) case GSS_API_VERIFY: wrapTokenReq := gssapi.WrapToken{} if err := wrapTokenReq.Unmarshal(bytes, true); err != nil { return nil, err } // Validate response. 
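// Editorial note (illustrative): initSecContext is a small three-step state
// machine driven by Authorize below, roughly:
//
//	GSS_API_INITIAL: build the Kerberos AP-REQ, prepend the GSS-API header, advance to VERIFY
//	GSS_API_VERIFY:  verify the broker's wrap token (this branch) and answer with
//	                 an initiator wrap token, advance to FINISH
//	GSS_API_FINISH:  nothing more to send, Authorize returns successfully
//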
isValid, err := wrapTokenReq.Verify(krbAuth.encKey, keyusage.GSSAPI_ACCEPTOR_SEAL) if !isValid { return nil, err } wrapTokenResponse, err := gssapi.NewInitiatorWrapToken(wrapTokenReq.Payload, krbAuth.encKey) if err != nil { return nil, err } krbAuth.step = GSS_API_FINISH return wrapTokenResponse.Marshal() } return nil, nil } /* This does the handshake for authorization */ func (krbAuth *GSSAPIKerberosAuth) Authorize(broker *Broker) error { kerberosClient, err := krbAuth.NewKerberosClientFunc(krbAuth.Config) if err != nil { Logger.Printf("Kerberos client error: %s", err) return err } err = kerberosClient.Login() if err != nil { Logger.Printf("Kerberos client error: %s", err) return err } // Construct SPN using serviceName and host // default SPN format: / host := strings.SplitN(broker.addr, ":", 2)[0] // Strip port part var spn string if krbAuth.Config.BuildSpn != nil { spn = krbAuth.Config.BuildSpn(broker.conf.Net.SASL.GSSAPI.ServiceName, host) } else { spn = fmt.Sprintf("%s/%s", broker.conf.Net.SASL.GSSAPI.ServiceName, host) } ticket, encKey, err := kerberosClient.GetServiceTicket(spn) if err != nil { Logger.Printf("Error getting Kerberos service ticket : %s", err) return err } krbAuth.ticket = ticket krbAuth.encKey = encKey krbAuth.step = GSS_API_INITIAL var receivedBytes []byte = nil defer kerberosClient.Destroy() for { packBytes, err := krbAuth.initSecContext(receivedBytes, kerberosClient) if err != nil { Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err) return err } requestTime := time.Now() bytesWritten, err := krbAuth.writePackage(broker, packBytes) if err != nil { Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err) return err } broker.updateOutgoingCommunicationMetrics(bytesWritten) if krbAuth.step == GSS_API_VERIFY { bytesRead := 0 receivedBytes, bytesRead, err = krbAuth.readPackage(broker) requestLatency := time.Since(requestTime) broker.updateIncomingCommunicationMetrics(bytesRead, requestLatency) if err != nil { Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err) return err } } else if krbAuth.step == GSS_API_FINISH { return nil } } } golang-github-ibm-sarama-1.43.2/heartbeat_request.go000066400000000000000000000026301461256741300224070ustar00rootroot00000000000000package sarama type HeartbeatRequest struct { Version int16 GroupId string GenerationId int32 MemberId string GroupInstanceId *string } func (r *HeartbeatRequest) encode(pe packetEncoder) error { if err := pe.putString(r.GroupId); err != nil { return err } pe.putInt32(r.GenerationId) if err := pe.putString(r.MemberId); err != nil { return err } if r.Version >= 3 { if err := pe.putNullableString(r.GroupInstanceId); err != nil { return err } } return nil } func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) { r.Version = version if r.GroupId, err = pd.getString(); err != nil { return } if r.GenerationId, err = pd.getInt32(); err != nil { return } if r.MemberId, err = pd.getString(); err != nil { return } if r.Version >= 3 { if r.GroupInstanceId, err = pd.getNullableString(); err != nil { return } } return nil } func (r *HeartbeatRequest) key() int16 { return 12 } func (r *HeartbeatRequest) version() int16 { return r.Version } func (r *HeartbeatRequest) headerVersion() int16 { return 1 } func (r *HeartbeatRequest) isValidVersion() bool { return r.Version >= 0 && r.Version <= 3 } func (r *HeartbeatRequest) requiredVersion() KafkaVersion { switch r.Version { case 3: return V2_3_0_0 case 2: return V2_0_0_0 
case 1: return V0_11_0_0 case 0: return V0_8_2_0 default: return V2_3_0_0 } } golang-github-ibm-sarama-1.43.2/heartbeat_request_test.go000066400000000000000000000034051461256741300234470ustar00rootroot00000000000000package sarama import ( "reflect" "testing" ) var ( basicHeartbeatRequestV0 = []byte{ 0, 3, 'f', 'o', 'o', // Group ID 0x00, 0x01, 0x02, 0x03, // Generation ID 0, 3, 'b', 'a', 'z', // Member ID } basicHeartbeatRequestV3_GID = []byte{ 0, 3, 'f', 'o', 'o', // Group ID 0x00, 0x01, 0x02, 0x03, // Generation ID 0, 3, 'b', 'a', 'z', // Member ID 0, 3, 'g', 'i', 'd', // Group Instance ID } basicHeartbeatRequestV3_NOGID = []byte{ 0, 3, 'f', 'o', 'o', // Group ID 0x00, 0x01, 0x02, 0x03, // Generation ID 0, 3, 'b', 'a', 'z', // Member ID 255, 255, // Group Instance ID } ) func TestHeartbeatRequest(t *testing.T) { groupInstanceId := "gid" tests := []struct { CaseName string Version int16 MessageBytes []byte Message *HeartbeatRequest }{ { "v0_basic", 0, basicHeartbeatRequestV0, &HeartbeatRequest{ Version: 0, GroupId: "foo", GenerationId: 0x00010203, MemberId: "baz", }, }, { "v3_basic", 3, basicHeartbeatRequestV3_GID, &HeartbeatRequest{ Version: 3, GroupId: "foo", GenerationId: 0x00010203, MemberId: "baz", GroupInstanceId: &groupInstanceId, }, }, { "v3_basic", 3, basicHeartbeatRequestV3_NOGID, &HeartbeatRequest{ Version: 3, GroupId: "foo", GenerationId: 0x00010203, MemberId: "baz", GroupInstanceId: nil, }, }, } for _, c := range tests { testEncodable(t, c.CaseName, c.Message, c.MessageBytes) request := new(HeartbeatRequest) testVersionDecodable(t, c.CaseName, request, c.MessageBytes, c.Version) if !reflect.DeepEqual(c.Message, request) { t.Errorf("case %s decode failed, expected:%+v got %+v", c.CaseName, c.Message, request) } } } golang-github-ibm-sarama-1.43.2/heartbeat_response.go000066400000000000000000000023021461256741300225510ustar00rootroot00000000000000package sarama import "time" type HeartbeatResponse struct { Version int16 ThrottleTime int32 Err KError } func (r *HeartbeatResponse) encode(pe packetEncoder) error { if r.Version >= 1 { pe.putInt32(r.ThrottleTime) } pe.putInt16(int16(r.Err)) return nil } func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error { var err error r.Version = version if r.Version >= 1 { if r.ThrottleTime, err = pd.getInt32(); err != nil { return err } } kerr, err := pd.getInt16() if err != nil { return err } r.Err = KError(kerr) return nil } func (r *HeartbeatResponse) key() int16 { return 12 } func (r *HeartbeatResponse) version() int16 { return r.Version } func (r *HeartbeatResponse) headerVersion() int16 { return 0 } func (r *HeartbeatResponse) isValidVersion() bool { return r.Version >= 0 && r.Version <= 3 } func (r *HeartbeatResponse) requiredVersion() KafkaVersion { switch r.Version { case 3: return V2_3_0_0 case 2: return V2_0_0_0 case 1: return V0_11_0_0 case 0: return V0_8_2_0 default: return V2_3_0_0 } } func (r *HeartbeatResponse) throttleTime() time.Duration { return time.Duration(r.ThrottleTime) * time.Millisecond } golang-github-ibm-sarama-1.43.2/heartbeat_response_test.go000066400000000000000000000023451461256741300236170ustar00rootroot00000000000000package sarama import ( "reflect" "testing" ) var ( heartbeatResponseNoError_V0 = []byte{ 0x00, 0x00, } heartbeatResponseNoError_V1 = []byte{ 0, 0, 0, 100, 0, 0, } heartbeatResponseError_V1 = []byte{ 0, 0, 0, 100, 0, byte(ErrFencedInstancedId), } ) func TestHeartbeatResponse(t *testing.T) { tests := []struct { CaseName string Version int16 MessageBytes []byte Message 
*HeartbeatResponse }{ { "v0_noErr", 0, heartbeatResponseNoError_V0, &HeartbeatResponse{ Version: 0, Err: ErrNoError, }, }, { "v1_noErr", 1, heartbeatResponseNoError_V1, &HeartbeatResponse{ Version: 1, Err: ErrNoError, ThrottleTime: 100, }, }, { "v1_Err", 1, heartbeatResponseError_V1, &HeartbeatResponse{ Version: 1, Err: ErrFencedInstancedId, ThrottleTime: 100, }, }, } for _, c := range tests { testEncodable(t, c.CaseName, c.Message, c.MessageBytes) response := new(HeartbeatResponse) testVersionDecodable(t, c.CaseName, response, c.MessageBytes, c.Version) if !reflect.DeepEqual(c.Message, response) { t.Errorf("case %s decode failed, expected:%+v got %+v", c.CaseName, c.Message, response) } } } golang-github-ibm-sarama-1.43.2/incremental_alter_configs_request.go000066400000000000000000000067261461256741300256620ustar00rootroot00000000000000package sarama type IncrementalAlterConfigsOperation int8 const ( IncrementalAlterConfigsOperationSet IncrementalAlterConfigsOperation = iota IncrementalAlterConfigsOperationDelete IncrementalAlterConfigsOperationAppend IncrementalAlterConfigsOperationSubtract ) // IncrementalAlterConfigsRequest is an incremental alter config request type type IncrementalAlterConfigsRequest struct { Version int16 Resources []*IncrementalAlterConfigsResource ValidateOnly bool } type IncrementalAlterConfigsResource struct { Type ConfigResourceType Name string ConfigEntries map[string]IncrementalAlterConfigsEntry } type IncrementalAlterConfigsEntry struct { Operation IncrementalAlterConfigsOperation Value *string } func (a *IncrementalAlterConfigsRequest) encode(pe packetEncoder) error { if err := pe.putArrayLength(len(a.Resources)); err != nil { return err } for _, r := range a.Resources { if err := r.encode(pe); err != nil { return err } } pe.putBool(a.ValidateOnly) return nil } func (a *IncrementalAlterConfigsRequest) decode(pd packetDecoder, version int16) error { resourceCount, err := pd.getArrayLength() if err != nil { return err } a.Resources = make([]*IncrementalAlterConfigsResource, resourceCount) for i := range a.Resources { r := &IncrementalAlterConfigsResource{} err = r.decode(pd, version) if err != nil { return err } a.Resources[i] = r } validateOnly, err := pd.getBool() if err != nil { return err } a.ValidateOnly = validateOnly return nil } func (a *IncrementalAlterConfigsResource) encode(pe packetEncoder) error { pe.putInt8(int8(a.Type)) if err := pe.putString(a.Name); err != nil { return err } if err := pe.putArrayLength(len(a.ConfigEntries)); err != nil { return err } for name, e := range a.ConfigEntries { if err := pe.putString(name); err != nil { return err } if err := e.encode(pe); err != nil { return err } } return nil } func (a *IncrementalAlterConfigsResource) decode(pd packetDecoder, version int16) error { t, err := pd.getInt8() if err != nil { return err } a.Type = ConfigResourceType(t) name, err := pd.getString() if err != nil { return err } a.Name = name n, err := pd.getArrayLength() if err != nil { return err } if n > 0 { a.ConfigEntries = make(map[string]IncrementalAlterConfigsEntry, n) for i := 0; i < n; i++ { name, err := pd.getString() if err != nil { return err } var v IncrementalAlterConfigsEntry if err := v.decode(pd, version); err != nil { return err } a.ConfigEntries[name] = v } } return err } func (a *IncrementalAlterConfigsEntry) encode(pe packetEncoder) error { pe.putInt8(int8(a.Operation)) if err := pe.putNullableString(a.Value); err != nil { return err } return nil } func (a *IncrementalAlterConfigsEntry) decode(pd packetDecoder, 
version int16) error { t, err := pd.getInt8() if err != nil { return err } a.Operation = IncrementalAlterConfigsOperation(t) s, err := pd.getNullableString() if err != nil { return err } a.Value = s return nil } func (a *IncrementalAlterConfigsRequest) key() int16 { return 44 } func (a *IncrementalAlterConfigsRequest) version() int16 { return a.Version } func (a *IncrementalAlterConfigsRequest) headerVersion() int16 { return 1 } func (a *IncrementalAlterConfigsRequest) isValidVersion() bool { return a.Version == 0 } func (a *IncrementalAlterConfigsRequest) requiredVersion() KafkaVersion { return V2_3_0_0 } golang-github-ibm-sarama-1.43.2/incremental_alter_configs_request_test.go000066400000000000000000000045601461256741300267130ustar00rootroot00000000000000package sarama import "testing" var ( emptyIncrementalAlterConfigsRequest = []byte{ 0, 0, 0, 0, // 0 configs 0, // don't Validate } singleIncrementalAlterConfigsRequest = []byte{ 0, 0, 0, 1, // 1 config 2, // a topic 0, 3, 'f', 'o', 'o', // topic name: foo 0, 0, 0, 1, // 1 config name 0, 10, // 10 chars 's', 'e', 'g', 'm', 'e', 'n', 't', '.', 'm', 's', 0, // OperationSet 0, 4, '1', '0', '0', '0', 0, // don't validate } doubleIncrementalAlterConfigsRequest = []byte{ 0, 0, 0, 2, // 2 configs 2, // a topic 0, 3, 'f', 'o', 'o', // topic name: foo 0, 0, 0, 1, // 1 config name 0, 10, // 10 chars 's', 'e', 'g', 'm', 'e', 'n', 't', '.', 'm', 's', 0, // OperationSet 0, 4, '1', '0', '0', '0', 2, // a topic 0, 3, 'b', 'a', 'r', // topic name: bar 0, 0, 0, 1, // 1 config name 0, 12, // 12 chars 'r', 'e', 't', 'e', 'n', 't', 'i', 'o', 'n', '.', 'm', 's', 1, // OperationDelete 0, 4, '1', '0', '0', '0', 0, // don't validate } ) func TestIncrementalAlterConfigsRequest(t *testing.T) { var request *IncrementalAlterConfigsRequest request = &IncrementalAlterConfigsRequest{ Resources: []*IncrementalAlterConfigsResource{}, } testRequest(t, "no requests", request, emptyIncrementalAlterConfigsRequest) configValue := "1000" request = &IncrementalAlterConfigsRequest{ Resources: []*IncrementalAlterConfigsResource{ { Type: TopicResource, Name: "foo", ConfigEntries: map[string]IncrementalAlterConfigsEntry{ "segment.ms": { Operation: IncrementalAlterConfigsOperationSet, Value: &configValue, }, }, }, }, } testRequest(t, "one config", request, singleIncrementalAlterConfigsRequest) request = &IncrementalAlterConfigsRequest{ Resources: []*IncrementalAlterConfigsResource{ { Type: TopicResource, Name: "foo", ConfigEntries: map[string]IncrementalAlterConfigsEntry{ "segment.ms": { Operation: IncrementalAlterConfigsOperationSet, Value: &configValue, }, }, }, { Type: TopicResource, Name: "bar", ConfigEntries: map[string]IncrementalAlterConfigsEntry{ "retention.ms": { Operation: IncrementalAlterConfigsOperationDelete, Value: &configValue, }, }, }, }, } testRequest(t, "two configs", request, doubleIncrementalAlterConfigsRequest) } golang-github-ibm-sarama-1.43.2/incremental_alter_configs_response.go000066400000000000000000000031531461256741300260170ustar00rootroot00000000000000package sarama import "time" // IncrementalAlterConfigsResponse is a response type for incremental alter config type IncrementalAlterConfigsResponse struct { Version int16 ThrottleTime time.Duration Resources []*AlterConfigsResourceResponse } func (a *IncrementalAlterConfigsResponse) encode(pe packetEncoder) error { pe.putInt32(int32(a.ThrottleTime / time.Millisecond)) if err := pe.putArrayLength(len(a.Resources)); err != nil { return err } for _, v := range a.Resources { if err := v.encode(pe); err != nil { 
return err } } return nil } func (a *IncrementalAlterConfigsResponse) decode(pd packetDecoder, version int16) error { throttleTime, err := pd.getInt32() if err != nil { return err } a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond responseCount, err := pd.getArrayLength() if err != nil { return err } a.Resources = make([]*AlterConfigsResourceResponse, responseCount) for i := range a.Resources { a.Resources[i] = new(AlterConfigsResourceResponse) if err := a.Resources[i].decode(pd, version); err != nil { return err } } return nil } func (a *IncrementalAlterConfigsResponse) key() int16 { return 44 } func (a *IncrementalAlterConfigsResponse) version() int16 { return a.Version } func (a *IncrementalAlterConfigsResponse) headerVersion() int16 { return 0 } func (a *IncrementalAlterConfigsResponse) isValidVersion() bool { return a.Version == 0 } func (a *IncrementalAlterConfigsResponse) requiredVersion() KafkaVersion { return V2_3_0_0 } func (r *IncrementalAlterConfigsResponse) throttleTime() time.Duration { return r.ThrottleTime } golang-github-ibm-sarama-1.43.2/incremental_alter_configs_response_test.go000066400000000000000000000017011461256741300270530ustar00rootroot00000000000000package sarama import ( "testing" ) var ( incrementalAlterResponseEmpty = []byte{ 0, 0, 0, 0, // throttle 0, 0, 0, 0, // no configs } incrementalAlterResponsePopulated = []byte{ 0, 0, 0, 0, // throttle 0, 0, 0, 1, // response 0, 0, // errorcode 0, 0, // string 2, // topic 0, 3, 'f', 'o', 'o', } ) func TestIncrementalAlterConfigsResponse(t *testing.T) { var response *IncrementalAlterConfigsResponse response = &IncrementalAlterConfigsResponse{ Resources: []*AlterConfigsResourceResponse{}, } testVersionDecodable(t, "empty", response, incrementalAlterResponseEmpty, 0) if len(response.Resources) != 0 { t.Error("Expected no groups") } response = &IncrementalAlterConfigsResponse{ Resources: []*AlterConfigsResourceResponse{ { ErrorCode: 0, ErrorMsg: "", Type: TopicResource, Name: "foo", }, }, } testResponse(t, "response with error", response, incrementalAlterResponsePopulated) } golang-github-ibm-sarama-1.43.2/init_producer_id_request.go000066400000000000000000000037511461256741300237770ustar00rootroot00000000000000package sarama import "time" type InitProducerIDRequest struct { Version int16 TransactionalID *string TransactionTimeout time.Duration ProducerID int64 ProducerEpoch int16 } func (i *InitProducerIDRequest) encode(pe packetEncoder) error { if i.Version < 2 { if err := pe.putNullableString(i.TransactionalID); err != nil { return err } } else { if err := pe.putNullableCompactString(i.TransactionalID); err != nil { return err } } pe.putInt32(int32(i.TransactionTimeout / time.Millisecond)) if i.Version >= 3 { pe.putInt64(i.ProducerID) pe.putInt16(i.ProducerEpoch) } if i.Version >= 2 { pe.putEmptyTaggedFieldArray() } return nil } func (i *InitProducerIDRequest) decode(pd packetDecoder, version int16) (err error) { i.Version = version if i.Version < 2 { if i.TransactionalID, err = pd.getNullableString(); err != nil { return err } } else { if i.TransactionalID, err = pd.getCompactNullableString(); err != nil { return err } } timeout, err := pd.getInt32() if err != nil { return err } i.TransactionTimeout = time.Duration(timeout) * time.Millisecond if i.Version >= 3 { if i.ProducerID, err = pd.getInt64(); err != nil { return err } if i.ProducerEpoch, err = pd.getInt16(); err != nil { return err } } if i.Version >= 2 { if _, err := pd.getEmptyTaggedFieldArray(); err != nil { return err } } return nil } func (i 
*InitProducerIDRequest) key() int16 { return 22 } func (i *InitProducerIDRequest) version() int16 { return i.Version } func (i *InitProducerIDRequest) headerVersion() int16 { if i.Version >= 2 { return 2 } return 1 } func (i *InitProducerIDRequest) isValidVersion() bool { return i.Version >= 0 && i.Version <= 4 } func (i *InitProducerIDRequest) requiredVersion() KafkaVersion { switch i.Version { case 4: return V2_7_0_0 case 3: return V2_5_0_0 case 2: return V2_4_0_0 case 1: return V2_0_0_0 case 0: return V0_11_0_0 default: return V2_7_0_0 } } golang-github-ibm-sarama-1.43.2/init_producer_id_request_test.go000066400000000000000000000022061461256741300250300ustar00rootroot00000000000000package sarama import ( "testing" "time" ) var ( initProducerIDRequestNull = []byte{ 255, 255, 0, 0, 0, 100, } initProducerIDRequest = []byte{ 0, 3, 't', 'x', 'n', 0, 0, 0, 100, } initProducerIDRequestTaggedFields = []byte{ 4, 116, 120, 110, // TransactionID in compact string 0, 0, 0, 100, // TransactionTimeout 0, // empty TaggedFields } initProducerIDRequestProducerId = []byte{ 4, 116, 120, 110, // TransactionID in compact string 0, 0, 0, 100, // TransactionTimeout 0, 0, 0, 0, 0, 0, 0, 123, // ProducerID 1, 65, // ProducerEpoch 0, // empty TaggedFields } ) func TestInitProducerIDRequest(t *testing.T) { req := &InitProducerIDRequest{ TransactionTimeout: 100 * time.Millisecond, } testRequest(t, "null transaction id", req, initProducerIDRequestNull) transactionID := "txn" req.TransactionalID = &transactionID testRequest(t, "transaction id", req, initProducerIDRequest) req.Version = 2 testRequest(t, "tagged fields", req, initProducerIDRequestTaggedFields) req.Version = 3 req.ProducerID = 123 req.ProducerEpoch = 321 testRequest(t, "producer id", req, initProducerIDRequestProducerId) } golang-github-ibm-sarama-1.43.2/init_producer_id_response.go000066400000000000000000000032511461256741300241400ustar00rootroot00000000000000package sarama import "time" type InitProducerIDResponse struct { ThrottleTime time.Duration Err KError Version int16 ProducerID int64 ProducerEpoch int16 } func (i *InitProducerIDResponse) encode(pe packetEncoder) error { pe.putInt32(int32(i.ThrottleTime / time.Millisecond)) pe.putInt16(int16(i.Err)) pe.putInt64(i.ProducerID) pe.putInt16(i.ProducerEpoch) if i.Version >= 2 { pe.putEmptyTaggedFieldArray() } return nil } func (i *InitProducerIDResponse) decode(pd packetDecoder, version int16) (err error) { i.Version = version throttleTime, err := pd.getInt32() if err != nil { return err } i.ThrottleTime = time.Duration(throttleTime) * time.Millisecond kerr, err := pd.getInt16() if err != nil { return err } i.Err = KError(kerr) if i.ProducerID, err = pd.getInt64(); err != nil { return err } if i.ProducerEpoch, err = pd.getInt16(); err != nil { return err } if i.Version >= 2 { if _, err := pd.getEmptyTaggedFieldArray(); err != nil { return err } } return nil } func (i *InitProducerIDResponse) key() int16 { return 22 } func (i *InitProducerIDResponse) version() int16 { return i.Version } func (i *InitProducerIDResponse) headerVersion() int16 { if i.Version >= 2 { return 1 } return 0 } func (i *InitProducerIDResponse) isValidVersion() bool { return i.Version >= 0 && i.Version <= 4 } func (i *InitProducerIDResponse) requiredVersion() KafkaVersion { switch i.Version { case 4: return V2_7_0_0 case 3: return V2_5_0_0 case 2: return V2_4_0_0 case 1: return V2_0_0_0 default: return V0_11_0_0 } } func (r *InitProducerIDResponse) throttleTime() time.Duration { return r.ThrottleTime } 
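The InitProducerID request/response pair above changes shape with the protocol version: v2 switches to the flexible encoding (compact nullable strings plus an empty tagged-field array), and v3 additionally encodes ProducerID and ProducerEpoch. A minimal, illustrative sketch of the corresponding struct values follows; the transactional ID, timeout, and producer id/epoch are arbitrary values mirroring the test vectors, and the helper name is hypothetical, not part of the library.

package sarama

import "time"

// exampleInitProducerIDRequests is an illustrative helper (not part of the
// library) showing how the exported fields map onto protocol versions.
func exampleInitProducerIDRequests() []*InitProducerIDRequest {
	txn := "txn"
	return []*InitProducerIDRequest{
		// v0/v1: TransactionalID as a plain nullable string, no tagged fields.
		{Version: 1, TransactionalID: &txn, TransactionTimeout: 100 * time.Millisecond},
		// v2+: compact nullable string plus an empty tagged-field array.
		{Version: 2, TransactionalID: &txn, TransactionTimeout: 100 * time.Millisecond},
		// v3+: also encodes the current ProducerID and ProducerEpoch.
		{
			Version:            3,
			TransactionalID:    &txn,
			TransactionTimeout: 100 * time.Millisecond,
			ProducerID:         123,
			ProducerEpoch:      321,
		},
	}
}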
golang-github-ibm-sarama-1.43.2/init_producer_id_response_test.go000066400000000000000000000015701461256741300252010ustar00rootroot00000000000000package sarama import ( "testing" "time" ) var ( initProducerIDResponse = []byte{ 0, 0, 0, 100, 0, 0, 0, 0, 0, 0, 0, 0, 31, 64, // producerID = 8000 0, 0, // epoch } initProducerIDRequestError = []byte{ 0, 0, 0, 100, 0, 51, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, } initProducerIdResponseWithTaggedFields = []byte{ 0, 0, 0, 100, 0, 51, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, } ) func TestInitProducerIDResponse(t *testing.T) { resp := &InitProducerIDResponse{ ThrottleTime: 100 * time.Millisecond, ProducerID: 8000, ProducerEpoch: 0, } testResponse(t, "", resp, initProducerIDResponse) resp.Err = ErrConcurrentTransactions resp.ProducerID = -1 testResponse(t, "with error", resp, initProducerIDRequestError) resp.Version = 2 testResponse(t, "with tagged fields", resp, initProducerIdResponseWithTaggedFields) } golang-github-ibm-sarama-1.43.2/interceptors.go000066400000000000000000000031421461256741300214200ustar00rootroot00000000000000package sarama // ProducerInterceptor allows you to intercept (and possibly mutate) the records // received by the producer before they are published to the Kafka cluster. // https://cwiki.apache.org/confluence/display/KAFKA/KIP-42%3A+Add+Producer+and+Consumer+Interceptors#KIP42:AddProducerandConsumerInterceptors-Motivation type ProducerInterceptor interface { // OnSend is called when the producer message is intercepted. Please avoid // modifying the message until it's safe to do so, as this is _not_ a copy // of the message. OnSend(*ProducerMessage) } // ConsumerInterceptor allows you to intercept (and possibly mutate) the records // received by the consumer before they are sent to the messages channel. // https://cwiki.apache.org/confluence/display/KAFKA/KIP-42%3A+Add+Producer+and+Consumer+Interceptors#KIP42:AddProducerandConsumerInterceptors-Motivation type ConsumerInterceptor interface { // OnConsume is called when the consumed message is intercepted. Please // avoid modifying the message until it's safe to do so, as this is _not_ a // copy of the message. OnConsume(*ConsumerMessage) } func (msg *ProducerMessage) safelyApplyInterceptor(interceptor ProducerInterceptor) { defer func() { if r := recover(); r != nil { Logger.Printf("Error when calling producer interceptor: %v, %v", interceptor, r) } }() interceptor.OnSend(msg) } func (msg *ConsumerMessage) safelyApplyInterceptor(interceptor ConsumerInterceptor) { defer func() { if r := recover(); r != nil { Logger.Printf("Error when calling consumer interceptor: %v, %v", interceptor, r) } }() interceptor.OnConsume(msg) } golang-github-ibm-sarama-1.43.2/internal/000077500000000000000000000000001461256741300201645ustar00rootroot00000000000000golang-github-ibm-sarama-1.43.2/internal/toxiproxy/000077500000000000000000000000001461256741300222515ustar00rootroot00000000000000golang-github-ibm-sarama-1.43.2/internal/toxiproxy/README.md000066400000000000000000000003761461256741300235360ustar00rootroot00000000000000# toxiproxy A minimal client implementation to setup proxies and toxics in toxiproxy as used in the FV suite. We have our own minimal client implementation to avoid having to pull in the toxiproxy repo which carries a number of transitive dependencies. 
golang-github-ibm-sarama-1.43.2/internal/toxiproxy/client.go000066400000000000000000000041301461256741300240540ustar00rootroot00000000000000package toxiproxy import ( "encoding/json" "fmt" "io" "net" "net/http" "time" ) type Client struct { httpClient *http.Client endpoint string } func NewClient(endpoint string) *Client { return &Client{ httpClient: &http.Client{ Transport: &http.Transport{ Proxy: http.ProxyFromEnvironment, DialContext: (&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, }).DialContext, ForceAttemptHTTP2: true, MaxIdleConns: -1, DisableKeepAlives: true, IdleConnTimeout: 90 * time.Second, TLSHandshakeTimeout: 10 * time.Second, ExpectContinueTimeout: 1 * time.Second, }, }, endpoint: endpoint, } } func (c *Client) CreateProxy( name string, listenAddr string, targetAddr string, ) (*Proxy, error) { proxy := &Proxy{ Name: name, ListenAddr: listenAddr, TargetAddr: targetAddr, Enabled: true, client: c, } return proxy.Save() } func (c *Client) Proxy(name string) (*Proxy, error) { req, err := http.NewRequest("GET", c.endpoint+"/proxies/"+name, nil) if err != nil { return nil, fmt.Errorf("failed to make proxy request: %w", err) } resp, err := c.httpClient.Do(req) if err != nil { return nil, fmt.Errorf("failed to http get proxy: %w", err) } defer resp.Body.Close() if resp.StatusCode != 200 { body, _ := io.ReadAll(resp.Body) return nil, fmt.Errorf("error getting proxy %s: %s %s", name, resp.Status, body) } var p Proxy if err := json.NewDecoder(resp.Body).Decode(&p); err != nil { return nil, fmt.Errorf("error decoding json for proxy %s: %w", name, err) } p.client = c return &p, nil } func (c *Client) ResetState() error { req, err := http.NewRequest("POST", c.endpoint+"/reset", http.NoBody) if err != nil { return fmt.Errorf("failed to make reset request: %w", err) } resp, err := c.httpClient.Do(req) if err != nil { return fmt.Errorf("failed to http post reset: %w", err) } defer resp.Body.Close() if resp.StatusCode != 204 { body, _ := io.ReadAll(resp.Body) return fmt.Errorf("error resetting proxies: %s %s", resp.Status, body) } return nil } golang-github-ibm-sarama-1.43.2/internal/toxiproxy/proxy.go000066400000000000000000000053511461256741300237650ustar00rootroot00000000000000package toxiproxy import ( "bytes" "encoding/json" "fmt" "io" "net/http" ) type Proxy struct { client *Client Name string `json:"name"` ListenAddr string `json:"listen"` TargetAddr string `json:"upstream"` Enabled bool `json:"enabled"` } type Attributes map[string]int func (p *Proxy) AddToxic( name string, toxicType string, stream string, toxicity float32, attributes Attributes, ) (*Toxic, error) { toxic := &Toxic{ Name: name, Type: toxicType, Stream: stream, Toxicity: toxicity, Attributes: attributes, } var b bytes.Buffer if err := json.NewEncoder(&b).Encode(&toxic); err != nil { return nil, fmt.Errorf("failed to json encode toxic: %w", err) } body := bytes.NewReader(b.Bytes()) c := p.client req, err := http.NewRequest("POST", c.endpoint+"/proxies/"+p.Name+"/toxics", body) if err != nil { return nil, fmt.Errorf("failed to make post toxic request: %w", err) } req.Header.Set("Content-Type", "application/json") resp, err := c.httpClient.Do(req) if err != nil { return nil, fmt.Errorf("failed to http post toxic: %w", err) } defer resp.Body.Close() if resp.StatusCode != 200 { body, _ := io.ReadAll(resp.Body) return nil, fmt.Errorf("error creating toxic %s: %s %s", name, resp.Status, body) } return toxic, nil } func (p *Proxy) Enable() error { p.Enabled = true _, err := p.Save() return err } func (p 
*Proxy) Disable() error { p.Enabled = false _, err := p.Save() return err } func (p *Proxy) Save() (*Proxy, error) { var b bytes.Buffer if err := json.NewEncoder(&b).Encode(&p); err != nil { return nil, fmt.Errorf("failed to json encode proxy: %w", err) } body := bytes.NewReader(b.Bytes()) c := p.client req, err := http.NewRequest("POST", c.endpoint+"/proxies/"+p.Name, body) if err != nil { return nil, fmt.Errorf("failed to make post proxy request: %w", err) } req.Header.Set("Content-Type", "application/json") resp, err := c.httpClient.Do(req) if err != nil { return nil, fmt.Errorf("failed to http post proxy: %w", err) } defer resp.Body.Close() if resp.StatusCode == 404 { if _, err := body.Seek(0, io.SeekStart); err != nil { return nil, fmt.Errorf("failed to rewind post body: %w", err) } req, err = http.NewRequest("POST", c.endpoint+"/proxies", body) if err != nil { return nil, fmt.Errorf("failed to make post proxy request: %w", err) } req.Header.Set("Content-Type", "application/json") resp, err = c.httpClient.Do(req) if err != nil { return nil, fmt.Errorf("failed to http post proxy: %w", err) } defer resp.Body.Close() } if resp.StatusCode != 200 && resp.StatusCode != 201 { body, _ := io.ReadAll(resp.Body) return nil, fmt.Errorf("error saving proxy: %s %s", resp.Status, body) } return p, nil } golang-github-ibm-sarama-1.43.2/internal/toxiproxy/toxic.go000066400000000000000000000003701461256741300237260ustar00rootroot00000000000000package toxiproxy type Toxic struct { Name string `json:"name"` Type string `json:"type"` Stream string `json:"stream,omitempty"` Toxicity float32 `json:"toxicity"` Attributes Attributes `json:"attributes"` } golang-github-ibm-sarama-1.43.2/join_group_request.go000066400000000000000000000113371461256741300226270ustar00rootroot00000000000000package sarama type GroupProtocol struct { // Name contains the protocol name. Name string // Metadata contains the protocol metadata. Metadata []byte } func (p *GroupProtocol) decode(pd packetDecoder) (err error) { p.Name, err = pd.getString() if err != nil { return err } p.Metadata, err = pd.getBytes() return err } func (p *GroupProtocol) encode(pe packetEncoder) (err error) { if err := pe.putString(p.Name); err != nil { return err } if err := pe.putBytes(p.Metadata); err != nil { return err } return nil } type JoinGroupRequest struct { // Version defines the protocol version to use for encode and decode Version int16 // GroupId contains the group identifier. GroupId string // SessionTimeout specifies that the coordinator should consider the consumer // dead if it receives no heartbeat after this timeout in milliseconds. SessionTimeout int32 // RebalanceTimeout contains the maximum time in milliseconds that the // coordinator will wait for each member to rejoin when rebalancing the // group. RebalanceTimeout int32 // MemberId contains the member id assigned by the group coordinator. MemberId string // GroupInstanceId contains the unique identifier of the consumer instance // provided by end user. GroupInstanceId *string // ProtocolType contains the unique name the for class of protocols // implemented by the group we want to join. ProtocolType string // GroupProtocols contains the list of protocols that the member supports. // deprecated; use OrderedGroupProtocols GroupProtocols map[string][]byte // OrderedGroupProtocols contains an ordered list of protocols that the member // supports. 
OrderedGroupProtocols []*GroupProtocol } func (r *JoinGroupRequest) encode(pe packetEncoder) error { if err := pe.putString(r.GroupId); err != nil { return err } pe.putInt32(r.SessionTimeout) if r.Version >= 1 { pe.putInt32(r.RebalanceTimeout) } if err := pe.putString(r.MemberId); err != nil { return err } if r.Version >= 5 { if err := pe.putNullableString(r.GroupInstanceId); err != nil { return err } } if err := pe.putString(r.ProtocolType); err != nil { return err } if len(r.GroupProtocols) > 0 { if len(r.OrderedGroupProtocols) > 0 { return PacketDecodingError{"cannot specify both GroupProtocols and OrderedGroupProtocols on JoinGroupRequest"} } if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil { return err } for name, metadata := range r.GroupProtocols { if err := pe.putString(name); err != nil { return err } if err := pe.putBytes(metadata); err != nil { return err } } } else { if err := pe.putArrayLength(len(r.OrderedGroupProtocols)); err != nil { return err } for _, protocol := range r.OrderedGroupProtocols { if err := protocol.encode(pe); err != nil { return err } } } return nil } func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) { r.Version = version if r.GroupId, err = pd.getString(); err != nil { return } if r.SessionTimeout, err = pd.getInt32(); err != nil { return } if version >= 1 { if r.RebalanceTimeout, err = pd.getInt32(); err != nil { return err } } if r.MemberId, err = pd.getString(); err != nil { return } if version >= 5 { if r.GroupInstanceId, err = pd.getNullableString(); err != nil { return } } if r.ProtocolType, err = pd.getString(); err != nil { return } n, err := pd.getArrayLength() if err != nil { return err } if n == 0 { return nil } r.GroupProtocols = make(map[string][]byte) for i := 0; i < n; i++ { protocol := &GroupProtocol{} if err := protocol.decode(pd); err != nil { return err } r.GroupProtocols[protocol.Name] = protocol.Metadata r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, protocol) } return nil } func (r *JoinGroupRequest) key() int16 { return 11 } func (r *JoinGroupRequest) version() int16 { return r.Version } func (r *JoinGroupRequest) headerVersion() int16 { return 1 } func (r *JoinGroupRequest) isValidVersion() bool { return r.Version >= 0 && r.Version <= 5 } func (r *JoinGroupRequest) requiredVersion() KafkaVersion { switch r.Version { case 5: return V2_3_0_0 case 4: return V2_2_0_0 case 3: return V2_0_0_0 case 2: return V0_11_0_0 case 1: return V0_10_1_0 case 0: return V0_10_0_0 default: return V2_3_0_0 } } func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) { r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, &GroupProtocol{ Name: name, Metadata: metadata, }) } func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error { bin, err := encode(metadata, nil) if err != nil { return err } r.AddGroupProtocol(name, bin) return nil } golang-github-ibm-sarama-1.43.2/join_group_request_test.go000066400000000000000000000111451461256741300236630ustar00rootroot00000000000000package sarama import ( "reflect" "testing" ) var ( joinGroupRequestV0_NoProtocols = []byte{ 0, 9, 'T', 'e', 's', 't', 'G', 'r', 'o', 'u', 'p', // Group ID 0, 0, 0, 100, // Session timeout 0, 0, // Member ID 0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // Protocol Type 0, 0, 0, 0, // 0 protocol groups } joinGroupRequestV0_OneProtocol = []byte{ 0, 9, 'T', 'e', 's', 't', 'G', 'r', 'o', 'u', 'p', // Group ID 0, 0, 0, 100, // Session timeout 0, 11, 'O', 'n', 'e', 
'P', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Member ID 0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // Protocol Type 0, 0, 0, 1, // 1 group protocol 0, 3, 'o', 'n', 'e', // Protocol name 0, 0, 0, 3, 0x01, 0x02, 0x03, // protocol metadata } joinGroupRequestV1 = []byte{ 0, 9, 'T', 'e', 's', 't', 'G', 'r', 'o', 'u', 'p', // Group ID 0, 0, 0, 100, // Session timeout 0, 0, 0, 200, // Rebalance timeout 0, 11, 'O', 'n', 'e', 'P', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Member ID 0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // Protocol Type 0, 0, 0, 1, // 1 group protocol 0, 3, 'o', 'n', 'e', // Protocol name 0, 0, 0, 3, 0x01, 0x02, 0x03, // protocol metadata } ) func TestJoinGroupRequest(t *testing.T) { request := new(JoinGroupRequest) request.GroupId = "TestGroup" request.SessionTimeout = 100 request.ProtocolType = "consumer" testRequest(t, "V0: no protocols", request, joinGroupRequestV0_NoProtocols) } func TestJoinGroupRequestV0_OneProtocol(t *testing.T) { request := new(JoinGroupRequest) request.GroupId = "TestGroup" request.SessionTimeout = 100 request.MemberId = "OneProtocol" request.ProtocolType = "consumer" request.AddGroupProtocol("one", []byte{0x01, 0x02, 0x03}) packet := testRequestEncode(t, "V0: one protocol", request, joinGroupRequestV0_OneProtocol) request.GroupProtocols = make(map[string][]byte) request.GroupProtocols["one"] = []byte{0x01, 0x02, 0x03} testRequestDecode(t, "V0: one protocol", request, packet) } func TestJoinGroupRequestDeprecatedEncode(t *testing.T) { request := new(JoinGroupRequest) request.GroupId = "TestGroup" request.SessionTimeout = 100 request.MemberId = "OneProtocol" request.ProtocolType = "consumer" request.GroupProtocols = make(map[string][]byte) request.GroupProtocols["one"] = []byte{0x01, 0x02, 0x03} packet := testRequestEncode(t, "V0: one protocol", request, joinGroupRequestV0_OneProtocol) request.AddGroupProtocol("one", []byte{0x01, 0x02, 0x03}) testRequestDecode(t, "V0: one protocol", request, packet) } func TestJoinGroupRequestV1(t *testing.T) { request := new(JoinGroupRequest) request.Version = 1 request.GroupId = "TestGroup" request.SessionTimeout = 100 request.RebalanceTimeout = 200 request.MemberId = "OneProtocol" request.ProtocolType = "consumer" request.AddGroupProtocol("one", []byte{0x01, 0x02, 0x03}) packet := testRequestEncode(t, "V1", request, joinGroupRequestV1) request.GroupProtocols = make(map[string][]byte) request.GroupProtocols["one"] = []byte{0x01, 0x02, 0x03} testRequestDecode(t, "V1", request, packet) } var ( joinGroupRequestV5 = []byte{ 0, 9, 'T', 'e', 's', 't', 'G', 'r', 'o', 'u', 'p', // Group ID 0, 0, 0, 100, // Session timeout 0, 0, 0, 200, // Rebalance timeout 0, 11, 'O', 'n', 'e', 'P', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Member ID 0, 3, 'g', 'i', 'd', // GroupInstanceId 0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // Protocol Type 0, 0, 0, 1, // 1 group protocol 0, 3, 'o', 'n', 'e', // Protocol name 0, 0, 0, 3, 0x01, 0x02, 0x03, // protocol metadata } ) func TestJoinGroupRequestV3plus(t *testing.T) { groupInstanceId := "gid" tests := []struct { CaseName string Version int16 MessageBytes []byte Message *JoinGroupRequest }{ { "v5", 5, joinGroupRequestV5, &JoinGroupRequest{ Version: 5, GroupId: "TestGroup", SessionTimeout: 100, RebalanceTimeout: 200, MemberId: "OneProtocol", GroupInstanceId: &groupInstanceId, ProtocolType: "consumer", GroupProtocols: map[string][]byte{ "one": {1, 2, 3}, }, OrderedGroupProtocols: []*GroupProtocol{ {Name: "one", Metadata: []byte{1, 2, 3}}, }, }, }, } for _, c := range tests { request := 
new(JoinGroupRequest) testVersionDecodable(t, c.CaseName, request, c.MessageBytes, c.Version) if !reflect.DeepEqual(c.Message, request) { t.Errorf("case %s decode failed, expected:%+v got %+v", c.CaseName, c.Message, request) } // This is to avoid error check "cannot specify both GroupProtocols and OrderedGroupProtocols on JoinGroupRequest" c.Message.GroupProtocols = nil testEncodable(t, c.CaseName, c.Message, c.MessageBytes) } } golang-github-ibm-sarama-1.43.2/join_group_response.go000066400000000000000000000077301461256741300227770ustar00rootroot00000000000000package sarama import "time" type JoinGroupResponse struct { // Version defines the protocol version to use for encode and decode Version int16 // ThrottleTime contains the duration for which the request was throttled due // to a quota violation, or zero if the request did not violate any quota. ThrottleTime int32 // Err contains the error code, or 0 if there was no error. Err KError // GenerationId contains the generation ID of the group. GenerationId int32 // GroupProtocol contains the group protocol selected by the coordinator. GroupProtocol string // LeaderId contains the leader of the group. LeaderId string // MemberId contains the member ID assigned by the group coordinator. MemberId string // Members contains the per-group-member information. Members []GroupMember } type GroupMember struct { // MemberId contains the group member ID. MemberId string // GroupInstanceId contains the unique identifier of the consumer instance // provided by end user. GroupInstanceId *string // Metadata contains the group member metadata. Metadata []byte } func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) { members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members)) for _, member := range r.Members { meta := new(ConsumerGroupMemberMetadata) if err := decode(member.Metadata, meta, nil); err != nil { return nil, err } members[member.MemberId] = *meta } return members, nil } func (r *JoinGroupResponse) encode(pe packetEncoder) error { if r.Version >= 2 { pe.putInt32(r.ThrottleTime) } pe.putInt16(int16(r.Err)) pe.putInt32(r.GenerationId) if err := pe.putString(r.GroupProtocol); err != nil { return err } if err := pe.putString(r.LeaderId); err != nil { return err } if err := pe.putString(r.MemberId); err != nil { return err } if err := pe.putArrayLength(len(r.Members)); err != nil { return err } for _, member := range r.Members { if err := pe.putString(member.MemberId); err != nil { return err } if r.Version >= 5 { if err := pe.putNullableString(member.GroupInstanceId); err != nil { return err } } if err := pe.putBytes(member.Metadata); err != nil { return err } } return nil } func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) { r.Version = version if version >= 2 { if r.ThrottleTime, err = pd.getInt32(); err != nil { return } } kerr, err := pd.getInt16() if err != nil { return err } r.Err = KError(kerr) if r.GenerationId, err = pd.getInt32(); err != nil { return } if r.GroupProtocol, err = pd.getString(); err != nil { return } if r.LeaderId, err = pd.getString(); err != nil { return } if r.MemberId, err = pd.getString(); err != nil { return } n, err := pd.getArrayLength() if err != nil { return err } if n == 0 { return nil } r.Members = make([]GroupMember, n) for i := 0; i < n; i++ { memberId, err := pd.getString() if err != nil { return err } var groupInstanceId *string = nil if r.Version >= 5 { groupInstanceId, err = pd.getNullableString() if err != nil { return err } } 
memberMetadata, err := pd.getBytes() if err != nil { return err } r.Members[i] = GroupMember{MemberId: memberId, GroupInstanceId: groupInstanceId, Metadata: memberMetadata} } return nil } func (r *JoinGroupResponse) key() int16 { return 11 } func (r *JoinGroupResponse) version() int16 { return r.Version } func (r *JoinGroupResponse) headerVersion() int16 { return 0 } func (r *JoinGroupResponse) isValidVersion() bool { return r.Version >= 0 && r.Version <= 5 } func (r *JoinGroupResponse) requiredVersion() KafkaVersion { switch r.Version { case 5: return V2_3_0_0 case 4: return V2_2_0_0 case 3: return V2_0_0_0 case 2: return V0_11_0_0 case 1: return V0_10_1_0 case 0: return V0_10_0_0 default: return V2_3_0_0 } } func (r *JoinGroupResponse) throttleTime() time.Duration { return time.Duration(r.ThrottleTime) * time.Millisecond } golang-github-ibm-sarama-1.43.2/join_group_response_test.go000066400000000000000000000157741461256741300240450ustar00rootroot00000000000000package sarama import ( "errors" "reflect" "testing" ) var ( joinGroupResponseV0_NoError = []byte{ 0x00, 0x00, // No error 0x00, 0x01, 0x02, 0x03, // Generation ID 0, 8, 'p', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Protocol name chosen 0, 3, 'f', 'o', 'o', // Leader ID 0, 3, 'b', 'a', 'r', // Member ID 0, 0, 0, 0, // No member info } joinGroupResponseV0_WithError = []byte{ 0, 23, // Error: inconsistent group protocol 0x00, 0x00, 0x00, 0x00, // Generation ID 0, 0, // Protocol name chosen 0, 0, // Leader ID 0, 0, // Member ID 0, 0, 0, 0, // No member info } joinGroupResponseV0_Leader = []byte{ 0x00, 0x00, // No error 0x00, 0x01, 0x02, 0x03, // Generation ID 0, 8, 'p', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Protocol name chosen 0, 3, 'f', 'o', 'o', // Leader ID 0, 3, 'f', 'o', 'o', // Member ID == Leader ID 0, 0, 0, 1, // 1 member 0, 3, 'f', 'o', 'o', // Member ID 0, 0, 0, 3, 0x01, 0x02, 0x03, // Member metadata } joinGroupResponseV1 = []byte{ 0x00, 0x00, // No error 0x00, 0x01, 0x02, 0x03, // Generation ID 0, 8, 'p', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Protocol name chosen 0, 3, 'f', 'o', 'o', // Leader ID 0, 3, 'b', 'a', 'r', // Member ID 0, 0, 0, 0, // No member info } joinGroupResponseV2 = []byte{ 0, 0, 0, 100, 0x00, 0x00, // No error 0x00, 0x01, 0x02, 0x03, // Generation ID 0, 8, 'p', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Protocol name chosen 0, 3, 'f', 'o', 'o', // Leader ID 0, 3, 'b', 'a', 'r', // Member ID 0, 0, 0, 0, // No member info } ) func TestJoinGroupResponseV0(t *testing.T) { var response *JoinGroupResponse response = new(JoinGroupResponse) testVersionDecodable(t, "no error", response, joinGroupResponseV0_NoError, 0) if !errors.Is(response.Err, ErrNoError) { t.Error("Decoding Err failed: no error expected but found", response.Err) } if response.GenerationId != 66051 { t.Error("Decoding GenerationId failed, found:", response.GenerationId) } if response.LeaderId != "foo" { t.Error("Decoding LeaderId failed, found:", response.LeaderId) } if response.MemberId != "bar" { t.Error("Decoding MemberId failed, found:", response.MemberId) } if len(response.Members) != 0 { t.Error("Decoding Members failed, found:", response.Members) } response = new(JoinGroupResponse) testVersionDecodable(t, "with error", response, joinGroupResponseV0_WithError, 0) if !errors.Is(response.Err, ErrInconsistentGroupProtocol) { t.Error("Decoding Err failed: ErrInconsistentGroupProtocol expected but found", response.Err) } if response.GenerationId != 0 { t.Error("Decoding GenerationId failed, found:", response.GenerationId) } if response.LeaderId != "" { 
t.Error("Decoding LeaderId failed, found:", response.LeaderId) } if response.MemberId != "" { t.Error("Decoding MemberId failed, found:", response.MemberId) } if len(response.Members) != 0 { t.Error("Decoding Members failed, found:", response.Members) } response = new(JoinGroupResponse) testVersionDecodable(t, "with error", response, joinGroupResponseV0_Leader, 0) if !errors.Is(response.Err, ErrNoError) { t.Error("Decoding Err failed: ErrNoError expected but found", response.Err) } if response.GenerationId != 66051 { t.Error("Decoding GenerationId failed, found:", response.GenerationId) } if response.LeaderId != "foo" { t.Error("Decoding LeaderId failed, found:", response.LeaderId) } if response.MemberId != "foo" { t.Error("Decoding MemberId failed, found:", response.MemberId) } if len(response.Members) != 1 { t.Error("Decoding Members failed, found:", response.Members) } if response.Members[0].MemberId != "foo" { t.Error("Decoding MemberId failed, found:", response.Members[0].MemberId) } if !reflect.DeepEqual(response.Members[0].Metadata, []byte{0x01, 0x02, 0x03}) { t.Error("Decoding foo member failed, found:", response.Members[0].Metadata) } } func TestJoinGroupResponseV1(t *testing.T) { response := new(JoinGroupResponse) testVersionDecodable(t, "no error", response, joinGroupResponseV1, 1) if !errors.Is(response.Err, ErrNoError) { t.Error("Decoding Err failed: no error expected but found", response.Err) } if response.GenerationId != 66051 { t.Error("Decoding GenerationId failed, found:", response.GenerationId) } if response.GroupProtocol != "protocol" { t.Error("Decoding GroupProtocol failed, found:", response.GroupProtocol) } if response.LeaderId != "foo" { t.Error("Decoding LeaderId failed, found:", response.LeaderId) } if response.MemberId != "bar" { t.Error("Decoding MemberId failed, found:", response.MemberId) } if response.Version != 1 { t.Error("Decoding Version failed, found:", response.Version) } if len(response.Members) != 0 { t.Error("Decoding Members failed, found:", response.Members) } } func TestJoinGroupResponseV2(t *testing.T) { response := new(JoinGroupResponse) testVersionDecodable(t, "no error", response, joinGroupResponseV2, 2) if response.ThrottleTime != 100 { t.Error("Decoding ThrottleTime failed, found:", response.ThrottleTime) } if !errors.Is(response.Err, ErrNoError) { t.Error("Decoding Err failed: no error expected but found", response.Err) } if response.GenerationId != 66051 { t.Error("Decoding GenerationId failed, found:", response.GenerationId) } if response.GroupProtocol != "protocol" { t.Error("Decoding GroupProtocol failed, found:", response.GroupProtocol) } if response.LeaderId != "foo" { t.Error("Decoding LeaderId failed, found:", response.LeaderId) } if response.MemberId != "bar" { t.Error("Decoding MemberId failed, found:", response.MemberId) } if response.Version != 2 { t.Error("Decoding Version failed, found:", response.Version) } if len(response.Members) != 0 { t.Error("Decoding Members failed, found:", response.Members) } } var ( joinGroupResponseV5 = []byte{ 0, 0, 0, 100, // ThrottleTimeMs 0x00, 0x00, // No error 0x00, 0x01, 0x02, 0x03, // Generation ID 0, 8, 'p', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Protocol name chosen 0, 3, 'f', 'o', 'o', // Leader ID 0, 3, 'b', 'a', 'r', // Member ID 0, 0, 0, 1, // One member info 0, 3, 'm', 'i', 'd', // memberId 0, 3, 'g', 'i', 'd', // GroupInstanceId 0, 0, 0, 3, 1, 2, 3, // Metadata } ) func TestJoinGroupResponse3plus(t *testing.T) { groupInstanceId := "gid" tests := []struct { CaseName string Version 
int16 MessageBytes []byte Message *JoinGroupResponse }{ { "v5", 5, joinGroupResponseV5, &JoinGroupResponse{ Version: 5, ThrottleTime: 100, Err: ErrNoError, GenerationId: 0x00010203, GroupProtocol: "protocol", LeaderId: "foo", MemberId: "bar", Members: []GroupMember{ {"mid", &groupInstanceId, []byte{1, 2, 3}}, }, }, }, } for _, c := range tests { response := new(JoinGroupResponse) testVersionDecodable(t, c.CaseName, response, c.MessageBytes, c.Version) if !reflect.DeepEqual(c.Message, response) { t.Errorf("case %s decode failed, expected:%+v got %+v", c.CaseName, c.Message, response) } testEncodable(t, c.CaseName, c.Message, c.MessageBytes) } } golang-github-ibm-sarama-1.43.2/kerberos_client.go000066400000000000000000000034001461256741300220460ustar00rootroot00000000000000package sarama import ( krb5client "github.com/jcmturner/gokrb5/v8/client" krb5config "github.com/jcmturner/gokrb5/v8/config" "github.com/jcmturner/gokrb5/v8/credentials" "github.com/jcmturner/gokrb5/v8/keytab" "github.com/jcmturner/gokrb5/v8/types" ) type KerberosGoKrb5Client struct { krb5client.Client } func (c *KerberosGoKrb5Client) Domain() string { return c.Credentials.Domain() } func (c *KerberosGoKrb5Client) CName() types.PrincipalName { return c.Credentials.CName() } // NewKerberosClient creates kerberos client used to obtain TGT and TGS tokens. // It uses pure go Kerberos 5 solution (RFC-4121 and RFC-4120). // uses gokrb5 library underlying which is a pure go kerberos client with some GSS-API capabilities. func NewKerberosClient(config *GSSAPIConfig) (KerberosClient, error) { cfg, err := krb5config.Load(config.KerberosConfigPath) if err != nil { return nil, err } return createClient(config, cfg) } func createClient(config *GSSAPIConfig, cfg *krb5config.Config) (KerberosClient, error) { var client *krb5client.Client switch config.AuthType { case KRB5_KEYTAB_AUTH: kt, err := keytab.Load(config.KeyTabPath) if err != nil { return nil, err } client = krb5client.NewWithKeytab(config.Username, config.Realm, kt, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST)) case KRB5_CCACHE_AUTH: cc, err := credentials.LoadCCache(config.CCachePath) if err != nil { return nil, err } client, err = krb5client.NewFromCCache(cc, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST)) if err != nil { return nil, err } default: client = krb5client.NewWithPassword(config.Username, config.Realm, config.Password, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST)) } return &KerberosGoKrb5Client{*client}, nil } golang-github-ibm-sarama-1.43.2/kerberos_client_test.go000066400000000000000000000134561461256741300231210ustar00rootroot00000000000000package sarama import ( "errors" "testing" krbcfg "github.com/jcmturner/gokrb5/v8/config" ) /* * Minimum requirement for client creation * we are not testing the client itself, we only test that the client is created * properly. 
* */ const ( krb5cfg = `[libdefaults] default_realm = TEST.GOKRB5 dns_lookup_realm = false dns_lookup_kdc = false ticket_lifetime = 24h forwardable = yes default_tkt_enctypes = aes256-cts-hmac-sha1-96 default_tgs_enctypes = aes256-cts-hmac-sha1-96 noaddresses = false [realms] TEST.GOKRB5 = { kdc = 127.0.0.1:88 admin_server = 127.0.0.1:749 default_domain = test.gokrb5 } RESDOM.GOKRB5 = { kdc = 10.80.88.88:188 admin_server = 127.0.0.1:749 default_domain = resdom.gokrb5 } USER.GOKRB5 = { kdc = 192.168.88.100:88 admin_server = 192.168.88.100:464 default_domain = user.gokrb5 } RES.GOKRB5 = { kdc = 192.168.88.101:88 admin_server = 192.168.88.101:464 default_domain = res.gokrb5 } [domain_realm] .test.gokrb5 = TEST.GOKRB5 test.gokrb5 = TEST.GOKRB5 .resdom.gokrb5 = RESDOM.GOKRB5 resdom.gokrb5 = RESDOM.GOKRB5 .user.gokrb5 = USER.GOKRB5 user.gokrb5 = USER.GOKRB5 .res.gokrb5 = RES.GOKRB5 res.gokrb5 = RES.GOKRB5 ` ) func TestFaildToCreateKerberosConfig(t *testing.T) { expectedErr := errors.New("configuration file could not be opened: krb5.conf open krb5.conf: no such file or directory") clientConfig := NewTestConfig() clientConfig.Net.SASL.Mechanism = SASLTypeGSSAPI clientConfig.Net.SASL.Enable = true clientConfig.Net.SASL.GSSAPI.ServiceName = "kafka" clientConfig.Net.SASL.GSSAPI.Realm = "EXAMPLE.COM" clientConfig.Net.SASL.GSSAPI.Username = "client" clientConfig.Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH clientConfig.Net.SASL.GSSAPI.Password = "qwerty" clientConfig.Net.SASL.GSSAPI.KerberosConfigPath = "krb5.conf" _, err := NewKerberosClient(&clientConfig.Net.SASL.GSSAPI) // Expect to create client with password if err.Error() != expectedErr.Error() { t.Errorf("Expected error:%s, got:%s.", err, expectedErr) } } func TestCreateWithPassword(t *testing.T) { kerberosConfig, err := krbcfg.NewFromString(krb5cfg) if err != nil { t.Fatal(err) } expectedDoman := "EXAMPLE.COM" expectedCName := "client" clientConfig := NewTestConfig() clientConfig.Net.SASL.Mechanism = SASLTypeGSSAPI clientConfig.Net.SASL.Enable = true clientConfig.Net.SASL.GSSAPI.ServiceName = "kafka" clientConfig.Net.SASL.GSSAPI.Realm = "EXAMPLE.COM" clientConfig.Net.SASL.GSSAPI.Username = "client" clientConfig.Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH clientConfig.Net.SASL.GSSAPI.Password = "qwerty" clientConfig.Net.SASL.GSSAPI.KerberosConfigPath = "/etc/krb5.conf" client, _ := createClient(&clientConfig.Net.SASL.GSSAPI, kerberosConfig) // Expect to create client with password if client == nil { t.Errorf("Expected client not nil") } if client.Domain() != expectedDoman { t.Errorf("Client domain: %s, got: %s", expectedDoman, client.Domain()) } if client.CName().NameString[0] != expectedCName { t.Errorf("Client domain:%s, got: %s", expectedCName, client.CName().NameString[0]) } } func TestCreateWithKeyTab(t *testing.T) { kerberosConfig, err := krbcfg.NewFromString(krb5cfg) if err != nil { t.Fatal(err) } // Expect to try to create a client with keytab and fails with "o such file or directory" error expectedErr := errors.New("open nonexist.keytab: no such file or directory") clientConfig := NewTestConfig() clientConfig.Net.SASL.Mechanism = SASLTypeGSSAPI clientConfig.Net.SASL.Enable = true clientConfig.Net.SASL.GSSAPI.ServiceName = "kafka" clientConfig.Net.SASL.GSSAPI.Realm = "EXAMPLE.COM" clientConfig.Net.SASL.GSSAPI.Username = "client" clientConfig.Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH clientConfig.Net.SASL.GSSAPI.KeyTabPath = "nonexist.keytab" clientConfig.Net.SASL.GSSAPI.KerberosConfigPath = "/etc/krb5.conf" _, err = 
createClient(&clientConfig.Net.SASL.GSSAPI, kerberosConfig) if err.Error() != expectedErr.Error() { t.Errorf("Expected error:%s, got:%s.", err, expectedErr) } } func TestCreateWithCredentialsCache(t *testing.T) { kerberosConfig, err := krbcfg.NewFromString(krb5cfg) if err != nil { t.Fatal(err) } // Expect to try to create a client with a credentials cache and fails with "o such file or directory" error expectedErr := errors.New("open nonexist.ccache: no such file or directory") clientConfig := NewTestConfig() clientConfig.Net.SASL.Mechanism = SASLTypeGSSAPI clientConfig.Net.SASL.Enable = true clientConfig.Net.SASL.GSSAPI.ServiceName = "kafka" clientConfig.Net.SASL.GSSAPI.AuthType = KRB5_CCACHE_AUTH clientConfig.Net.SASL.GSSAPI.CCachePath = "nonexist.ccache" clientConfig.Net.SASL.GSSAPI.KerberosConfigPath = "/etc/krb5.conf" _, err = createClient(&clientConfig.Net.SASL.GSSAPI, kerberosConfig) if err.Error() != expectedErr.Error() { t.Errorf("Expected error:%s, got:%s.", err, expectedErr) } } func TestCreateWithDisablePAFXFAST(t *testing.T) { kerberosConfig, err := krbcfg.NewFromString(krb5cfg) if err != nil { t.Fatal(err) } // Expect to try to create a client with keytab and fails with "o such file or directory" error expectedErr := errors.New("open nonexist.keytab: no such file or directory") clientConfig := NewTestConfig() clientConfig.Net.SASL.Mechanism = SASLTypeGSSAPI clientConfig.Net.SASL.Enable = true clientConfig.Net.SASL.GSSAPI.ServiceName = "kafka" clientConfig.Net.SASL.GSSAPI.Realm = "EXAMPLE.COM" clientConfig.Net.SASL.GSSAPI.Username = "client" clientConfig.Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH clientConfig.Net.SASL.GSSAPI.KeyTabPath = "nonexist.keytab" clientConfig.Net.SASL.GSSAPI.KerberosConfigPath = "/etc/krb5.conf" clientConfig.Net.SASL.GSSAPI.DisablePAFXFAST = true _, err = createClient(&clientConfig.Net.SASL.GSSAPI, kerberosConfig) if err.Error() != expectedErr.Error() { t.Errorf("Expected error:%s, got:%s.", err, expectedErr) } } golang-github-ibm-sarama-1.43.2/leave_group_request.go000066400000000000000000000037671461256741300227740ustar00rootroot00000000000000package sarama type MemberIdentity struct { MemberId string GroupInstanceId *string } type LeaveGroupRequest struct { Version int16 GroupId string MemberId string // Removed in Version 3 Members []MemberIdentity // Added in Version 3 } func (r *LeaveGroupRequest) encode(pe packetEncoder) error { if err := pe.putString(r.GroupId); err != nil { return err } if r.Version < 3 { if err := pe.putString(r.MemberId); err != nil { return err } } if r.Version >= 3 { if err := pe.putArrayLength(len(r.Members)); err != nil { return err } for _, member := range r.Members { if err := pe.putString(member.MemberId); err != nil { return err } if err := pe.putNullableString(member.GroupInstanceId); err != nil { return err } } } return nil } func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) { r.Version = version if r.GroupId, err = pd.getString(); err != nil { return } if r.Version < 3 { if r.MemberId, err = pd.getString(); err != nil { return } } if r.Version >= 3 { memberCount, err := pd.getArrayLength() if err != nil { return err } r.Members = make([]MemberIdentity, memberCount) for i := 0; i < memberCount; i++ { memberIdentity := MemberIdentity{} if memberIdentity.MemberId, err = pd.getString(); err != nil { return err } if memberIdentity.GroupInstanceId, err = pd.getNullableString(); err != nil { return err } r.Members[i] = memberIdentity } } return nil } func (r *LeaveGroupRequest) key() int16 
{ return 13 } func (r *LeaveGroupRequest) version() int16 { return r.Version } func (r *LeaveGroupRequest) headerVersion() int16 { return 1 } func (r *LeaveGroupRequest) isValidVersion() bool { return r.Version >= 0 && r.Version <= 3 } func (r *LeaveGroupRequest) requiredVersion() KafkaVersion { switch r.Version { case 3: return V2_4_0_0 case 2: return V2_0_0_0 case 1: return V0_11_0_0 case 0: return V0_9_0_0 default: return V2_4_0_0 } } golang-github-ibm-sarama-1.43.2/leave_group_request_test.go000066400000000000000000000023771461256741300240270ustar00rootroot00000000000000package sarama import ( "reflect" "testing" ) var ( basicLeaveGroupRequestV0 = []byte{ 0, 3, 'f', 'o', 'o', 0, 3, 'b', 'a', 'r', } basicLeaveGroupRequestV3 = []byte{ 0, 3, 'f', 'o', 'o', 0, 0, 0, 2, // Two Member 0, 4, 'm', 'i', 'd', '1', // MemberId 255, 255, // GroupInstanceId nil 0, 4, 'm', 'i', 'd', '2', // MemberId 0, 3, 'g', 'i', 'd', // GroupInstanceId } ) func TestLeaveGroupRequest(t *testing.T) { groupInstanceId := "gid" tests := []struct { CaseName string Version int16 MessageBytes []byte Message *LeaveGroupRequest }{ { "v0", 0, basicLeaveGroupRequestV0, &LeaveGroupRequest{ Version: 0, GroupId: "foo", MemberId: "bar", }, }, { "v3", 3, basicLeaveGroupRequestV3, &LeaveGroupRequest{ Version: 3, GroupId: "foo", Members: []MemberIdentity{ {"mid1", nil}, {"mid2", &groupInstanceId}, }, }, }, } for _, c := range tests { request := new(LeaveGroupRequest) testVersionDecodable(t, c.CaseName, request, c.MessageBytes, c.Version) if !reflect.DeepEqual(c.Message, request) { t.Errorf("case %s decode failed, expected:%+v got %+v", c.CaseName, c.Message, request) } testEncodable(t, c.CaseName, c.Message, c.MessageBytes) } } golang-github-ibm-sarama-1.43.2/leave_group_response.go000066400000000000000000000042321461256741300231260ustar00rootroot00000000000000package sarama import "time" type MemberResponse struct { MemberId string GroupInstanceId *string Err KError } type LeaveGroupResponse struct { Version int16 ThrottleTime int32 Err KError Members []MemberResponse } func (r *LeaveGroupResponse) encode(pe packetEncoder) error { if r.Version >= 1 { pe.putInt32(r.ThrottleTime) } pe.putInt16(int16(r.Err)) if r.Version >= 3 { if err := pe.putArrayLength(len(r.Members)); err != nil { return err } for _, member := range r.Members { if err := pe.putString(member.MemberId); err != nil { return err } if err := pe.putNullableString(member.GroupInstanceId); err != nil { return err } pe.putInt16(int16(member.Err)) } } return nil } func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) { r.Version = version if r.Version >= 1 { if r.ThrottleTime, err = pd.getInt32(); err != nil { return err } } kerr, err := pd.getInt16() if err != nil { return err } r.Err = KError(kerr) if r.Version >= 3 { membersLen, err := pd.getArrayLength() if err != nil { return err } r.Members = make([]MemberResponse, membersLen) for i := 0; i < len(r.Members); i++ { if r.Members[i].MemberId, err = pd.getString(); err != nil { return err } if r.Members[i].GroupInstanceId, err = pd.getNullableString(); err != nil { return err } if memberErr, err := pd.getInt16(); err != nil { return err } else { r.Members[i].Err = KError(memberErr) } } } return nil } func (r *LeaveGroupResponse) key() int16 { return 13 } func (r *LeaveGroupResponse) version() int16 { return r.Version } func (r *LeaveGroupResponse) headerVersion() int16 { return 0 } func (r *LeaveGroupResponse) isValidVersion() bool { return r.Version >= 0 && r.Version <= 3 } func (r 
*LeaveGroupResponse) requiredVersion() KafkaVersion { switch r.Version { case 3: return V2_4_0_0 case 2: return V2_0_0_0 case 1: return V0_11_0_0 case 0: return V0_9_0_0 default: return V2_4_0_0 } } func (r *LeaveGroupResponse) throttleTime() time.Duration { return time.Duration(r.ThrottleTime) * time.Millisecond } golang-github-ibm-sarama-1.43.2/leave_group_response_test.go000066400000000000000000000034461461256741300241730ustar00rootroot00000000000000package sarama import ( "reflect" "testing" ) var ( leaveGroupResponseV0NoError = []byte{0x00, 0x00} leaveGroupResponseV0WithError = []byte{0, 25} leaveGroupResponseV1NoError = []byte{ 0, 0, 0, 100, // ThrottleTime 0x00, 0x00, // Err } leaveGroupResponseV3NoError = []byte{ 0, 0, 0, 100, // ThrottleTime 0x00, 0x00, // Err 0, 0, 0, 2, // Two Members 0, 4, 'm', 'i', 'd', '1', // MemberId 255, 255, // GroupInstanceId 0, 0, // Err 0, 4, 'm', 'i', 'd', '2', // MemberId 0, 3, 'g', 'i', 'd', // GroupInstanceId 0, 25, // Err } ) func TestLeaveGroupResponse(t *testing.T) { groupInstanceId := "gid" tests := []struct { CaseName string Version int16 MessageBytes []byte Message *LeaveGroupResponse }{ { "v0-noErr", 0, leaveGroupResponseV0NoError, &LeaveGroupResponse{ Version: 0, Err: ErrNoError, }, }, { "v0-Err", 0, leaveGroupResponseV0WithError, &LeaveGroupResponse{ Version: 0, Err: ErrUnknownMemberId, }, }, { "v1-noErr", 1, leaveGroupResponseV1NoError, &LeaveGroupResponse{ Version: 1, ThrottleTime: 100, Err: ErrNoError, }, }, { "v3", 3, leaveGroupResponseV3NoError, &LeaveGroupResponse{ Version: 3, ThrottleTime: 100, Err: ErrNoError, Members: []MemberResponse{ {"mid1", nil, ErrNoError}, {"mid2", &groupInstanceId, ErrUnknownMemberId}, }, }, }, } for _, c := range tests { response := new(LeaveGroupResponse) testVersionDecodable(t, c.CaseName, response, c.MessageBytes, c.Version) if !reflect.DeepEqual(c.Message, response) { t.Errorf("case %s decode failed, expected:%+v got %+v", c.CaseName, c.Message, response) } testEncodable(t, c.CaseName, c.Message, c.MessageBytes) } } golang-github-ibm-sarama-1.43.2/length_field.go000066400000000000000000000040641461256741300213270ustar00rootroot00000000000000package sarama import ( "encoding/binary" "sync" ) // LengthField implements the PushEncoder and PushDecoder interfaces for calculating 4-byte lengths. 
type lengthField struct { startOffset int length int32 } var lengthFieldPool = sync.Pool{} func acquireLengthField() *lengthField { val := lengthFieldPool.Get() if val != nil { return val.(*lengthField) } return &lengthField{} } func releaseLengthField(m *lengthField) { lengthFieldPool.Put(m) } func (l *lengthField) decode(pd packetDecoder) error { var err error l.length, err = pd.getInt32() if err != nil { return err } if l.length > int32(pd.remaining()) { return ErrInsufficientData } return nil } func (l *lengthField) saveOffset(in int) { l.startOffset = in } func (l *lengthField) reserveLength() int { return 4 } func (l *lengthField) run(curOffset int, buf []byte) error { binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4)) return nil } func (l *lengthField) check(curOffset int, buf []byte) error { if int32(curOffset-l.startOffset-4) != l.length { return PacketDecodingError{"length field invalid"} } return nil } type varintLengthField struct { startOffset int length int64 } func (l *varintLengthField) decode(pd packetDecoder) error { var err error l.length, err = pd.getVarint() return err } func (l *varintLengthField) saveOffset(in int) { l.startOffset = in } func (l *varintLengthField) adjustLength(currOffset int) int { oldFieldSize := l.reserveLength() l.length = int64(currOffset - l.startOffset - oldFieldSize) return l.reserveLength() - oldFieldSize } func (l *varintLengthField) reserveLength() int { var tmp [binary.MaxVarintLen64]byte return binary.PutVarint(tmp[:], l.length) } func (l *varintLengthField) run(curOffset int, buf []byte) error { binary.PutVarint(buf[l.startOffset:], l.length) return nil } func (l *varintLengthField) check(curOffset int, buf []byte) error { if int64(curOffset-l.startOffset-l.reserveLength()) != l.length { return PacketDecodingError{"length field invalid"} } return nil } golang-github-ibm-sarama-1.43.2/list_groups_request.go000066400000000000000000000030031461256741300230150ustar00rootroot00000000000000package sarama type ListGroupsRequest struct { Version int16 StatesFilter []string // version 4 or later } func (r *ListGroupsRequest) encode(pe packetEncoder) error { if r.Version >= 4 { pe.putCompactArrayLength(len(r.StatesFilter)) for _, filter := range r.StatesFilter { err := pe.putCompactString(filter) if err != nil { return err } } } if r.Version >= 3 { pe.putEmptyTaggedFieldArray() } return nil } func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) { r.Version = version if r.Version >= 4 { filterLen, err := pd.getCompactArrayLength() if err != nil { return err } if filterLen > 0 { r.StatesFilter = make([]string, filterLen) for i := 0; i < filterLen; i++ { if r.StatesFilter[i], err = pd.getCompactString(); err != nil { return err } } } } if r.Version >= 3 { if _, err = pd.getEmptyTaggedFieldArray(); err != nil { return err } } return nil } func (r *ListGroupsRequest) key() int16 { return 16 } func (r *ListGroupsRequest) version() int16 { return r.Version } func (r *ListGroupsRequest) headerVersion() int16 { if r.Version >= 3 { return 2 } return 1 } func (r *ListGroupsRequest) isValidVersion() bool { return r.Version >= 0 && r.Version <= 4 } func (r *ListGroupsRequest) requiredVersion() KafkaVersion { switch r.Version { case 4: return V2_6_0_0 case 3: return V2_4_0_0 case 2: return V2_0_0_0 case 1: return V0_11_0_0 case 0: return V0_9_0_0 default: return V2_6_0_0 } } 
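// Illustrative sketch (editorial addition, not part of the upstream file): one plausible way to send
// a ListGroups v4 request with a StatesFilter through the exported Broker API. The broker address,
// the filter values, and the minimal error handling are placeholder assumptions, not upstream
// recommendations.
package main

import (
	"fmt"
	"log"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_6_0_0 // ListGroups v4 (StatesFilter) requires Kafka 2.6.0 or newer

	broker := sarama.NewBroker("localhost:9092") // placeholder address
	if err := broker.Open(cfg); err != nil {
		log.Fatal(err)
	}
	defer broker.Close()

	// Only return groups that are currently Stable or Empty.
	req := &sarama.ListGroupsRequest{
		Version:      4,
		StatesFilter: []string{"Stable", "Empty"},
	}
	resp, err := broker.ListGroups(req)
	if err != nil {
		log.Fatal(err)
	}
	if resp.Err != sarama.ErrNoError {
		log.Fatal(resp.Err)
	}
	for group, protocolType := range resp.Groups {
		// GroupsData carries the per-group state reported by v4+ brokers.
		fmt.Println(group, protocolType, resp.GroupsData[group].GroupState)
	}
}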
golang-github-ibm-sarama-1.43.2/list_groups_request_test.go000066400000000000000000000015151461256741300240620ustar00rootroot00000000000000package sarama import "testing" func TestListGroupsRequest(t *testing.T) { testRequest(t, "ListGroupsRequest", &ListGroupsRequest{}, []byte{}) testRequest(t, "ListGroupsRequest", &ListGroupsRequest{ Version: 1, }, []byte{}) testRequest(t, "ListGroupsRequest", &ListGroupsRequest{ Version: 2, }, []byte{}) testRequest(t, "ListGroupsRequest", &ListGroupsRequest{ Version: 3, }, []byte{ 0, // 0, // empty tag buffer }) testRequest(t, "ListGroupsRequest", &ListGroupsRequest{ Version: 4, }, []byte{ 1, // compact array length (0) 0, // empty tag buffer }) testRequest(t, "ListGroupsRequest", &ListGroupsRequest{ Version: 4, StatesFilter: []string{"Empty"}, }, []byte{ 2, // compact array length (1) 6, 'E', 'm', 'p', 't', 'y', // compact string 0, // empty tag buffer }) } golang-github-ibm-sarama-1.43.2/list_groups_response.go000066400000000000000000000061171461256741300231740ustar00rootroot00000000000000package sarama type ListGroupsResponse struct { Version int16 ThrottleTime int32 Err KError Groups map[string]string GroupsData map[string]GroupData // version 4 or later } type GroupData struct { GroupState string // version 4 or later } func (r *ListGroupsResponse) encode(pe packetEncoder) error { if r.Version >= 1 { pe.putInt32(r.ThrottleTime) } pe.putInt16(int16(r.Err)) if r.Version <= 2 { if err := pe.putArrayLength(len(r.Groups)); err != nil { return err } for groupId, protocolType := range r.Groups { if err := pe.putString(groupId); err != nil { return err } if err := pe.putString(protocolType); err != nil { return err } } } else { pe.putCompactArrayLength(len(r.Groups)) for groupId, protocolType := range r.Groups { if err := pe.putCompactString(groupId); err != nil { return err } if err := pe.putCompactString(protocolType); err != nil { return err } if r.Version >= 4 { groupData := r.GroupsData[groupId] if err := pe.putCompactString(groupData.GroupState); err != nil { return err } } } } return nil } func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error { r.Version = version if r.Version >= 1 { var err error if r.ThrottleTime, err = pd.getInt32(); err != nil { return err } } kerr, err := pd.getInt16() if err != nil { return err } r.Err = KError(kerr) var n int if r.Version <= 2 { n, err = pd.getArrayLength() } else { n, err = pd.getCompactArrayLength() } if err != nil { return err } for i := 0; i < n; i++ { if i == 0 { r.Groups = make(map[string]string) if r.Version >= 4 { r.GroupsData = make(map[string]GroupData) } } var groupId, protocolType string if r.Version <= 2 { groupId, err = pd.getString() if err != nil { return err } protocolType, err = pd.getString() if err != nil { return err } } else { groupId, err = pd.getCompactString() if err != nil { return err } protocolType, err = pd.getCompactString() if err != nil { return err } } r.Groups[groupId] = protocolType if r.Version >= 4 { groupState, err := pd.getCompactString() if err != nil { return err } r.GroupsData[groupId] = GroupData{ GroupState: groupState, } } if r.Version >= 3 { if _, err = pd.getEmptyTaggedFieldArray(); err != nil { return err } } } if r.Version >= 3 { if _, err = pd.getEmptyTaggedFieldArray(); err != nil { return err } } return nil } func (r *ListGroupsResponse) key() int16 { return 16 } func (r *ListGroupsResponse) version() int16 { return r.Version } func (r *ListGroupsResponse) headerVersion() int16 { if r.Version >= 3 { return 1 } return 0 } func (r 
*ListGroupsResponse) isValidVersion() bool { return r.Version >= 0 && r.Version <= 4 } func (r *ListGroupsResponse) requiredVersion() KafkaVersion { switch r.Version { case 4: return V2_6_0_0 case 3: return V2_4_0_0 case 2: return V2_0_0_0 case 1: return V0_11_0_0 case 0: return V0_9_0_0 default: return V2_6_0_0 } } golang-github-ibm-sarama-1.43.2/list_groups_response_test.go000066400000000000000000000044551461256741300242360ustar00rootroot00000000000000package sarama import ( "errors" "testing" ) var ( listGroupsResponseEmpty = []byte{ 0, 0, // no error 0, 0, 0, 0, // no groups } listGroupsResponseError = []byte{ 0, 31, // ErrClusterAuthorizationFailed 0, 0, 0, 0, // no groups } listGroupsResponseWithConsumer = []byte{ 0, 0, // no error 0, 0, 0, 1, // 1 group 0, 3, 'f', 'o', 'o', // group name 0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // protocol type } listGroupResponseV4 = []byte{ 0, 0, 0, 0, // no throttle time 0, 0, // no error 2, // compact array length (1) 4, 'f', 'o', 'o', // group name (compact string) 9, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // protocol type (compact string) 6, 'E', 'm', 'p', 't', 'y', // state (compact string) 0, // Empty tag buffer 0, // Empty tag buffer } ) func TestListGroupsResponse(t *testing.T) { groupInstanceId := "gid" _ = groupInstanceId var response *ListGroupsResponse response = new(ListGroupsResponse) testVersionDecodable(t, "no error", response, listGroupsResponseEmpty, 0) if !errors.Is(response.Err, ErrNoError) { t.Error("Expected no error, found:", response.Err) } if len(response.Groups) != 0 { t.Error("Expected no groups") } response = new(ListGroupsResponse) testVersionDecodable(t, "no error", response, listGroupsResponseError, 0) if !errors.Is(response.Err, ErrClusterAuthorizationFailed) { t.Error("Expected ErrClusterAuthorizationFailed, found:", response.Err) } if len(response.Groups) != 0 { t.Error("Expected no groups") } response = new(ListGroupsResponse) testVersionDecodable(t, "no error", response, listGroupsResponseWithConsumer, 0) if !errors.Is(response.Err, ErrNoError) { t.Error("Expected no error, found:", response.Err) } if len(response.Groups) != 1 { t.Error("Expected one group") } if response.Groups["foo"] != "consumer" { t.Error("Expected foo group to use consumer protocol") } response = new(ListGroupsResponse) testVersionDecodable(t, "no error", response, listGroupResponseV4, 4) if !errors.Is(response.Err, ErrNoError) { t.Error("Expected no error, found:", response.Err) } if len(response.Groups) != 1 { t.Error("Expected one group") } if response.Groups["foo"] != "consumer" { t.Error("Expected foo group to use consumer protocol") } if response.GroupsData["foo"].GroupState != "Empty" { t.Error("Expected foo group to have Empty state") } } golang-github-ibm-sarama-1.43.2/list_partition_reassignments_request.go000066400000000000000000000041121461256741300264530ustar00rootroot00000000000000package sarama type ListPartitionReassignmentsRequest struct { TimeoutMs int32 blocks map[string][]int32 Version int16 } func (r *ListPartitionReassignmentsRequest) encode(pe packetEncoder) error { pe.putInt32(r.TimeoutMs) pe.putCompactArrayLength(len(r.blocks)) for topic, partitions := range r.blocks { if err := pe.putCompactString(topic); err != nil { return err } if err := pe.putCompactInt32Array(partitions); err != nil { return err } pe.putEmptyTaggedFieldArray() } pe.putEmptyTaggedFieldArray() return nil } func (r *ListPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) { r.Version = version if r.TimeoutMs, err = pd.getInt32(); err != nil { return err } topicCount, err :=
pd.getCompactArrayLength() if err != nil { return err } if topicCount > 0 { r.blocks = make(map[string][]int32) for i := 0; i < topicCount; i++ { topic, err := pd.getCompactString() if err != nil { return err } partitionCount, err := pd.getCompactArrayLength() if err != nil { return err } r.blocks[topic] = make([]int32, partitionCount) for j := 0; j < partitionCount; j++ { partition, err := pd.getInt32() if err != nil { return err } r.blocks[topic][j] = partition } if _, err := pd.getEmptyTaggedFieldArray(); err != nil { return err } } } if _, err := pd.getEmptyTaggedFieldArray(); err != nil { return err } return } func (r *ListPartitionReassignmentsRequest) key() int16 { return 46 } func (r *ListPartitionReassignmentsRequest) version() int16 { return r.Version } func (r *ListPartitionReassignmentsRequest) headerVersion() int16 { return 2 } func (r *ListPartitionReassignmentsRequest) isValidVersion() bool { return r.Version == 0 } func (r *ListPartitionReassignmentsRequest) requiredVersion() KafkaVersion { return V2_4_0_0 } func (r *ListPartitionReassignmentsRequest) AddBlock(topic string, partitionIDs []int32) { if r.blocks == nil { r.blocks = make(map[string][]int32) } if r.blocks[topic] == nil { r.blocks[topic] = partitionIDs } } golang-github-ibm-sarama-1.43.2/list_partition_reassignments_request_test.go000066400000000000000000000013611461256741300275150ustar00rootroot00000000000000package sarama import "testing" var listPartitionReassignmentsRequestOneBlock = []byte{ 0, 0, 39, 16, // timeout 10000 2, // 2-1=1 block 6, 116, 111, 112, 105, 99, // topic name "topic" as compact string 2, // 2-1=1 partitions 0, 0, 0, 0, // partitionId 0, 0, // empty tagged fields } func TestListPartitionReassignmentRequest(t *testing.T) { var request *ListPartitionReassignmentsRequest = &ListPartitionReassignmentsRequest{ TimeoutMs: int32(10000), Version: int16(0), } request.AddBlock("topic", []int32{0}) testRequest(t, "one block", request, listPartitionReassignmentsRequestOneBlock) request.AddBlock("topic2", []int32{1, 2}) testRequestWithoutByteComparison(t, "two blocks", request) } golang-github-ibm-sarama-1.43.2/list_partition_reassignments_response.go000066400000000000000000000103241461256741300266230ustar00rootroot00000000000000package sarama import "time" type PartitionReplicaReassignmentsStatus struct { Replicas []int32 AddingReplicas []int32 RemovingReplicas []int32 } func (b *PartitionReplicaReassignmentsStatus) encode(pe packetEncoder) error { if err := pe.putCompactInt32Array(b.Replicas); err != nil { return err } if err := pe.putCompactInt32Array(b.AddingReplicas); err != nil { return err } if err := pe.putCompactInt32Array(b.RemovingReplicas); err != nil { return err } pe.putEmptyTaggedFieldArray() return nil } func (b *PartitionReplicaReassignmentsStatus) decode(pd packetDecoder) (err error) { if b.Replicas, err = pd.getCompactInt32Array(); err != nil { return err } if b.AddingReplicas, err = pd.getCompactInt32Array(); err != nil { return err } if b.RemovingReplicas, err = pd.getCompactInt32Array(); err != nil { return err } if _, err := pd.getEmptyTaggedFieldArray(); err != nil { return err } return err } type ListPartitionReassignmentsResponse struct { Version int16 ThrottleTimeMs int32 ErrorCode KError ErrorMessage *string TopicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus } func (r *ListPartitionReassignmentsResponse) AddBlock(topic string, partition int32, replicas, addingReplicas, removingReplicas []int32) { if r.TopicStatus == nil { r.TopicStatus = 
make(map[string]map[int32]*PartitionReplicaReassignmentsStatus) } partitions := r.TopicStatus[topic] if partitions == nil { partitions = make(map[int32]*PartitionReplicaReassignmentsStatus) r.TopicStatus[topic] = partitions } partitions[partition] = &PartitionReplicaReassignmentsStatus{Replicas: replicas, AddingReplicas: addingReplicas, RemovingReplicas: removingReplicas} } func (r *ListPartitionReassignmentsResponse) encode(pe packetEncoder) error { pe.putInt32(r.ThrottleTimeMs) pe.putInt16(int16(r.ErrorCode)) if err := pe.putNullableCompactString(r.ErrorMessage); err != nil { return err } pe.putCompactArrayLength(len(r.TopicStatus)) for topic, partitions := range r.TopicStatus { if err := pe.putCompactString(topic); err != nil { return err } pe.putCompactArrayLength(len(partitions)) for partition, block := range partitions { pe.putInt32(partition) if err := block.encode(pe); err != nil { return err } } pe.putEmptyTaggedFieldArray() } pe.putEmptyTaggedFieldArray() return nil } func (r *ListPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) { r.Version = version if r.ThrottleTimeMs, err = pd.getInt32(); err != nil { return err } kerr, err := pd.getInt16() if err != nil { return err } r.ErrorCode = KError(kerr) if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil { return err } numTopics, err := pd.getCompactArrayLength() if err != nil { return err } r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus, numTopics) for i := 0; i < numTopics; i++ { topic, err := pd.getCompactString() if err != nil { return err } ongoingPartitionReassignments, err := pd.getCompactArrayLength() if err != nil { return err } r.TopicStatus[topic] = make(map[int32]*PartitionReplicaReassignmentsStatus, ongoingPartitionReassignments) for j := 0; j < ongoingPartitionReassignments; j++ { partition, err := pd.getInt32() if err != nil { return err } block := &PartitionReplicaReassignmentsStatus{} if err := block.decode(pd); err != nil { return err } r.TopicStatus[topic][partition] = block } if _, err := pd.getEmptyTaggedFieldArray(); err != nil { return err } } if _, err := pd.getEmptyTaggedFieldArray(); err != nil { return err } return nil } func (r *ListPartitionReassignmentsResponse) key() int16 { return 46 } func (r *ListPartitionReassignmentsResponse) version() int16 { return r.Version } func (r *ListPartitionReassignmentsResponse) headerVersion() int16 { return 1 } func (r *ListPartitionReassignmentsResponse) isValidVersion() bool { return r.Version == 0 } func (r *ListPartitionReassignmentsResponse) requiredVersion() KafkaVersion { return V2_4_0_0 } func (r *ListPartitionReassignmentsResponse) throttleTime() time.Duration { return time.Duration(r.ThrottleTimeMs) * time.Millisecond } golang-github-ibm-sarama-1.43.2/list_partition_reassignments_response_test.go000066400000000000000000000017001461256741300276600ustar00rootroot00000000000000package sarama import "testing" var listPartitionReassignmentsResponse = []byte{ 0, 0, 39, 16, // ThrottleTimeMs 10000 0, 0, // errorcode 0, // null string 2, // block array length 1 6, 116, 111, 112, 105, 99, // topic name "topic" 2, // partition array length 1 0, 0, 0, 1, // partitionId 3, 0, 0, 3, 232, 0, 0, 3, 233, // replicas [1000, 1001] 3, 0, 0, 3, 234, 0, 0, 3, 235, // addingReplicas [1002, 1003] 3, 0, 0, 3, 236, 0, 0, 3, 237, // addingReplicas [1004, 1005] 0, 0, 0, // empty tagged fields } func TestListPartitionReassignmentResponse(t *testing.T) { var response *ListPartitionReassignmentsResponse = 
&ListPartitionReassignmentsResponse{ ThrottleTimeMs: int32(10000), Version: int16(0), } response.AddBlock("topic", 1, []int32{1000, 1001}, []int32{1002, 1003}, []int32{1004, 1005}) testResponse(t, "one topic", response, listPartitionReassignmentsResponse) } golang-github-ibm-sarama-1.43.2/logger_test.go000066400000000000000000000012061461256741300212140ustar00rootroot00000000000000package sarama import "testing" // testLogger implements the StdLogger interface and records the text in the // logs of the given T passed from Test functions. // and records the text in the error log. // // nolint:unused type testLogger struct { t *testing.T } // nolint:unused func (l *testLogger) Print(v ...interface{}) { if l.t != nil { l.t.Helper() l.t.Log(v...) } } // nolint:unused func (l *testLogger) Printf(format string, v ...interface{}) { if l.t != nil { l.t.Helper() l.t.Logf(format, v...) } } // nolint:unused func (l *testLogger) Println(v ...interface{}) { if l.t != nil { l.t.Helper() l.t.Log(v...) } } golang-github-ibm-sarama-1.43.2/message.go000066400000000000000000000110561461256741300203260ustar00rootroot00000000000000package sarama import ( "fmt" "time" ) const ( // CompressionNone no compression CompressionNone CompressionCodec = iota // CompressionGZIP compression using GZIP CompressionGZIP // CompressionSnappy compression using snappy CompressionSnappy // CompressionLZ4 compression using LZ4 CompressionLZ4 // CompressionZSTD compression using ZSTD CompressionZSTD // The lowest 3 bits contain the compression codec used for the message compressionCodecMask int8 = 0x07 // Bit 3 set for "LogAppend" timestamps timestampTypeMask = 0x08 // CompressionLevelDefault is the constant to use in CompressionLevel // to have the default compression level for any codec. The value is picked // that we don't use any existing compression levels. CompressionLevelDefault = -1000 ) // CompressionCodec represents the various compression codecs recognized by Kafka in messages. type CompressionCodec int8 func (cc CompressionCodec) String() string { return []string{ "none", "gzip", "snappy", "lz4", "zstd", }[int(cc)] } // UnmarshalText returns a CompressionCodec from its string representation. func (cc *CompressionCodec) UnmarshalText(text []byte) error { codecs := map[string]CompressionCodec{ "none": CompressionNone, "gzip": CompressionGZIP, "snappy": CompressionSnappy, "lz4": CompressionLZ4, "zstd": CompressionZSTD, } codec, ok := codecs[string(text)] if !ok { return fmt.Errorf("cannot parse %q as a compression codec", string(text)) } *cc = codec return nil } // MarshalText transforms a CompressionCodec into its string representation. 
func (cc CompressionCodec) MarshalText() ([]byte, error) { return []byte(cc.String()), nil } // Message is a kafka message type type Message struct { Codec CompressionCodec // codec used to compress the message contents CompressionLevel int // compression level LogAppendTime bool // the used timestamp is LogAppendTime Key []byte // the message key, may be nil Value []byte // the message contents Set *MessageSet // the message set a message might wrap Version int8 // v1 requires Kafka 0.10 Timestamp time.Time // the timestamp of the message (version 1+ only) compressedCache []byte compressedSize int // used for computing the compression ratio metrics } func (m *Message) encode(pe packetEncoder) error { pe.push(newCRC32Field(crcIEEE)) pe.putInt8(m.Version) attributes := int8(m.Codec) & compressionCodecMask if m.LogAppendTime { attributes |= timestampTypeMask } pe.putInt8(attributes) if m.Version >= 1 { if err := (Timestamp{&m.Timestamp}).encode(pe); err != nil { return err } } err := pe.putBytes(m.Key) if err != nil { return err } var payload []byte if m.compressedCache != nil { payload = m.compressedCache m.compressedCache = nil } else if m.Value != nil { payload, err = compress(m.Codec, m.CompressionLevel, m.Value) if err != nil { return err } m.compressedCache = payload // Keep in mind the compressed payload size for metric gathering m.compressedSize = len(payload) } if err = pe.putBytes(payload); err != nil { return err } return pe.pop() } func (m *Message) decode(pd packetDecoder) (err error) { crc32Decoder := acquireCrc32Field(crcIEEE) defer releaseCrc32Field(crc32Decoder) err = pd.push(crc32Decoder) if err != nil { return err } m.Version, err = pd.getInt8() if err != nil { return err } if m.Version > 1 { return PacketDecodingError{fmt.Sprintf("unknown magic byte (%v)", m.Version)} } attribute, err := pd.getInt8() if err != nil { return err } m.Codec = CompressionCodec(attribute & compressionCodecMask) m.LogAppendTime = attribute&timestampTypeMask == timestampTypeMask if m.Version == 1 { if err := (Timestamp{&m.Timestamp}).decode(pd); err != nil { return err } } m.Key, err = pd.getBytes() if err != nil { return err } m.Value, err = pd.getBytes() if err != nil { return err } // Required for deep equal assertion during tests but might be useful // for future metrics about the compression ratio in fetch requests m.compressedSize = len(m.Value) if m.Value != nil && m.Codec != CompressionNone { m.Value, err = decompress(m.Codec, m.Value) if err != nil { return err } if err := m.decodeSet(); err != nil { return err } } return pd.pop() } // decodes a message set from a previously encoded bulk-message func (m *Message) decodeSet() (err error) { pd := realDecoder{raw: m.Value} m.Set = &MessageSet{} return m.Set.decode(&pd) } golang-github-ibm-sarama-1.43.2/message_set.go000066400000000000000000000045621461256741300212050ustar00rootroot00000000000000package sarama import "errors" type MessageBlock struct { Offset int64 Msg *Message } // Messages convenience helper which returns either all the // messages that are wrapped in this block func (msb *MessageBlock) Messages() []*MessageBlock { if msb.Msg.Set != nil { return msb.Msg.Set.Messages } return []*MessageBlock{msb} } func (msb *MessageBlock) encode(pe packetEncoder) error { pe.putInt64(msb.Offset) pe.push(&lengthField{}) err := msb.Msg.encode(pe) if err != nil { return err } return pe.pop() } func (msb *MessageBlock) decode(pd packetDecoder) (err error) { if msb.Offset, err = pd.getInt64(); err != nil { return err } lengthDecoder :=
acquireLengthField() defer releaseLengthField(lengthDecoder) if err = pd.push(lengthDecoder); err != nil { return err } msb.Msg = new(Message) if err = msb.Msg.decode(pd); err != nil { return err } if err = pd.pop(); err != nil { return err } return nil } type MessageSet struct { PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock OverflowMessage bool // whether the set on the wire contained an overflow message Messages []*MessageBlock } func (ms *MessageSet) encode(pe packetEncoder) error { for i := range ms.Messages { err := ms.Messages[i].encode(pe) if err != nil { return err } } return nil } func (ms *MessageSet) decode(pd packetDecoder) (err error) { ms.Messages = nil for pd.remaining() > 0 { magic, err := magicValue(pd) if err != nil { if errors.Is(err, ErrInsufficientData) { ms.PartialTrailingMessage = true return nil } return err } if magic > 1 { return nil } msb := new(MessageBlock) err = msb.decode(pd) if err == nil { ms.Messages = append(ms.Messages, msb) } else if errors.Is(err, ErrInsufficientData) { // As an optimization the server is allowed to return a partial message at the // end of the message set. Clients should handle this case. So we just ignore such things. if msb.Offset == -1 { // This is an overflow message caused by chunked down conversion ms.OverflowMessage = true } else { ms.PartialTrailingMessage = true } return nil } else { return err } } return nil } func (ms *MessageSet) addMessage(msg *Message) { block := new(MessageBlock) block.Msg = msg ms.Messages = append(ms.Messages, block) } golang-github-ibm-sarama-1.43.2/message_test.go000066400000000000000000000221551461256741300213670ustar00rootroot00000000000000package sarama import ( "testing" "time" ) var ( emptyMessage = []byte{ 167, 236, 104, 3, // CRC 0x00, // magic version byte 0x00, // attribute flags 0xFF, 0xFF, 0xFF, 0xFF, // key 0xFF, 0xFF, 0xFF, 0xFF, } // value emptyV1Message = []byte{ 204, 47, 121, 217, // CRC 0x01, // magic version byte 0x00, // attribute flags 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // timestamp 0xFF, 0xFF, 0xFF, 0xFF, // key 0xFF, 0xFF, 0xFF, 0xFF, } // value emptyV2Message = []byte{ 167, 236, 104, 3, // CRC 0x02, // magic version byte 0x00, // attribute flags 0xFF, 0xFF, 0xFF, 0xFF, // key 0xFF, 0xFF, 0xFF, 0xFF, } // value emptyGzipMessage = []byte{ 196, 46, 92, 177, // CRC 0x00, // magic version byte 0x01, // attribute flags 0xFF, 0xFF, 0xFF, 0xFF, // key // value 0x00, 0x00, 0x00, 0x14, 0x1f, 0x8b, 0x08, 0, 0, 9, 110, 136, 0, 255, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, } emptyLZ4Message = []byte{ 132, 219, 238, 101, // CRC 0x01, // version byte 0x03, // attribute flags: lz4 0, 0, 1, 88, 141, 205, 89, 56, // timestamp 0xFF, 0xFF, 0xFF, 0xFF, // key 0x00, 0x00, 0x00, 0x0f, // len 0x04, 0x22, 0x4D, 0x18, // LZ4 magic number 100, // LZ4 flags: version 01, block independent, content checksum 112, 185, 0, 0, 0, 0, // LZ4 data 5, 93, 204, 2, // LZ4 checksum } emptyZSTDMessage = []byte{ 180, 172, 84, 179, // CRC 0x01, // version byte 0x04, // attribute flags: zstd 0, 0, 1, 88, 141, 205, 89, 56, // timestamp 0xFF, 0xFF, 0xFF, 0xFF, // key 0x00, 0x00, 0x00, 0x09, // len // ZSTD data 0x28, 0xb5, 0x2f, 0xfd, 0x20, 0x00, 0x01, 0x00, 0x00, } emptyBulkSnappyMessage = []byte{ 180, 47, 53, 209, // CRC 0x00, // magic version byte 0x02, // attribute flags 0xFF, 0xFF, 0xFF, 0xFF, // key 0, 0, 0, 42, 130, 83, 78, 65, 80, 80, 89, 0, // SNAPPY magic 0, 0, 0, 1, // min version 0, 0, 0, 1, // default version 0, 0, 0, 22, 52, 0, 0, 25, 1, 16, 14, 227, 
138, 104, 118, 25, 15, 13, 1, 8, 1, 0, 0, 62, 26, 0, } emptyBulkGzipMessage = []byte{ 139, 160, 63, 141, // CRC 0x00, // magic version byte 0x01, // attribute flags 0xFF, 0xFF, 0xFF, 0xFF, // key 0x00, 0x00, 0x00, 0x27, // len 0x1f, 0x8b, // Gzip Magic 0x08, // deflate compressed 0, 0, 0, 0, 0, 0, 0, 99, 96, 128, 3, 190, 202, 112, 143, 7, 12, 12, 255, 129, 0, 33, 200, 192, 136, 41, 3, 0, 199, 226, 155, 70, 52, 0, 0, 0, } emptyBulkLZ4Message = []byte{ 246, 12, 188, 129, // CRC 0x01, // Version 0x03, // attribute flags (LZ4) 255, 255, 249, 209, 212, 181, 73, 201, // timestamp 0xFF, 0xFF, 0xFF, 0xFF, // key 0x00, 0x00, 0x00, 0x47, // len 0x04, 0x22, 0x4D, 0x18, // magic number lz4 100, // lz4 flags 01100100 // version: 01, block indep: 1, block checksum: 0, content size: 0, content checksum: 1, reserved: 00 112, 185, 52, 0, 0, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14, 121, 87, 72, 224, 0, 0, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 14, 121, 87, 72, 224, 0, 0, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 71, 129, 23, 111, // LZ4 checksum } emptyBulkZSTDMessage = []byte{ 203, 151, 133, 28, // CRC 0x01, // Version 0x04, // attribute flags (ZSTD) 255, 255, 249, 209, 212, 181, 73, 201, // timestamp 0xFF, 0xFF, 0xFF, 0xFF, // key 0x00, 0x00, 0x00, 0x26, // len // ZSTD data 0x28, 0xb5, 0x2f, 0xfd, 0x24, 0x34, 0xcd, 0x0, 0x0, 0x78, 0x0, 0x0, 0xe, 0x79, 0x57, 0x48, 0xe0, 0x0, 0x0, 0xff, 0xff, 0xff, 0xff, 0x0, 0x1, 0x3, 0x0, 0x3d, 0xbd, 0x0, 0x3b, 0x15, 0x0, 0xb, 0xd2, 0x34, 0xc1, 0x78, } ) func TestMessageEncoding(t *testing.T) { message := Message{} testEncodable(t, "empty", &message, emptyMessage) message.Value = []byte{} message.Codec = CompressionGZIP testEncodable(t, "empty gzip", &message, emptyGzipMessage) message.Value = []byte{} message.Codec = CompressionLZ4 message.Timestamp = time.Unix(1479847795, 0) message.Version = 1 testEncodable(t, "empty lz4", &message, emptyLZ4Message) message.Value = []byte{} message.Codec = CompressionZSTD message.Timestamp = time.Unix(1479847795, 0) message.Version = 1 testEncodable(t, "empty zstd", &message, emptyZSTDMessage) } func TestMessageDecoding(t *testing.T) { message := Message{} testDecodable(t, "empty", &message, emptyMessage) if message.Codec != CompressionNone { t.Error("Decoding produced compression codec where there was none.") } if message.Key != nil { t.Error("Decoding produced key where there was none.") } if message.Value != nil { t.Error("Decoding produced value where there was none.") } if message.Set != nil { t.Error("Decoding produced set where there was none.") } testDecodable(t, "empty gzip", &message, emptyGzipMessage) if message.Codec != CompressionGZIP { t.Error("Decoding produced incorrect compression codec (was gzip).") } if message.Key != nil { t.Error("Decoding produced key where there was none.") } if message.Value == nil || len(message.Value) != 0 { t.Error("Decoding produced nil or content-ful value where there was an empty array.") } } func TestMessageDecodingBulkSnappy(t *testing.T) { message := Message{} testDecodable(t, "bulk snappy", &message, emptyBulkSnappyMessage) if message.Codec != CompressionSnappy { t.Errorf("Decoding produced codec %d, but expected %d.", message.Codec, CompressionSnappy) } if message.Key != nil { t.Errorf("Decoding produced key %+v, but none was expected.", message.Key) } if message.Set == nil { t.Error("Decoding produced no set, but one was expected.") } else if len(message.Set.Messages) != 2 { t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", 
len(message.Set.Messages)) } } func TestMessageDecodingBulkGzip(t *testing.T) { message := Message{} testDecodable(t, "bulk gzip", &message, emptyBulkGzipMessage) if message.Codec != CompressionGZIP { t.Errorf("Decoding produced codec %d, but expected %d.", message.Codec, CompressionGZIP) } if message.Key != nil { t.Errorf("Decoding produced key %+v, but none was expected.", message.Key) } if message.Set == nil { t.Error("Decoding produced no set, but one was expected.") } else if len(message.Set.Messages) != 2 { t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages)) } } func TestMessageDecodingBulkLZ4(t *testing.T) { message := Message{} testDecodable(t, "bulk lz4", &message, emptyBulkLZ4Message) if message.Codec != CompressionLZ4 { t.Errorf("Decoding produced codec %d, but expected %d.", message.Codec, CompressionLZ4) } if message.Key != nil { t.Errorf("Decoding produced key %+v, but none was expected.", message.Key) } if message.Set == nil { t.Error("Decoding produced no set, but one was expected.") } else if len(message.Set.Messages) != 2 { t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages)) } } func TestMessageDecodingBulkZSTD(t *testing.T) { message := Message{} testDecodable(t, "bulk zstd", &message, emptyBulkZSTDMessage) if message.Codec != CompressionZSTD { t.Errorf("Decoding produced codec %d, but expected %d.", message.Codec, CompressionZSTD) } if message.Key != nil { t.Errorf("Decoding produced key %+v, but none was expected.", message.Key) } if message.Set == nil { t.Error("Decoding produced no set, but one was expected.") } else if len(message.Set.Messages) != 2 { t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages)) } } func TestMessageDecodingVersion1(t *testing.T) { message := Message{Version: 1} testDecodable(t, "decoding empty v1 message", &message, emptyV1Message) } func TestMessageDecodingUnknownVersions(t *testing.T) { message := Message{Version: 2} err := decode(emptyV2Message, &message, nil) if err == nil { t.Error("Decoding did not produce an error for an unknown magic byte") } if err.Error() != "kafka: error decoding packet: unknown magic byte (2)" { t.Error("Decoding an unknown magic byte produced an unknown error ", err) } } func TestCompressionCodecUnmarshal(t *testing.T) { cases := []struct { Input string Expected CompressionCodec ExpectedError bool }{ {"none", CompressionNone, false}, {"zstd", CompressionZSTD, false}, {"gzip", CompressionGZIP, false}, {"unknown", CompressionNone, true}, } for _, c := range cases { var cc CompressionCodec err := cc.UnmarshalText([]byte(c.Input)) if err != nil && !c.ExpectedError { t.Errorf("UnmarshalText(%q) error:\n%+v", c.Input, err) continue } if err == nil && c.ExpectedError { t.Errorf("UnmarshalText(%q) got %v but expected error", c.Input, cc) continue } if cc != c.Expected { t.Errorf("UnmarshalText(%q) got %v but expected %v", c.Input, cc, c.Expected) continue } } } golang-github-ibm-sarama-1.43.2/metadata_request.go000066400000000000000000000122531461256741300222320ustar00rootroot00000000000000package sarama import "encoding/base64" type Uuid [16]byte func (u Uuid) String() string { return base64.URLEncoding.WithPadding(base64.NoPadding).EncodeToString(u[:]) } var NullUUID = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} type MetadataRequest struct { // Version defines the protocol version to use for encode and decode Version int16 // Topics contains the topics to fetch 
metadata for. Topics []string // AllowAutoTopicCreation indicates whether the broker may auto-create topics that we requested which do not already exist, if it is configured to do so. AllowAutoTopicCreation bool IncludeClusterAuthorizedOperations bool // version 8 and up IncludeTopicAuthorizedOperations bool // version 8 and up } func NewMetadataRequest(version KafkaVersion, topics []string) *MetadataRequest { m := &MetadataRequest{Topics: topics} if version.IsAtLeast(V2_8_0_0) { m.Version = 10 } else if version.IsAtLeast(V2_4_0_0) { m.Version = 9 } else if version.IsAtLeast(V2_3_0_0) { m.Version = 8 } else if version.IsAtLeast(V2_1_0_0) { m.Version = 7 } else if version.IsAtLeast(V2_0_0_0) { m.Version = 6 } else if version.IsAtLeast(V1_0_0_0) { m.Version = 5 } else if version.IsAtLeast(V0_11_0_0) { m.Version = 4 } else if version.IsAtLeast(V0_10_1_0) { m.Version = 2 } else if version.IsAtLeast(V0_10_0_0) { m.Version = 1 } return m } func (r *MetadataRequest) encode(pe packetEncoder) (err error) { if r.Version < 0 || r.Version > 10 { return PacketEncodingError{"invalid or unsupported MetadataRequest version field"} } if r.Version == 0 || len(r.Topics) > 0 { if r.Version < 9 { err := pe.putArrayLength(len(r.Topics)) if err != nil { return err } for i := range r.Topics { err = pe.putString(r.Topics[i]) if err != nil { return err } } } else if r.Version == 9 { pe.putCompactArrayLength(len(r.Topics)) for _, topicName := range r.Topics { if err := pe.putCompactString(topicName); err != nil { return err } pe.putEmptyTaggedFieldArray() } } else { // r.Version = 10 pe.putCompactArrayLength(len(r.Topics)) for _, topicName := range r.Topics { if err := pe.putRawBytes(NullUUID); err != nil { return err } // Avoid implicit memory aliasing in for loop tn := topicName if err := pe.putNullableCompactString(&tn); err != nil { return err } pe.putEmptyTaggedFieldArray() } } } else { if r.Version < 9 { pe.putInt32(-1) } else { pe.putCompactArrayLength(-1) } } if r.Version > 3 { pe.putBool(r.AllowAutoTopicCreation) } if r.Version > 7 { pe.putBool(r.IncludeClusterAuthorizedOperations) pe.putBool(r.IncludeTopicAuthorizedOperations) } if r.Version > 8 { pe.putEmptyTaggedFieldArray() } return nil } func (r *MetadataRequest) decode(pd packetDecoder, version int16) (err error) { r.Version = version if r.Version < 9 { size, err := pd.getInt32() if err != nil { return err } if size > 0 { r.Topics = make([]string, size) for i := range r.Topics { topic, err := pd.getString() if err != nil { return err } r.Topics[i] = topic } } } else if r.Version == 9 { size, err := pd.getCompactArrayLength() if err != nil { return err } if size > 0 { r.Topics = make([]string, size) } for i := range r.Topics { topic, err := pd.getCompactString() if err != nil { return err } r.Topics[i] = topic if _, err := pd.getEmptyTaggedFieldArray(); err != nil { return err } } } else { // version 10+ size, err := pd.getCompactArrayLength() if err != nil { return err } if size > 0 { r.Topics = make([]string, size) } for i := range r.Topics { if _, err = pd.getRawBytes(16); err != nil { // skip UUID return err } topic, err := pd.getCompactNullableString() if err != nil { return err } if topic != nil { r.Topics[i] = *topic } if _, err := pd.getEmptyTaggedFieldArray(); err != nil { return err } } } if r.Version >= 4 { if r.AllowAutoTopicCreation, err = pd.getBool(); err != nil { return err } } if r.Version > 7 { includeClusterAuthz, err := pd.getBool() if err != nil { return err } r.IncludeClusterAuthorizedOperations = includeClusterAuthz
includeTopicAuthz, err := pd.getBool() if err != nil { return err } r.IncludeTopicAuthorizedOperations = includeTopicAuthz } if r.Version > 8 { if _, err := pd.getEmptyTaggedFieldArray(); err != nil { return err } } return nil } func (r *MetadataRequest) key() int16 { return 3 } func (r *MetadataRequest) version() int16 { return r.Version } func (r *MetadataRequest) headerVersion() int16 { if r.Version >= 9 { return 2 } return 1 } func (r *MetadataRequest) isValidVersion() bool { return r.Version >= 0 && r.Version <= 10 } func (r *MetadataRequest) requiredVersion() KafkaVersion { switch r.Version { case 10: return V2_8_0_0 case 9: return V2_4_0_0 case 8: return V2_3_0_0 case 7: return V2_1_0_0 case 6: return V2_0_0_0 case 5: return V1_0_0_0 case 3, 4: return V0_11_0_0 case 2: return V0_10_1_0 case 1: return V0_10_0_0 case 0: return V0_8_2_0 default: return V2_8_0_0 } } golang-github-ibm-sarama-1.43.2/metadata_request_test.go000066400000000000000000000230161461256741300232700ustar00rootroot00000000000000package sarama import "testing" var ( // The v0 metadata request has a non-nullable array of topic names // to request metadata for. An empty array fetches metadata for all topics metadataRequestNoTopicsV0 = []byte{ 0x00, 0x00, 0x00, 0x00, } metadataRequestOneTopicV0 = []byte{ 0x00, 0x00, 0x00, 0x01, 0x00, 0x06, 't', 'o', 'p', 'i', 'c', '1', } metadataRequestThreeTopicsV0 = []byte{ 0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 'f', 'o', 'o', 0x00, 0x03, 'b', 'a', 'r', 0x00, 0x03, 'b', 'a', 'z', } // The v1 metadata request is the same as v0 except that the array is now // nullable and should be explicitly null if all topics are required (an // empty list requests no topics) metadataRequestNoTopicsV1 = []byte{ 0xff, 0xff, 0xff, 0xff, } metadataRequestOneTopicV1 = metadataRequestOneTopicV0 metadataRequestThreeTopicsV1 = metadataRequestThreeTopicsV0 // The v2 metadata request is the same as v1. An additional field for // cluster id has been added to the v2 metadata response metadataRequestNoTopicsV2 = metadataRequestNoTopicsV1 metadataRequestOneTopicV2 = metadataRequestOneTopicV1 metadataRequestThreeTopicsV2 = metadataRequestThreeTopicsV1 // The v3 metadata request is the same as v1 and v2. An additional field // for throttle time has been added to the v3 metadata response metadataRequestNoTopicsV3 = metadataRequestNoTopicsV2 metadataRequestOneTopicV3 = metadataRequestOneTopicV2 metadataRequestThreeTopicsV3 = metadataRequestThreeTopicsV2 // The v4 metadata request has an additional field for allowing auto topic // creation. The response is the same as v3. metadataRequestNoTopicsV4 = append(metadataRequestNoTopicsV1, byte(0)) metadataRequestAutoCreateV4 = append(metadataRequestOneTopicV3, byte(1)) metadataRequestNoAutoCreateV4 = append(metadataRequestOneTopicV3, byte(0)) // The v5 metadata request is the same as v4. An additional field for // offline_replicas has been added to the v5 metadata response metadataRequestNoTopicsV5 = append(metadataRequestNoTopicsV1, byte(0)) metadataRequestAutoCreateV5 = append(metadataRequestOneTopicV3, byte(1)) metadataRequestNoAutoCreateV5 = append(metadataRequestOneTopicV3, byte(0)) // The v6 metadata request and response are the same as v5. I know, right. metadataRequestNoTopicsV6 = metadataRequestNoTopicsV5 metadataRequestAutoCreateV6 = metadataRequestAutoCreateV5 metadataRequestNoAutoCreateV6 = metadataRequestNoAutoCreateV5 // The v7 metadata request is the same as v6. An additional field for // leader epoch has been added to the partition metadata in the v7 response. 
metadataRequestNoTopicsV7 = metadataRequestNoTopicsV6 metadataRequestAutoCreateV7 = metadataRequestAutoCreateV6 metadataRequestNoAutoCreateV7 = metadataRequestNoAutoCreateV6 // The v8 metadata request has additional fields for including cluster authorized operations // and including topic authorized operations. An additional field for cluster authorized operations // has been added to the v8 metadata response, and an additional field for topic authorized operations // has been added to the topic metadata in the v8 metadata response. metadataRequestNoTopicsV8 = append(metadataRequestNoTopicsV7, []byte{0, 0}...) metadataRequestAutoCreateV8 = append(metadataRequestAutoCreateV7, []byte{0, 0}...) metadataRequestNoAutoCreateV8 = append(metadataRequestNoAutoCreateV7, []byte{0, 0}...) // Appending to an empty slice means we are creating a new backing array, rather than updating the backing array // for the slice metadataRequestAutoCreateV7 metadataRequestAutoCreateClusterAuthTopicAuthV8 = append(append([]byte{}, metadataRequestAutoCreateV7...), []byte{1, 1}...) // In v9 tag buffers have been added to the end of arrays, and various types have been replaced with compact types. metadataRequestNoTopicsV9 = []byte{ 0x00, 0x00, 0x00, 0x00, 0x00, } metadataRequestOneTopicV9 = []byte{ 2, 7, 't', 'o', 'p', 'i', 'c', '1', 0, 0, 0, 0, 0, } metadataRequestOneTopicAutoCreateTopicV9 = []byte{ 2, 7, 't', 'o', 'p', 'i', 'c', '1', 0, 1, 0, 1, 0, } // v10 added topic UUIDs to the metadata request and responses, and made the topic name nullable in the request. metadataRequestNoTopicsV10 = metadataRequestNoTopicsV9 metadataRequestTwoTopicsV10 = []byte{ 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 't', 'o', 'p', 'i', 'c', '1', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 't', 'o', 'p', 'i', 'c', '2', 0, 0, 0, 0, 0, } metadataRequestAutoCreateClusterAuthTopicAuthV10 = []byte{ 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 't', 'o', 'p', 'i', 'c', '1', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 't', 'o', 'p', 'i', 'c', '2', 0, 1, 1, 1, 0, } ) func TestMetadataRequestV0(t *testing.T) { request := new(MetadataRequest) testRequest(t, "no topics", request, metadataRequestNoTopicsV0) request.Topics = []string{"topic1"} testRequest(t, "one topic", request, metadataRequestOneTopicV0) request.Topics = []string{"foo", "bar", "baz"} testRequest(t, "three topics", request, metadataRequestThreeTopicsV0) } func TestMetadataRequestV1(t *testing.T) { request := new(MetadataRequest) request.Version = 1 testRequest(t, "no topics", request, metadataRequestNoTopicsV1) request.Topics = []string{"topic1"} testRequest(t, "one topic", request, metadataRequestOneTopicV1) request.Topics = []string{"foo", "bar", "baz"} testRequest(t, "three topics", request, metadataRequestThreeTopicsV1) } func TestMetadataRequestV2(t *testing.T) { request := new(MetadataRequest) request.Version = 2 testRequest(t, "no topics", request, metadataRequestNoTopicsV2) request.Topics = []string{"topic1"} testRequest(t, "one topic", request, metadataRequestOneTopicV2) request.Topics = []string{"foo", "bar", "baz"} testRequest(t, "three topics", request, metadataRequestThreeTopicsV2) } func TestMetadataRequestV3(t *testing.T) { request := new(MetadataRequest) request.Version = 3 testRequest(t, "no topics", request, metadataRequestNoTopicsV3) request.Topics = []string{"topic1"} testRequest(t, "one topic", request, metadataRequestOneTopicV3) request.Topics = []string{"foo", "bar", "baz"} testRequest(t, "three topics", request, 
metadataRequestThreeTopicsV3) } func TestMetadataRequestV4(t *testing.T) { request := new(MetadataRequest) request.Version = 4 testRequest(t, "no topics", request, metadataRequestNoTopicsV4) request.Topics = []string{"topic1"} request.AllowAutoTopicCreation = true testRequest(t, "one topic", request, metadataRequestAutoCreateV4) request.AllowAutoTopicCreation = false testRequest(t, "one topic", request, metadataRequestNoAutoCreateV4) } func TestMetadataRequestV5(t *testing.T) { request := new(MetadataRequest) request.Version = 5 testRequest(t, "no topics", request, metadataRequestNoTopicsV5) request.Topics = []string{"topic1"} request.AllowAutoTopicCreation = true testRequest(t, "one topic", request, metadataRequestAutoCreateV5) request.AllowAutoTopicCreation = false testRequest(t, "one topic", request, metadataRequestNoAutoCreateV5) } func TestMetadataRequestV6(t *testing.T) { request := new(MetadataRequest) request.Version = 6 testRequest(t, "no topics", request, metadataRequestNoTopicsV6) request.Topics = []string{"topic1"} request.AllowAutoTopicCreation = true testRequest(t, "one topic", request, metadataRequestAutoCreateV6) request.AllowAutoTopicCreation = false testRequest(t, "one topic", request, metadataRequestNoAutoCreateV6) } func TestMetadataRequestV7(t *testing.T) { request := new(MetadataRequest) request.Version = 7 testRequest(t, "no topics", request, metadataRequestNoTopicsV7) request.Topics = []string{"topic1"} request.AllowAutoTopicCreation = true testRequest(t, "one topic", request, metadataRequestAutoCreateV7) request.AllowAutoTopicCreation = false testRequest(t, "one topic", request, metadataRequestNoAutoCreateV7) } func TestMetadataRequestV8(t *testing.T) { request := new(MetadataRequest) request.Version = 8 testRequest(t, "no topics", request, metadataRequestNoTopicsV8) request.Topics = []string{"topic1"} request.AllowAutoTopicCreation = true testRequest(t, "one topic, auto create", request, metadataRequestAutoCreateV8) request.AllowAutoTopicCreation = false testRequest(t, "one topic, no auto create", request, metadataRequestNoAutoCreateV8) request.AllowAutoTopicCreation = true request.IncludeClusterAuthorizedOperations = true request.IncludeTopicAuthorizedOperations = true testRequest(t, "one topic, auto create, cluster auth, topic auth", request, metadataRequestAutoCreateClusterAuthTopicAuthV8) } func TestMetadataRequestV9(t *testing.T) { request := new(MetadataRequest) request.Version = 9 testRequest(t, "no topics", request, metadataRequestNoTopicsV9) request.Topics = []string{"topic1"} testRequest(t, "one topic", request, metadataRequestOneTopicV9) request.AllowAutoTopicCreation = true request.IncludeTopicAuthorizedOperations = true testRequest(t, "one topic, auto create, no cluster auth, topic auth", request, metadataRequestOneTopicAutoCreateTopicV9) } func TestMetadataRequestV10(t *testing.T) { request := new(MetadataRequest) request.Version = 10 testRequest(t, "no topics", request, metadataRequestNoTopicsV10) request.Topics = []string{"topic1", "topic2"} testRequest(t, "one topic", request, metadataRequestTwoTopicsV10) request.AllowAutoTopicCreation = true request.IncludeClusterAuthorizedOperations = true request.IncludeTopicAuthorizedOperations = true testRequest(t, "one topic, auto create, cluster auth, topic auth", request, metadataRequestAutoCreateClusterAuthTopicAuthV10) } golang-github-ibm-sarama-1.43.2/metadata_response.go000066400000000000000000000243541461256741300224050ustar00rootroot00000000000000package sarama import "time" // PartitionMetadata 
contains each partition in the topic. type PartitionMetadata struct { // Version defines the protocol version to use for encode and decode Version int16 // Err contains the partition error, or 0 if there was no error. Err KError // ID contains the partition index. ID int32 // Leader contains the ID of the leader broker. Leader int32 // LeaderEpoch contains the leader epoch of this partition. LeaderEpoch int32 // Replicas contains the set of all nodes that host this partition. Replicas []int32 // Isr contains the set of nodes that are in sync with the leader for this partition. Isr []int32 // OfflineReplicas contains the set of offline replicas of this partition. OfflineReplicas []int32 } func (p *PartitionMetadata) decode(pd packetDecoder, version int16) (err error) { p.Version = version tmp, err := pd.getInt16() if err != nil { return err } p.Err = KError(tmp) if p.ID, err = pd.getInt32(); err != nil { return err } if p.Leader, err = pd.getInt32(); err != nil { return err } if p.Version >= 7 { if p.LeaderEpoch, err = pd.getInt32(); err != nil { return err } } if p.Version < 9 { p.Replicas, err = pd.getInt32Array() } else { p.Replicas, err = pd.getCompactInt32Array() } if err != nil { return err } if p.Version < 9 { p.Isr, err = pd.getInt32Array() } else { p.Isr, err = pd.getCompactInt32Array() } if err != nil { return err } if p.Version >= 5 { if p.Version < 9 { p.OfflineReplicas, err = pd.getInt32Array() } else { p.OfflineReplicas, err = pd.getCompactInt32Array() } if err != nil { return err } } if p.Version >= 9 { _, err = pd.getEmptyTaggedFieldArray() if err != nil { return err } } return nil } func (p *PartitionMetadata) encode(pe packetEncoder, version int16) (err error) { p.Version = version pe.putInt16(int16(p.Err)) pe.putInt32(p.ID) pe.putInt32(p.Leader) if p.Version >= 7 { pe.putInt32(p.LeaderEpoch) } if p.Version < 9 { err = pe.putInt32Array(p.Replicas) } else { err = pe.putCompactInt32Array(p.Replicas) } if err != nil { return err } if p.Version < 9 { err = pe.putInt32Array(p.Isr) } else { err = pe.putCompactInt32Array(p.Isr) } if err != nil { return err } if p.Version >= 5 { if p.Version < 9 { err = pe.putInt32Array(p.OfflineReplicas) } else { err = pe.putCompactInt32Array(p.OfflineReplicas) } if err != nil { return err } } if p.Version >= 9 { pe.putEmptyTaggedFieldArray() } return nil } // TopicMetadata contains each topic in the response. type TopicMetadata struct { // Version defines the protocol version to use for encode and decode Version int16 // Err contains the topic error, or 0 if there was no error. Err KError // Name contains the topic name. Name string Uuid Uuid // IsInternal contains a True if the topic is internal. IsInternal bool // Partitions contains each partition in the topic. 
Partitions []*PartitionMetadata TopicAuthorizedOperations int32 // Only valid for Version >= 8 } func (t *TopicMetadata) decode(pd packetDecoder, version int16) (err error) { t.Version = version tmp, err := pd.getInt16() if err != nil { return err } t.Err = KError(tmp) if t.Version < 9 { t.Name, err = pd.getString() } else { t.Name, err = pd.getCompactString() } if err != nil { return err } if t.Version >= 10 { uuid, err := pd.getRawBytes(16) if err != nil { return err } t.Uuid = [16]byte{} for i := 0; i < 16; i++ { t.Uuid[i] = uuid[i] } } if t.Version >= 1 { t.IsInternal, err = pd.getBool() if err != nil { return err } } var n int if t.Version < 9 { n, err = pd.getArrayLength() } else { n, err = pd.getCompactArrayLength() } if err != nil { return err } else { t.Partitions = make([]*PartitionMetadata, n) for i := 0; i < n; i++ { block := &PartitionMetadata{} if err := block.decode(pd, t.Version); err != nil { return err } t.Partitions[i] = block } } if t.Version >= 8 { t.TopicAuthorizedOperations, err = pd.getInt32() if err != nil { return err } } if t.Version >= 9 { _, err = pd.getEmptyTaggedFieldArray() if err != nil { return err } } return nil } func (t *TopicMetadata) encode(pe packetEncoder, version int16) (err error) { t.Version = version pe.putInt16(int16(t.Err)) if t.Version < 9 { err = pe.putString(t.Name) } else { err = pe.putCompactString(t.Name) } if err != nil { return err } if t.Version >= 10 { err = pe.putRawBytes(t.Uuid[:]) if err != nil { return err } } if t.Version >= 1 { pe.putBool(t.IsInternal) } if t.Version < 9 { err = pe.putArrayLength(len(t.Partitions)) if err != nil { return err } } else { pe.putCompactArrayLength(len(t.Partitions)) } for _, block := range t.Partitions { if err := block.encode(pe, t.Version); err != nil { return err } } if t.Version >= 8 { pe.putInt32(t.TopicAuthorizedOperations) } if t.Version >= 9 { pe.putEmptyTaggedFieldArray() } return nil } type MetadataResponse struct { // Version defines the protocol version to use for encode and decode Version int16 // ThrottleTimeMs contains the duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. ThrottleTimeMs int32 // Brokers contains each broker in the response. Brokers []*Broker // ClusterID contains the cluster ID that responding broker belongs to. ClusterID *string // ControllerID contains the ID of the controller broker. ControllerID int32 // Topics contains each topic in the response. 
Topics []*TopicMetadata ClusterAuthorizedOperations int32 // Only valid for Version >= 8 } func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) { r.Version = version if r.Version >= 3 { if r.ThrottleTimeMs, err = pd.getInt32(); err != nil { return err } } var brokerArrayLen int if r.Version < 9 { brokerArrayLen, err = pd.getArrayLength() } else { brokerArrayLen, err = pd.getCompactArrayLength() } if err != nil { return err } r.Brokers = make([]*Broker, brokerArrayLen) for i := 0; i < brokerArrayLen; i++ { r.Brokers[i] = new(Broker) err = r.Brokers[i].decode(pd, version) if err != nil { return err } } if r.Version >= 2 { if r.Version < 9 { r.ClusterID, err = pd.getNullableString() } else { r.ClusterID, err = pd.getCompactNullableString() } if err != nil { return err } } if r.Version >= 1 { if r.ControllerID, err = pd.getInt32(); err != nil { return err } } var topicArrayLen int if version < 9 { topicArrayLen, err = pd.getArrayLength() } else { topicArrayLen, err = pd.getCompactArrayLength() } if err != nil { return err } r.Topics = make([]*TopicMetadata, topicArrayLen) for i := 0; i < topicArrayLen; i++ { r.Topics[i] = new(TopicMetadata) err = r.Topics[i].decode(pd, version) if err != nil { return err } } if r.Version >= 8 { r.ClusterAuthorizedOperations, err = pd.getInt32() if err != nil { return err } } if r.Version >= 9 { _, err := pd.getEmptyTaggedFieldArray() if err != nil { return err } } return nil } func (r *MetadataResponse) encode(pe packetEncoder) (err error) { if r.Version >= 3 { pe.putInt32(r.ThrottleTimeMs) } if r.Version < 9 { err = pe.putArrayLength(len(r.Brokers)) if err != nil { return err } } else { pe.putCompactArrayLength(len(r.Brokers)) } for _, broker := range r.Brokers { err = broker.encode(pe, r.Version) if err != nil { return err } } if r.Version >= 2 { if r.Version < 9 { err = pe.putNullableString(r.ClusterID) if err != nil { return err } } else { err = pe.putNullableCompactString(r.ClusterID) if err != nil { return err } } } if r.Version >= 1 { pe.putInt32(r.ControllerID) } if r.Version < 9 { err = pe.putArrayLength(len(r.Topics)) } else { pe.putCompactArrayLength(len(r.Topics)) } if err != nil { return err } for _, block := range r.Topics { if err := block.encode(pe, r.Version); err != nil { return err } } if r.Version >= 8 { pe.putInt32(r.ClusterAuthorizedOperations) } if r.Version >= 9 { pe.putEmptyTaggedFieldArray() } return nil } func (r *MetadataResponse) key() int16 { return 3 } func (r *MetadataResponse) version() int16 { return r.Version } func (r *MetadataResponse) headerVersion() int16 { if r.Version < 9 { return 0 } else { return 1 } } func (r *MetadataResponse) isValidVersion() bool { return r.Version >= 0 && r.Version <= 7 } func (r *MetadataResponse) requiredVersion() KafkaVersion { switch r.Version { case 10: return V2_8_0_0 case 9: return V2_4_0_0 case 8: return V2_3_0_0 case 7: return V2_1_0_0 case 6: return V2_0_0_0 case 5: return V1_0_0_0 case 3, 4: return V0_11_0_0 case 2: return V0_10_1_0 case 1: return V0_10_0_0 case 0: return V0_8_2_0 default: return V2_8_0_0 } } func (r *MetadataResponse) throttleTime() time.Duration { return time.Duration(r.ThrottleTimeMs) * time.Millisecond } // testing API func (r *MetadataResponse) AddBroker(addr string, id int32) { r.Brokers = append(r.Brokers, &Broker{id: id, addr: addr}) } func (r *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata { var tmatch *TopicMetadata for _, tm := range r.Topics { if tm.Name == topic { tmatch = tm goto foundTopic } } tmatch = 
new(TopicMetadata) tmatch.Name = topic r.Topics = append(r.Topics, tmatch) foundTopic: tmatch.Err = err return tmatch } func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, offline []int32, err KError) { tmatch := r.AddTopic(topic, ErrNoError) var pmatch *PartitionMetadata for _, pm := range tmatch.Partitions { if pm.ID == partition { pmatch = pm goto foundPartition } } pmatch = new(PartitionMetadata) pmatch.ID = partition tmatch.Partitions = append(tmatch.Partitions, pmatch) foundPartition: pmatch.Leader = brokerID pmatch.Replicas = replicas if pmatch.Replicas == nil { pmatch.Replicas = []int32{} } pmatch.Isr = isr if pmatch.Isr == nil { pmatch.Isr = []int32{} } pmatch.OfflineReplicas = offline if pmatch.OfflineReplicas == nil { pmatch.OfflineReplicas = []int32{} } pmatch.Err = err } golang-github-ibm-sarama-1.43.2/metadata_response_test.go000066400000000000000000000473621461256741300234500ustar00rootroot00000000000000package sarama import ( "errors" "testing" ) var ( emptyMetadataResponseV0 = []byte{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, } brokersNoTopicsMetadataResponseV0 = []byte{ 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0xab, 0xff, 0x00, 0x09, 'l', 'o', 'c', 'a', 'l', 'h', 'o', 's', 't', 0x00, 0x00, 0x00, 0x33, 0x00, 0x01, 0x02, 0x03, 0x00, 0x0a, 'g', 'o', 'o', 'g', 'l', 'e', '.', 'c', 'o', 'm', 0x00, 0x00, 0x01, 0x11, 0x00, 0x00, 0x00, 0x00, } topicsNoBrokersMetadataResponseV0 = []byte{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03, 'f', 'o', 'o', 0x00, 0x00, 0x00, 0x01, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 'b', 'a', 'r', 0x00, 0x00, 0x00, 0x00, } brokersNoTopicsMetadataResponseV1 = []byte{ 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0xab, 0xff, 0x00, 0x09, 'l', 'o', 'c', 'a', 'l', 'h', 'o', 's', 't', 0x00, 0x00, 0x00, 0x33, 0x00, 0x05, 'r', 'a', 'c', 'k', '0', 0x00, 0x01, 0x02, 0x03, 0x00, 0x0a, 'g', 'o', 'o', 'g', 'l', 'e', '.', 'c', 'o', 'm', 0x00, 0x00, 0x01, 0x11, 0x00, 0x05, 'r', 'a', 'c', 'k', '1', 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, } topicsNoBrokersMetadataResponseV1 = []byte{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03, 'f', 'o', 'o', 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 'b', 'a', 'r', 0x01, 0x00, 0x00, 0x00, 0x00, } noBrokersNoTopicsWithThrottleTimeAndClusterIDV3 = []byte{ 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 'c', 'l', 'u', 's', 't', 'e', 'r', 'I', 'd', 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, } noBrokersOneTopicWithOfflineReplicasV5 = []byte{ 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 'c', 'l', 'u', 's', 't', 'e', 'r', 'I', 'd', 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, 'f', 'o', 'o', 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, } OneTopicV6 = []byte{ 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 'h', 'o', 
's', 't', 0x00, 0x00, 0x23, 0x84, 0xff, 0xff, 0x00, 0x09, 'c', 'l', 'u', 's', 't', 'e', 'r', 'I', 'd', 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x04, 't', 'o', 'n', 'y', 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, } OneTopicV7 = []byte{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 'h', 'o', 's', 't', 0x00, 0x00, 0x23, 0x84, 0xff, 0xff, 0x00, 0x09, 'c', 'l', 'u', 's', 't', 'e', 'r', 'I', 'd', 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x04, 't', 'o', 'n', 'y', 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, } OneTopicV8 = []byte{ 0x00, 0x00, 0x00, 0x00, // throttle ms 0x00, 0x00, 0x00, 0x01, // length brokers 0x00, 0x00, 0x00, 0x00, // broker[0].nodeid 0x00, 0x04, // brokers[0].length(nodehost) 'h', 'o', 's', 't', // broker[0].nodehost 0x00, 0x00, 0x23, 0x84, // broker[0].port (9092) 0xff, 0xff, // brokers[0].rack (null) 0x00, 0x09, 'c', 'l', 'u', 's', 't', 'e', 'r', 'I', 'd', 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x04, 't', 'o', 'n', 'y', 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 'Y', 0x00, 0x00, 0x00, 0xea, } OneTopicV9 = []byte{ 0x00, 0x00, 0x00, 0x00, // throttle ms 0x02, // length of brokers 0x00, 0x00, 0x00, 0x00, // broker[0].nodeid 0x05, // length of brokers[0].nodehost 'h', 'o', 's', 't', // brokers[0].nodehost 0x00, 0x00, 0x23, 0x84, // brokers[0].port (9092) 0x00, // brokers[0].rack (null) 0x00, // empty tags 0x0a, 'c', 'l', 'u', 's', 't', 'e', 'r', 'I', 'd', // cluster id 0x00, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x05, 't', 'o', 'n', 'y', 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x01, 0x00, 0x00, 0x00, 0x01, 'Y', 0x00, 0x00, 0x00, 0x00, 0xea, 0x00, } OneTopicV10 = []byte{ 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x05, 'h', 'o', 's', 't', 0x00, 0x00, 0x23, 0x84, 0x00, 0x00, 0x0a, 'c', 'l', 'u', 's', 't', 'e', 'r', 'I', 'd', 0x00, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x05, 't', 'o', 'n', 'y', 0x84, 0xcd, 0xa7, 'U', 0x7e, 0x84, 'K', 0xf9, 0xb7, 0xdc, 0xfc, 0x11, 0x82, 0x07, 'r', 'J', 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x01, 0x00, 0x00, 0x00, 0x01, 'Y', 0x00, 0x00, 0x00, 0x00, 0xea, 0x00, } ) func TestEmptyMetadataResponseV0(t *testing.T) { response := MetadataResponse{} testVersionDecodable(t, "empty, 
V0", &response, emptyMetadataResponseV0, 0) if len(response.Brokers) != 0 { t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!") } if len(response.Topics) != 0 { t.Error("Decoding produced", len(response.Topics), "topics where there were none!") } } func TestMetadataResponseWithBrokersV0(t *testing.T) { response := MetadataResponse{} testVersionDecodable(t, "brokers, no topics, V0", &response, brokersNoTopicsMetadataResponseV0, 0) if len(response.Brokers) != 2 { t.Fatal("Decoding produced", len(response.Brokers), "brokers where there were two!") } if response.Brokers[0].id != 0xabff { t.Error("Decoding produced invalid broker 0 id.") } if response.Brokers[0].addr != "localhost:51" { t.Error("Decoding produced invalid broker 0 address.") } if response.Brokers[1].id != 0x010203 { t.Error("Decoding produced invalid broker 1 id.") } if response.Brokers[1].addr != "google.com:273" { t.Error("Decoding produced invalid broker 1 address.") } if len(response.Topics) != 0 { t.Error("Decoding produced", len(response.Topics), "topics where there were none!") } } func TestMetadataResponseWithTopicsV0(t *testing.T) { response := MetadataResponse{} testVersionDecodable(t, "topics, no brokers, V0", &response, topicsNoBrokersMetadataResponseV0, 0) if len(response.Brokers) != 0 { t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!") } if len(response.Topics) != 2 { t.Fatal("Decoding produced", len(response.Topics), "topics where there were two!") } if !errors.Is(response.Topics[0].Err, ErrNoError) { t.Error("Decoding produced invalid topic 0 error.") } if response.Topics[0].Name != "foo" { t.Error("Decoding produced invalid topic 0 name.") } if len(response.Topics[0].Partitions) != 1 { t.Fatal("Decoding produced invalid partition count for topic 0.") } if !errors.Is(response.Topics[0].Partitions[0].Err, ErrInvalidMessageSize) { t.Error("Decoding produced invalid topic 0 partition 0 error.") } if response.Topics[0].Partitions[0].ID != 0x01 { t.Error("Decoding produced invalid topic 0 partition 0 id.") } if response.Topics[0].Partitions[0].Leader != 0x07 { t.Error("Decoding produced invalid topic 0 partition 0 leader.") } if len(response.Topics[0].Partitions[0].Replicas) != 3 { t.Fatal("Decoding produced invalid topic 0 partition 0 replicas.") } for i := 0; i < 3; i++ { if response.Topics[0].Partitions[0].Replicas[i] != int32(i+1) { t.Error("Decoding produced invalid topic 0 partition 0 replica", i) } } if len(response.Topics[0].Partitions[0].Isr) != 0 { t.Error("Decoding produced invalid topic 0 partition 0 isr length.") } if !errors.Is(response.Topics[1].Err, ErrNoError) { t.Error("Decoding produced invalid topic 1 error.") } if response.Topics[1].Name != "bar" { t.Error("Decoding produced invalid topic 0 name.") } if len(response.Topics[1].Partitions) != 0 { t.Error("Decoding produced invalid partition count for topic 1.") } } func TestMetadataResponseWithBrokersV1(t *testing.T) { response := MetadataResponse{} testVersionDecodable(t, "topics, V1", &response, brokersNoTopicsMetadataResponseV1, 1) if len(response.Brokers) != 2 { t.Error("Decoding produced", len(response.Brokers), "brokers where there were 2!") } if response.Brokers[0].rack == nil || *response.Brokers[0].rack != "rack0" { t.Error("Decoding produced invalid broker 0 rack.") } if response.Brokers[1].rack == nil || *response.Brokers[1].rack != "rack1" { t.Error("Decoding produced invalid broker 1 rack.") } if response.ControllerID != 1 { t.Error("Decoding produced", 
response.ControllerID, "should have been 1!") } if len(response.Topics) != 0 { t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!") } } func TestMetadataResponseWithTopicsV1(t *testing.T) { response := MetadataResponse{} testVersionDecodable(t, "topics, V1", &response, topicsNoBrokersMetadataResponseV1, 1) if len(response.Brokers) != 0 { t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!") } if response.ControllerID != 4 { t.Error("Decoding produced", response.ControllerID, "should have been 4!") } if len(response.Topics) != 2 { t.Error("Decoding produced", len(response.Topics), "topics where there were 2!") } if response.Topics[0].IsInternal { t.Error("Decoding produced", response.Topics[0], "topic0 should have been false!") } if !response.Topics[1].IsInternal { t.Error("Decoding produced", response.Topics[1], "topic1 should have been true!") } } func TestMetadataResponseWithThrottleTime(t *testing.T) { response := MetadataResponse{} testVersionDecodable(t, "no topics, no brokers, throttle time and cluster Id V3", &response, noBrokersNoTopicsWithThrottleTimeAndClusterIDV3, 3) if response.ThrottleTimeMs != int32(16) { t.Error("Decoding produced", response.ThrottleTimeMs, "should have been 16!") } if len(response.Brokers) != 0 { t.Error("Decoding produced", response.Brokers, "should have been 0!") } if response.ControllerID != int32(1) { t.Error("Decoding produced", response.ControllerID, "should have been 1!") } if *response.ClusterID != "clusterId" { t.Error("Decoding produced", response.ClusterID, "should have been clusterId!") } if len(response.Topics) != 0 { t.Error("Decoding produced", len(response.Topics), "should have been 0!") } } func TestMetadataResponseWithOfflineReplicasV5(t *testing.T) { response := MetadataResponse{} testVersionDecodable(t, "no brokers, 1 topic with offline replica V5", &response, noBrokersOneTopicWithOfflineReplicasV5, 5) if response.ThrottleTimeMs != int32(5) { t.Error("Decoding produced", response.ThrottleTimeMs, "should have been 5!") } if len(response.Brokers) != 0 { t.Error("Decoding produced", response.Brokers, "should have been 0!") } if response.ControllerID != int32(2) { t.Error("Decoding produced", response.ControllerID, "should have been 21!") } if *response.ClusterID != "clusterId" { t.Error("Decoding produced", response.ClusterID, "should have been clusterId!") } if len(response.Topics) != 1 { t.Error("Decoding produced", len(response.Topics), "should have been 1!") } if len(response.Topics[0].Partitions[0].OfflineReplicas) != 1 { t.Error("Decoding produced", len(response.Topics[0].Partitions[0].OfflineReplicas), "should have been 1!") } } func TestMetadataResponseV6(t *testing.T) { response := MetadataResponse{} testVersionDecodable(t, "no brokers, 1 topic with offline replica V5", &response, OneTopicV6, 6) if response.ThrottleTimeMs != int32(7) { t.Error("Decoding produced", response.ThrottleTimeMs, "should have been 7!") } if len(response.Brokers) != 1 { t.Error("Decoding produced", response.Brokers, "should have been 1!") } if response.Brokers[0].addr != "host:9092" { t.Error("Decoding produced", response.Brokers[0].addr, "should have been host:9092!") } if response.ControllerID != int32(1) { t.Error("Decoding produced", response.ControllerID, "should have been 1!") } if *response.ClusterID != "clusterId" { t.Error("Decoding produced", response.ClusterID, "should have been clusterId!") } if len(response.Topics) != 1 { t.Error("Decoding produced", len(response.Topics), "should 
have been 1!") } if len(response.Topics[0].Partitions[0].OfflineReplicas) != 0 { t.Error("Decoding produced", len(response.Topics[0].Partitions[0].OfflineReplicas), "should have been 0!") } } func TestMetadataResponseV7(t *testing.T) { response := MetadataResponse{} testVersionDecodable(t, "no brokers, 1 topic with offline replica V5", &response, OneTopicV7, 7) if response.ThrottleTimeMs != int32(0) { t.Error("Decoding produced", response.ThrottleTimeMs, "should have been 0!") } if len(response.Brokers) != 1 { t.Error("Decoding produced", response.Brokers, "should have been 1!") } if response.Brokers[0].addr != "host:9092" { t.Error("Decoding produced", response.Brokers[0].addr, "should have been host:9092!") } if response.ControllerID != int32(1) { t.Error("Decoding produced", response.ControllerID, "should have been 1!") } if *response.ClusterID != "clusterId" { t.Error("Decoding produced", response.ClusterID, "should have been clusterId!") } if len(response.Topics) != 1 { t.Error("Decoding produced", len(response.Topics), "should have been 1!") } if len(response.Topics[0].Partitions[0].OfflineReplicas) != 0 { t.Error("Decoding produced", len(response.Topics[0].Partitions[0].OfflineReplicas), "should have been 0!") } if response.Topics[0].Partitions[0].LeaderEpoch != 123 { t.Error("Decoding produced", response.Topics[0].Partitions[0].LeaderEpoch, "should have been 123!") } } func TestMetadataResponseV8(t *testing.T) { response := MetadataResponse{} testVersionDecodable(t, "no brokers, 1 topic with offline replica V5", &response, OneTopicV8, 8) if response.ThrottleTimeMs != int32(0) { t.Error("Decoding produced", response.ThrottleTimeMs, "should have been 0!") } if len(response.Brokers) != 1 { t.Error("Decoding produced", response.Brokers, "should have been 1!") } if response.Brokers[0].addr != "host:9092" { t.Error("Decoding produced", response.Brokers[0].addr, "should have been host:9092!") } if response.ControllerID != int32(1) { t.Error("Decoding produced", response.ControllerID, "should have been 1!") } if *response.ClusterID != "clusterId" { t.Error("Decoding produced", response.ClusterID, "should have been clusterId!") } if response.ClusterAuthorizedOperations != 234 { t.Error("Decoding produced", response.ClusterAuthorizedOperations, "should have been 234!") } if len(response.Topics) != 1 { t.Error("Decoding produced", len(response.Topics), "should have been 1!") } if response.Topics[0].TopicAuthorizedOperations != 345 { t.Error("Decoding produced", response.Topics[0].TopicAuthorizedOperations, "should have been 345!") } if len(response.Topics[0].Partitions[0].OfflineReplicas) != 0 { t.Error("Decoding produced", len(response.Topics[0].Partitions[0].OfflineReplicas), "should have been 0!") } if response.Topics[0].Partitions[0].LeaderEpoch != 123 { t.Error("Decoding produced", response.Topics[0].Partitions[0].LeaderEpoch, "should have been 123!") } } func TestMetadataResponseV9(t *testing.T) { response := MetadataResponse{} testVersionDecodable(t, "no brokers, 1 topic with offline replica V5", &response, OneTopicV9, 9) if response.ThrottleTimeMs != int32(0) { t.Error("Decoding produced", response.ThrottleTimeMs, "should have been 0!") } if len(response.Brokers) != 1 { t.Error("Decoding produced", response.Brokers, "should have been 1!") } if response.Brokers[0].addr != "host:9092" { t.Error("Decoding produced", response.Brokers[0].addr, "should have been host:9092!") } if response.ControllerID != int32(1) { t.Error("Decoding produced", response.ControllerID, "should have been 1!") } 
if *response.ClusterID != "clusterId" { t.Error("Decoding produced", response.ClusterID, "should have been clusterId!") } if response.ClusterAuthorizedOperations != 234 { t.Error("Decoding produced", response.ClusterAuthorizedOperations, "should have been 234!") } if len(response.Topics) != 1 { t.Error("Decoding produced", len(response.Topics), "should have been 1!") } if response.Topics[0].TopicAuthorizedOperations != 345 { t.Error("Decoding produced", response.Topics[0].TopicAuthorizedOperations, "should have been 345!") } if len(response.Topics[0].Partitions[0].OfflineReplicas) != 0 { t.Error("Decoding produced", len(response.Topics[0].Partitions[0].OfflineReplicas), "should have been 0!") } if response.Topics[0].Partitions[0].LeaderEpoch != 123 { t.Error("Decoding produced", response.Topics[0].Partitions[0].LeaderEpoch, "should have been 123!") } } func TestMetadataResponseV10(t *testing.T) { response := MetadataResponse{} testVersionDecodable(t, "no brokers, 1 topic with offline replica V5", &response, OneTopicV10, 10) if response.ThrottleTimeMs != int32(0) { t.Error("Decoding produced", response.ThrottleTimeMs, "should have been 0!") } if len(response.Brokers) != 1 { t.Error("Decoding produced", response.Brokers, "should have been 1!") } if response.Brokers[0].addr != "host:9092" { t.Error("Decoding produced", response.Brokers[0].addr, "should have been host:9092!") } if response.ControllerID != int32(1) { t.Error("Decoding produced", response.ControllerID, "should have been 1!") } if *response.ClusterID != "clusterId" { t.Error("Decoding produced", response.ClusterID, "should have been clusterId!") } if response.ClusterAuthorizedOperations != 234 { t.Error("Decoding produced", response.ClusterAuthorizedOperations, "should have been 234!") } if len(response.Topics) != 1 { t.Error("Decoding produced", len(response.Topics), "should have been 1!") } if response.Topics[0].Uuid != [16]byte{ 0x84, 0xcd, 0xa7, 0x55, 0x7e, 0x84, 0x4b, 0xf9, 0xb7, 0xdc, 0xfc, 0x11, 0x82, 0x07, 0x72, 0x4a, } { t.Error("Decoding produced", response.Topics[0].Uuid, "should have been different!") } if response.Topics[0].TopicAuthorizedOperations != 345 { t.Error("Decoding produced", response.Topics[0].TopicAuthorizedOperations, "should have been 345!") } if len(response.Topics[0].Partitions[0].OfflineReplicas) != 0 { t.Error("Decoding produced", len(response.Topics[0].Partitions[0].OfflineReplicas), "should have been 0!") } if response.Topics[0].Partitions[0].LeaderEpoch != 123 { t.Error("Decoding produced", response.Topics[0].Partitions[0].LeaderEpoch, "should have been 123!") } } golang-github-ibm-sarama-1.43.2/metrics.go000066400000000000000000000070301461256741300203450ustar00rootroot00000000000000package sarama import ( "fmt" "strings" "sync" "github.com/rcrowley/go-metrics" ) // Use exponentially decaying reservoir for sampling histograms with the same defaults as the Java library: // 1028 elements, which offers a 99.9% confidence level with a 5% margin of error assuming a normal distribution, // and an alpha factor of 0.015, which heavily biases the reservoir to the past 5 minutes of measurements. 
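// In go-metrics terms the defaults above correspond to the construction used by
// getOrRegisterHistogram below (shown here only as an illustrative sketch):
//
//	metrics.NewHistogram(metrics.NewExpDecaySample(1028, 0.015))
//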
// See https://github.com/dropwizard/metrics/blob/v3.1.0/metrics-core/src/main/java/com/codahale/metrics/ExponentiallyDecayingReservoir.java#L38 const ( metricsReservoirSize = 1028 metricsAlphaFactor = 0.015 ) func getOrRegisterHistogram(name string, r metrics.Registry) metrics.Histogram { return r.GetOrRegister(name, func() metrics.Histogram { return metrics.NewHistogram(metrics.NewExpDecaySample(metricsReservoirSize, metricsAlphaFactor)) }).(metrics.Histogram) } func getMetricNameForBroker(name string, broker *Broker) string { // Use broker id like the Java client as it does not contain '.' or ':' characters that // can be interpreted as special character by monitoring tool (e.g. Graphite) return fmt.Sprintf(name+"-for-broker-%d", broker.ID()) } func getMetricNameForTopic(name string, topic string) string { // Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy // cf. KAFKA-1902 and KAFKA-2337 return fmt.Sprintf(name+"-for-topic-%s", strings.ReplaceAll(topic, ".", "_")) } func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter { return metrics.GetOrRegisterMeter(getMetricNameForTopic(name, topic), r) } func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram { return getOrRegisterHistogram(getMetricNameForTopic(name, topic), r) } // cleanupRegistry is an implementation of metrics.Registry that allows // to unregister from the parent registry only those metrics // that have been registered in cleanupRegistry type cleanupRegistry struct { parent metrics.Registry metrics map[string]struct{} mutex sync.RWMutex } func newCleanupRegistry(parent metrics.Registry) metrics.Registry { return &cleanupRegistry{ parent: parent, metrics: map[string]struct{}{}, } } func (r *cleanupRegistry) Each(fn func(string, interface{})) { r.mutex.RLock() defer r.mutex.RUnlock() wrappedFn := func(name string, iface interface{}) { if _, ok := r.metrics[name]; ok { fn(name, iface) } } r.parent.Each(wrappedFn) } func (r *cleanupRegistry) Get(name string) interface{} { r.mutex.RLock() defer r.mutex.RUnlock() if _, ok := r.metrics[name]; ok { return r.parent.Get(name) } return nil } func (r *cleanupRegistry) GetOrRegister(name string, metric interface{}) interface{} { r.mutex.Lock() defer r.mutex.Unlock() r.metrics[name] = struct{}{} return r.parent.GetOrRegister(name, metric) } func (r *cleanupRegistry) Register(name string, metric interface{}) error { r.mutex.Lock() defer r.mutex.Unlock() r.metrics[name] = struct{}{} return r.parent.Register(name, metric) } func (r *cleanupRegistry) RunHealthchecks() { r.parent.RunHealthchecks() } func (r *cleanupRegistry) GetAll() map[string]map[string]interface{} { return r.parent.GetAll() } func (r *cleanupRegistry) Unregister(name string) { r.mutex.Lock() defer r.mutex.Unlock() if _, ok := r.metrics[name]; ok { delete(r.metrics, name) r.parent.Unregister(name) } } func (r *cleanupRegistry) UnregisterAll() { r.mutex.Lock() defer r.mutex.Unlock() for name := range r.metrics { delete(r.metrics, name) r.parent.Unregister(name) } } golang-github-ibm-sarama-1.43.2/metrics_test.go000066400000000000000000000133741461256741300214140ustar00rootroot00000000000000package sarama import ( "testing" "github.com/rcrowley/go-metrics" ) func TestGetOrRegisterHistogram(t *testing.T) { metricRegistry := metrics.NewRegistry() histogram := getOrRegisterHistogram("name", metricRegistry) if histogram == nil { t.Error("Unexpected nil histogram") } // Fetch the metric foundHistogram := 
metricRegistry.Get("name") if foundHistogram != histogram { t.Error("Unexpected different histogram", foundHistogram, histogram) } // Try to register the metric again sameHistogram := getOrRegisterHistogram("name", metricRegistry) if sameHistogram != histogram { t.Error("Unexpected different histogram", sameHistogram, histogram) } } func TestGetMetricNameForBroker(t *testing.T) { metricName := getMetricNameForBroker("name", &Broker{id: 1}) if metricName != "name-for-broker-1" { t.Error("Unexpected metric name", metricName) } } // Common type and functions for metric validation type metricValidator struct { name string validator func(*testing.T, interface{}) } type metricValidators []*metricValidator func newMetricValidators() metricValidators { return make([]*metricValidator, 0, 32) } func (m *metricValidators) register(validator *metricValidator) { *m = append(*m, validator) } func (m *metricValidators) registerForBroker(broker *Broker, validator *metricValidator) { m.register(&metricValidator{getMetricNameForBroker(validator.name, broker), validator.validator}) } func (m *metricValidators) registerForGlobalAndTopic(topic string, validator *metricValidator) { m.register(&metricValidator{validator.name, validator.validator}) m.register(&metricValidator{getMetricNameForTopic(validator.name, topic), validator.validator}) } func (m *metricValidators) registerForAllBrokers(broker *Broker, validator *metricValidator) { m.register(validator) m.registerForBroker(broker, validator) } func (m metricValidators) run(t *testing.T, r metrics.Registry) { t.Helper() for _, metricValidator := range m { metric := r.Get(metricValidator.name) if metric == nil { t.Error("No metric named", metricValidator.name) } else { metricValidator.validator(t, metric) } } } func meterValidator(name string, extraValidator func(*testing.T, metrics.Meter)) *metricValidator { return &metricValidator{ name: name, validator: func(t *testing.T, metric interface{}) { t.Helper() if meter, ok := metric.(metrics.Meter); !ok { t.Errorf("Expected meter metric for '%s', got %T", name, metric) } else { extraValidator(t, meter) } }, } } func countMeterValidator(name string, expectedCount int) *metricValidator { return meterValidator(name, func(t *testing.T, meter metrics.Meter) { t.Helper() count := meter.Count() if count != int64(expectedCount) { t.Errorf("Expected meter metric '%s' count = %d, got %d", name, expectedCount, count) } }) } func minCountMeterValidator(name string, minCount int) *metricValidator { return meterValidator(name, func(t *testing.T, meter metrics.Meter) { t.Helper() count := meter.Count() if count < int64(minCount) { t.Errorf("Expected meter metric '%s' count >= %d, got %d", name, minCount, count) } }) } func histogramValidator(name string, extraValidator func(*testing.T, metrics.Histogram)) *metricValidator { return &metricValidator{ name: name, validator: func(t *testing.T, metric interface{}) { t.Helper() if histogram, ok := metric.(metrics.Histogram); !ok { t.Errorf("Expected histogram metric for '%s', got %T", name, metric) } else { extraValidator(t, histogram) } }, } } func countHistogramValidator(name string, expectedCount int) *metricValidator { return histogramValidator(name, func(t *testing.T, histogram metrics.Histogram) { t.Helper() count := histogram.Count() if count != int64(expectedCount) { t.Errorf("Expected histogram metric '%s' count = %d, got %d", name, expectedCount, count) } }) } func minCountHistogramValidator(name string, minCount int) *metricValidator { return histogramValidator(name, 
func(t *testing.T, histogram metrics.Histogram) { t.Helper() count := histogram.Count() if count < int64(minCount) { t.Errorf("Expected histogram metric '%s' count >= %d, got %d", name, minCount, count) } }) } func minMaxHistogramValidator(name string, expectedMin int, expectedMax int) *metricValidator { return histogramValidator(name, func(t *testing.T, histogram metrics.Histogram) { t.Helper() min := int(histogram.Min()) if min != expectedMin { t.Errorf("Expected histogram metric '%s' min = %d, got %d", name, expectedMin, min) } max := int(histogram.Max()) if max != expectedMax { t.Errorf("Expected histogram metric '%s' max = %d, got %d", name, expectedMax, max) } }) } func minValHistogramValidator(name string, minMin int) *metricValidator { return histogramValidator(name, func(t *testing.T, histogram metrics.Histogram) { t.Helper() min := int(histogram.Min()) if min < minMin { t.Errorf("Expected histogram metric '%s' min >= %d, got %d", name, minMin, min) } }) } func maxValHistogramValidator(name string, maxMax int) *metricValidator { return histogramValidator(name, func(t *testing.T, histogram metrics.Histogram) { t.Helper() max := int(histogram.Max()) if max > maxMax { t.Errorf("Expected histogram metric '%s' max <= %d, got %d", name, maxMax, max) } }) } func counterValidator(name string, expectedCount int) *metricValidator { return &metricValidator{ name: name, validator: func(t *testing.T, metric interface{}) { t.Helper() if counter, ok := metric.(metrics.Counter); !ok { t.Errorf("Expected counter metric for '%s', got %T", name, metric) } else { count := counter.Count() if count != int64(expectedCount) { t.Errorf("Expected counter metric '%s' count = %d, got %d", name, expectedCount, count) } } }, } } golang-github-ibm-sarama-1.43.2/mockbroker.go000066400000000000000000000320601461256741300210360ustar00rootroot00000000000000package sarama import ( "bytes" "encoding/binary" "errors" "fmt" "io" "net" "reflect" "strconv" "sync" "syscall" "time" "github.com/davecgh/go-spew/spew" ) const ( expectationTimeout = 500 * time.Millisecond ) type GSSApiHandlerFunc func([]byte) []byte type requestHandlerFunc func(req *request) (res encoderWithHeader) // RequestNotifierFunc is invoked when a mock broker processes a request successfully // and will provides the number of bytes read and written. type RequestNotifierFunc func(bytesRead, bytesWritten int) // MockBroker is a mock Kafka broker that is used in unit tests. It is exposed // to facilitate testing of higher level or specialized consumers and producers // built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol, // but rather provides a facility to do that. It takes care of the TCP // transport, request unmarshalling, response marshaling, and makes it the test // writer responsibility to program correct according to the Kafka API protocol // MockBroker behavior. // // MockBroker is implemented as a TCP server listening on a kernel-selected // localhost port that can accept many connections. It reads Kafka requests // from that connection and returns responses programmed by the SetHandlerByMap // function. If a MockBroker receives a request that it has no programmed // response for, then it returns nothing and the request times out. // // A set of MockRequest builders to define mappings used by MockBroker is // provided by Sarama. But users can develop MockRequests of their own and use // them along with or instead of the standard ones. 
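//
// A minimal usage sketch (illustrative only; "t" is the test's TestReporter and
// "my-topic" is a placeholder topic name):
//
//	broker := NewMockBroker(t, 1)
//	defer broker.Close()
//	broker.SetHandlerByMap(map[string]MockResponse{
//		"MetadataRequest": NewMockMetadataResponse(t).
//			SetBroker(broker.Addr(), broker.BrokerID()).
//			SetLeader("my-topic", 0, broker.BrokerID()),
//	})
//	// a real Client/Consumer/Producer can then be pointed at broker.Addr()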
// // When running tests with MockBroker it is strongly recommended to specify // a timeout to `go test` so that if the broker hangs waiting for a response, // the test panics. // // It is not necessary to prefix message length or correlation ID to your // response bytes, the server does that automatically as a convenience. type MockBroker struct { brokerID int32 port int32 closing chan none stopper chan none expectations chan encoderWithHeader listener net.Listener t TestReporter latency time.Duration handler requestHandlerFunc notifier RequestNotifierFunc history []RequestResponse lock sync.Mutex gssApiHandler GSSApiHandlerFunc } // RequestResponse represents a Request/Response pair processed by MockBroker. type RequestResponse struct { Request protocolBody Response encoder } // SetLatency makes broker pause for the specified period every time before // replying. func (b *MockBroker) SetLatency(latency time.Duration) { b.latency = latency } // SetHandlerByMap defines mapping of Request types to MockResponses. When a // request is received by the broker, it looks up the request type in the map // and uses the found MockResponse instance to generate an appropriate reply. // If the request type is not found in the map then nothing is sent. func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) { fnMap := make(map[string]MockResponse) for k, v := range handlerMap { fnMap[k] = v } b.setHandler(func(req *request) (res encoderWithHeader) { reqTypeName := reflect.TypeOf(req.body).Elem().Name() mockResponse := fnMap[reqTypeName] if mockResponse == nil { return nil } return mockResponse.For(req.body) }) } // SetHandlerFuncByMap defines mapping of Request types to RequestHandlerFunc. When a // request is received by the broker, it looks up the request type in the map // and invoke the found RequestHandlerFunc instance to generate an appropriate reply. func (b *MockBroker) SetHandlerFuncByMap(handlerMap map[string]requestHandlerFunc) { fnMap := make(map[string]requestHandlerFunc) for k, v := range handlerMap { fnMap[k] = v } b.setHandler(func(req *request) (res encoderWithHeader) { reqTypeName := reflect.TypeOf(req.body).Elem().Name() return fnMap[reqTypeName](req) }) } // SetNotifier set a function that will get invoked whenever a request has been // processed successfully and will provide the number of bytes read and written func (b *MockBroker) SetNotifier(notifier RequestNotifierFunc) { b.lock.Lock() b.notifier = notifier b.lock.Unlock() } // BrokerID returns broker ID assigned to the broker. func (b *MockBroker) BrokerID() int32 { return b.brokerID } // History returns a slice of RequestResponse pairs in the order they were // processed by the broker. Note that in case of multiple connections to the // broker the order expected by a test can be different from the order recorded // in the history, unless some synchronization is implemented in the test. func (b *MockBroker) History() []RequestResponse { b.lock.Lock() history := make([]RequestResponse, len(b.history)) copy(history, b.history) b.lock.Unlock() return history } // Port returns the TCP port number the broker is listening for requests on. func (b *MockBroker) Port() int32 { return b.port } // Addr returns the broker connection string in the form "
:". func (b *MockBroker) Addr() string { return b.listener.Addr().String() } // Close terminates the broker blocking until it stops internal goroutines and // releases all resources. func (b *MockBroker) Close() { close(b.expectations) if len(b.expectations) > 0 { buf := bytes.NewBufferString(fmt.Sprintf("mockbroker/%d: not all expectations were satisfied! Still waiting on:\n", b.BrokerID())) for e := range b.expectations { _, _ = buf.WriteString(spew.Sdump(e)) } b.t.Error(buf.String()) } close(b.closing) <-b.stopper } // setHandler sets the specified function as the request handler. Whenever // a mock broker reads a request from the wire it passes the request to the // function and sends back whatever the handler function returns. func (b *MockBroker) setHandler(handler requestHandlerFunc) { b.lock.Lock() b.handler = handler b.lock.Unlock() } func (b *MockBroker) serverLoop() { defer close(b.stopper) var err error var conn net.Conn go func() { <-b.closing err := b.listener.Close() if err != nil { b.t.Error(err) } }() wg := &sync.WaitGroup{} i := 0 for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() { wg.Add(1) go b.handleRequests(conn, i, wg) i++ } wg.Wait() if !isConnectionClosedError(err) { Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err) } } func (b *MockBroker) SetGSSAPIHandler(handler GSSApiHandlerFunc) { b.gssApiHandler = handler } func (b *MockBroker) readToBytes(r io.Reader) ([]byte, error) { var ( bytesRead int lengthBytes = make([]byte, 4) ) if _, err := io.ReadFull(r, lengthBytes); err != nil { return nil, err } bytesRead += len(lengthBytes) length := int32(binary.BigEndian.Uint32(lengthBytes)) if length <= 4 || length > MaxRequestSize { return nil, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)} } encodedReq := make([]byte, length) if _, err := io.ReadFull(r, encodedReq); err != nil { return nil, err } bytesRead += len(encodedReq) fullBytes := append(lengthBytes, encodedReq...) 
return fullBytes, nil } func (b *MockBroker) isGSSAPI(buffer []byte) bool { return buffer[4] == 0x60 || bytes.Equal(buffer[4:6], []byte{0x05, 0x04}) } func (b *MockBroker) handleRequests(conn io.ReadWriteCloser, idx int, wg *sync.WaitGroup) { defer wg.Done() defer func() { _ = conn.Close() }() s := spew.NewDefaultConfig() s.MaxDepth = 1 Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx) var err error abort := make(chan none) defer close(abort) go func() { select { case <-b.closing: _ = conn.Close() case <-abort: } }() var bytesWritten int var bytesRead int for { buffer, err := b.readToBytes(conn) if err != nil { if !isConnectionClosedError(err) { Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(buffer)) b.serverError(err) } break } bytesWritten = 0 if !b.isGSSAPI(buffer) { req, br, err := decodeRequest(bytes.NewReader(buffer)) bytesRead = br if err != nil { if !isConnectionClosedError(err) { Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req)) b.serverError(err) } break } if b.latency > 0 { time.Sleep(b.latency) } b.lock.Lock() res := b.handler(req) b.history = append(b.history, RequestResponse{req.body, res}) b.lock.Unlock() if res == nil { Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req)) continue } Logger.Printf( "*** mockbroker/%d/%d: replied to %T with %T\n-> %s\n-> %s", b.brokerID, idx, req.body, res, s.Sprintf("%#v", req.body), s.Sprintf("%#v", res), ) encodedRes, err := encode(res, nil) if err != nil { b.serverError(fmt.Errorf("failed to encode %T - %w", res, err)) break } if len(encodedRes) == 0 { b.lock.Lock() if b.notifier != nil { b.notifier(bytesRead, 0) } b.lock.Unlock() continue } resHeader := b.encodeHeader(res.headerVersion(), req.correlationID, uint32(len(encodedRes))) if _, err = conn.Write(resHeader); err != nil { b.serverError(err) break } if _, err = conn.Write(encodedRes); err != nil { b.serverError(err) break } bytesWritten = len(resHeader) + len(encodedRes) } else { // GSSAPI is not part of kafka protocol, but is supported for authentication proposes. 
// Don't support history for this kind of request as is only used for test GSSAPI authentication mechanism b.lock.Lock() res := b.gssApiHandler(buffer) b.lock.Unlock() if res == nil { Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(buffer)) continue } if _, err = conn.Write(res); err != nil { b.serverError(err) break } bytesWritten = len(res) } b.lock.Lock() if b.notifier != nil { b.notifier(bytesRead, bytesWritten) } b.lock.Unlock() } Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err) } func (b *MockBroker) encodeHeader(headerVersion int16, correlationId int32, payloadLength uint32) []byte { headerLength := uint32(8) if headerVersion >= 1 { headerLength = 9 } resHeader := make([]byte, headerLength) binary.BigEndian.PutUint32(resHeader, payloadLength+headerLength-4) binary.BigEndian.PutUint32(resHeader[4:], uint32(correlationId)) if headerVersion >= 1 { binary.PutUvarint(resHeader[8:], 0) } return resHeader } func (b *MockBroker) defaultRequestHandler(req *request) (res encoderWithHeader) { select { case res, ok := <-b.expectations: if !ok { return nil } return res case <-time.After(expectationTimeout): return nil } } func isConnectionClosedError(err error) bool { var result bool opError := &net.OpError{} if errors.As(err, &opError) { result = true } else if errors.Is(err, io.EOF) { result = true } else if err.Error() == "use of closed network connection" { result = true } return result } func (b *MockBroker) serverError(err error) { b.t.Helper() if isConnectionClosedError(err) { return } b.t.Errorf(err.Error()) } // NewMockBroker launches a fake Kafka broker. It takes a TestReporter as provided by the // test framework and a channel of responses to use. If an error occurs it is // simply logged to the TestReporter and the broker exits. func NewMockBroker(t TestReporter, brokerID int32) *MockBroker { return NewMockBrokerAddr(t, brokerID, "localhost:0") } // NewMockBrokerAddr behaves like newMockBroker but listens on the address you give // it rather than just some ephemeral port. func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker { var ( listener net.Listener err error ) // retry up to 20 times if address already in use (e.g., if replacing broker which hasn't cleanly shutdown) for i := 0; i < 20; i++ { listener, err = net.Listen("tcp", addr) if err != nil { if errors.Is(err, syscall.EADDRINUSE) { Logger.Printf("*** mockbroker/%d waiting for %s (address already in use)", brokerID, addr) time.Sleep(time.Millisecond * 100) continue } t.Fatal(err) } break } if err != nil { t.Fatal(err) } return NewMockBrokerListener(t, brokerID, listener) } // NewMockBrokerListener behaves like newMockBrokerAddr but accepts connections on the listener specified. 
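//
// For example (an illustrative sketch; the listener address is arbitrary):
//
//	ln, err := net.Listen("tcp", "127.0.0.1:0")
//	if err != nil {
//		t.Fatal(err)
//	}
//	broker := NewMockBrokerListener(t, 1, ln)
//	defer broker.Close()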
func NewMockBrokerListener(t TestReporter, brokerID int32, listener net.Listener) *MockBroker { var err error broker := &MockBroker{ closing: make(chan none), stopper: make(chan none), t: t, brokerID: brokerID, expectations: make(chan encoderWithHeader, 512), listener: listener, } broker.handler = broker.defaultRequestHandler Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String()) _, portStr, err := net.SplitHostPort(broker.listener.Addr().String()) if err != nil { t.Fatal(err) } tmp, err := strconv.ParseInt(portStr, 10, 32) if err != nil { t.Fatal(err) } broker.port = int32(tmp) go broker.serverLoop() return broker } func (b *MockBroker) Returns(e encoderWithHeader) { b.expectations <- e } golang-github-ibm-sarama-1.43.2/mockkerberos.go000066400000000000000000000101251461256741300213640ustar00rootroot00000000000000package sarama import ( "encoding/binary" "encoding/hex" "github.com/jcmturner/gokrb5/v8/credentials" "github.com/jcmturner/gokrb5/v8/gssapi" "github.com/jcmturner/gokrb5/v8/iana/keyusage" "github.com/jcmturner/gokrb5/v8/messages" "github.com/jcmturner/gokrb5/v8/types" ) type KafkaGSSAPIHandler struct { client *MockKerberosClient badResponse bool badKeyChecksum bool } func (h *KafkaGSSAPIHandler) MockKafkaGSSAPI(buffer []byte) []byte { // Default payload used for verify err := h.client.Login() // Mock client construct keys when login if err != nil { return nil } if h.badResponse { // Returns trash return []byte{0x00, 0x00, 0x00, 0x01, 0xAD} } pack := gssapi.WrapToken{ Flags: KRB5_USER_AUTH, EC: 12, RRC: 0, SndSeqNum: 3398292281, Payload: []byte{0x11, 0x00}, // 1100 } // Compute checksum if h.badKeyChecksum { pack.CheckSum = []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF} } else { err = pack.SetCheckSum(h.client.ASRep.DecryptedEncPart.Key, keyusage.GSSAPI_ACCEPTOR_SEAL) if err != nil { return nil } } packBytes, err := pack.Marshal() if err != nil { return nil } lenBytes := len(packBytes) response := make([]byte, lenBytes+4) copy(response[4:], packBytes) binary.BigEndian.PutUint32(response, uint32(lenBytes)) return response } type MockKerberosClient struct { asRepBytes string ASRep messages.ASRep credentials *credentials.Credentials mockError error errorStage string } func (c *MockKerberosClient) Login() error { if c.errorStage == "login" && c.mockError != nil { return c.mockError } c.asRepBytes = "6b8202e9308202e5a003020105a10302010ba22b30293027a103020113a220041e301c301aa003020112a1131b114" + "558414d504c452e434f4d636c69656e74a30d1b0b4558414d504c452e434f4da4133011a003020101a10a30081b06636c69656e7" + "4a5820156618201523082014ea003020105a10d1b0b4558414d504c452e434f4da220301ea003020102a11730151b066b7262746" + "7741b0b4558414d504c452e434f4da382011430820110a003020112a103020101a28201020481ffdb9891175d106818e61008c51" + "d0b3462bca92f3bf9d4cfa82de4c4d7aff9994ec87c573e3a3d54dcb2bb79618c76f2bf4a3d006f90d5bdbd049bc18f48be39203" + "549ca02acaf63f292b12404f9b74c34b83687119d8f56552ccc0c50ebee2a53bb114c1b4619bb1d5d31f0f49b4d40a08a9b4c046" + "2e1398d0b648be1c0e50c552ad16e1d8d8e74263dd0bf0ec591e4797dfd40a9a1be4ae830d03a306e053fd7586fef84ffc5e4a83" + "7c3122bf3e6a40fe87e84019f6283634461b955712b44a5f7386c278bff94ec2c2dc0403247e29c2450e853471ceababf9b8911f" + "997f2e3010b046d2c49eb438afb0f4c210821e80d4ffa4c9521eb895dcd68610b3feaa682012c30820128a003020112a282011f0" + "482011bce73cbce3f1dd17661c412005f0f2257c756fe8e98ff97e6ec24b7bab66e5fd3a3827aeeae4757af0c6e892948122d8b2" + 
"03c8df48df0ef5d142d0e416d688f11daa0fcd63d96bdd431d02b8e951c664eeff286a2be62383d274a04016d5f0e141da58cb86" + "331de64063062f4f885e8e9ce5b181ca2fdc67897c5995e0ae1ae0c171a64493ff7bd91bc6d89cd4fce1e2b3ea0a10e34b0d5eda" + "aa38ee727b50c5632ed1d2f2b457908e616178d0d80b72af209fb8ac9dbaa1768fa45931392b36b6d8c12400f8ded2efaa0654d0" + "da1db966e8b5aab4706c800f95d559664646041fdb38b411c62fc0fbe0d25083a28562b0e1c8df16e62e9d5626b0addee489835f" + "eedb0f26c05baa596b69b17f47920aa64b29dc77cfcc97ba47885" apRepBytes, err := hex.DecodeString(c.asRepBytes) if err != nil { return err } err = c.ASRep.Unmarshal(apRepBytes) if err != nil { return err } c.credentials = credentials.New("client", "EXAMPLE.COM").WithPassword("qwerty") _, err = c.ASRep.DecryptEncPart(c.credentials) if err != nil { return err } return nil } func (c *MockKerberosClient) GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error) { if c.errorStage == "service_ticket" && c.mockError != nil { return messages.Ticket{}, types.EncryptionKey{}, c.mockError } return c.ASRep.Ticket, c.ASRep.DecryptedEncPart.Key, nil } func (c *MockKerberosClient) Domain() string { return "EXAMPLE.COM" } func (c *MockKerberosClient) CName() types.PrincipalName { p := types.PrincipalName{ NameType: KRB5_USER_AUTH, NameString: []string{ "kafka", "kafka", }, } return p } func (c *MockKerberosClient) Destroy() { // Do nothing. } golang-github-ibm-sarama-1.43.2/mockresponses.go000066400000000000000000001200611461256741300215720ustar00rootroot00000000000000package sarama import ( "fmt" "strings" "sync" ) // TestReporter has methods matching go's testing.T to avoid importing // `testing` in the main part of the library. type TestReporter interface { Error(...interface{}) Errorf(string, ...interface{}) Fatal(...interface{}) Fatalf(string, ...interface{}) Helper() } // MockResponse is a response builder interface it defines one method that // allows generating a response based on a request body. MockResponses are used // to program behavior of MockBroker in tests. type MockResponse interface { For(reqBody versionedDecoder) (res encoderWithHeader) } // MockWrapper is a mock response builder that returns a particular concrete // response regardless of the actual request passed to the `For` method. type MockWrapper struct { res encoderWithHeader } func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoderWithHeader) { return mw.res } func NewMockWrapper(res encoderWithHeader) *MockWrapper { return &MockWrapper{res: res} } // MockSequence is a mock response builder that is created from a sequence of // concrete responses. Every time when a `MockBroker` calls its `For` method // the next response from the sequence is returned. When the end of the // sequence is reached the last element from the sequence is returned. 
type MockSequence struct { responses []MockResponse } func NewMockSequence(responses ...interface{}) *MockSequence { ms := &MockSequence{} ms.responses = make([]MockResponse, len(responses)) for i, res := range responses { switch res := res.(type) { case MockResponse: ms.responses[i] = res case encoderWithHeader: ms.responses[i] = NewMockWrapper(res) default: panic(fmt.Sprintf("Unexpected response type: %T", res)) } } return ms } func (mc *MockSequence) For(reqBody versionedDecoder) (res encoderWithHeader) { res = mc.responses[0].For(reqBody) if len(mc.responses) > 1 { mc.responses = mc.responses[1:] } return res } type MockListGroupsResponse struct { groups map[string]string t TestReporter } func NewMockListGroupsResponse(t TestReporter) *MockListGroupsResponse { return &MockListGroupsResponse{ groups: make(map[string]string), t: t, } } func (m *MockListGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader { request := reqBody.(*ListGroupsRequest) response := &ListGroupsResponse{ Version: request.Version, Groups: m.groups, } return response } func (m *MockListGroupsResponse) AddGroup(groupID, protocolType string) *MockListGroupsResponse { m.groups[groupID] = protocolType return m } type MockDescribeGroupsResponse struct { groups map[string]*GroupDescription t TestReporter } func NewMockDescribeGroupsResponse(t TestReporter) *MockDescribeGroupsResponse { return &MockDescribeGroupsResponse{ t: t, groups: make(map[string]*GroupDescription), } } func (m *MockDescribeGroupsResponse) AddGroupDescription(groupID string, description *GroupDescription) *MockDescribeGroupsResponse { m.groups[groupID] = description return m } func (m *MockDescribeGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader { request := reqBody.(*DescribeGroupsRequest) response := &DescribeGroupsResponse{Version: request.version()} for _, requestedGroup := range request.Groups { if group, ok := m.groups[requestedGroup]; ok { response.Groups = append(response.Groups, group) } else { // Mimic real kafka - if a group doesn't exist, return // an entry with state "Dead" response.Groups = append(response.Groups, &GroupDescription{ GroupId: requestedGroup, State: "Dead", }) } } return response } // MockMetadataResponse is a `MetadataResponse` builder. 
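//
// A typical construction might look like this sketch (the broker variable and
// topic names are placeholders):
//
//	NewMockMetadataResponse(t).
//		SetController(broker.BrokerID()).
//		SetBroker(broker.Addr(), broker.BrokerID()).
//		SetLeader("my-topic", 0, broker.BrokerID()).
//		SetError("forbidden-topic", ErrTopicAuthorizationFailed)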
type MockMetadataResponse struct { controllerID int32 errors map[string]KError leaders map[string]map[int32]int32 brokers map[string]int32 t TestReporter } func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse { return &MockMetadataResponse{ errors: make(map[string]KError), leaders: make(map[string]map[int32]int32), brokers: make(map[string]int32), t: t, } } func (mmr *MockMetadataResponse) SetError(topic string, kerror KError) *MockMetadataResponse { mmr.errors[topic] = kerror return mmr } func (mmr *MockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *MockMetadataResponse { partitions := mmr.leaders[topic] if partitions == nil { partitions = make(map[int32]int32) mmr.leaders[topic] = partitions } partitions[partition] = brokerID return mmr } func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMetadataResponse { mmr.brokers[addr] = brokerID return mmr } func (mmr *MockMetadataResponse) SetController(brokerID int32) *MockMetadataResponse { mmr.controllerID = brokerID return mmr } func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader { metadataRequest := reqBody.(*MetadataRequest) metadataResponse := &MetadataResponse{ Version: metadataRequest.version(), ControllerID: mmr.controllerID, } for addr, brokerID := range mmr.brokers { metadataResponse.AddBroker(addr, brokerID) } // Generate set of replicas var replicas []int32 var offlineReplicas []int32 for _, brokerID := range mmr.brokers { replicas = append(replicas, brokerID) } if len(metadataRequest.Topics) == 0 { for topic, partitions := range mmr.leaders { for partition, brokerID := range partitions { metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError) } } for topic, err := range mmr.errors { metadataResponse.AddTopic(topic, err) } return metadataResponse } for _, topic := range metadataRequest.Topics { leaders, ok := mmr.leaders[topic] if !ok { if err, ok := mmr.errors[topic]; ok { metadataResponse.AddTopic(topic, err) } else { metadataResponse.AddTopic(topic, ErrUnknownTopicOrPartition) } continue } for partition, brokerID := range leaders { metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError) } } return metadataResponse } // MockOffsetResponse is an `OffsetResponse` builder. 
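//
// An illustrative sketch (topic, partition and offsets are placeholders;
// OffsetOldest/OffsetNewest are the usual "time" keys requested by consumers):
//
//	NewMockOffsetResponse(t).
//		SetOffset("my-topic", 0, OffsetOldest, 0).
//		SetOffset("my-topic", 0, OffsetNewest, 2345)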
type MockOffsetResponse struct { offsets map[string]map[int32]map[int64]int64 t TestReporter } func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse { return &MockOffsetResponse{ offsets: make(map[string]map[int32]map[int64]int64), t: t, } } func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse { partitions := mor.offsets[topic] if partitions == nil { partitions = make(map[int32]map[int64]int64) mor.offsets[topic] = partitions } times := partitions[partition] if times == nil { times = make(map[int64]int64) partitions[partition] = times } times[time] = offset return mor } func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoderWithHeader { offsetRequest := reqBody.(*OffsetRequest) offsetResponse := &OffsetResponse{Version: offsetRequest.Version} for topic, partitions := range offsetRequest.blocks { for partition, block := range partitions { offset := mor.getOffset(topic, partition, block.timestamp) offsetResponse.AddTopicPartition(topic, partition, offset) } } return offsetResponse } func (mor *MockOffsetResponse) getOffset(topic string, partition int32, time int64) int64 { partitions := mor.offsets[topic] if partitions == nil { mor.t.Errorf("missing topic: %s", topic) } times := partitions[partition] if times == nil { mor.t.Errorf("missing partition: %d", partition) } offset, ok := times[time] if !ok { mor.t.Errorf("missing time: %d", time) } return offset } // mockMessage is a message that used to be mocked for `FetchResponse` type mockMessage struct { key Encoder msg Encoder } func newMockMessage(key, msg Encoder) *mockMessage { return &mockMessage{ key: key, msg: msg, } } // MockFetchResponse is a `FetchResponse` builder. type MockFetchResponse struct { messages map[string]map[int32]map[int64]*mockMessage messagesLock *sync.RWMutex highWaterMarks map[string]map[int32]int64 t TestReporter batchSize int } func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse { return &MockFetchResponse{ messages: make(map[string]map[int32]map[int64]*mockMessage), messagesLock: &sync.RWMutex{}, highWaterMarks: make(map[string]map[int32]int64), t: t, batchSize: batchSize, } } func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse { return mfr.SetMessageWithKey(topic, partition, offset, nil, msg) } func (mfr *MockFetchResponse) SetMessageWithKey(topic string, partition int32, offset int64, key, msg Encoder) *MockFetchResponse { mfr.messagesLock.Lock() defer mfr.messagesLock.Unlock() partitions := mfr.messages[topic] if partitions == nil { partitions = make(map[int32]map[int64]*mockMessage) mfr.messages[topic] = partitions } messages := partitions[partition] if messages == nil { messages = make(map[int64]*mockMessage) partitions[partition] = messages } messages[offset] = newMockMessage(key, msg) return mfr } func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, offset int64) *MockFetchResponse { partitions := mfr.highWaterMarks[topic] if partitions == nil { partitions = make(map[int32]int64) mfr.highWaterMarks[topic] = partitions } partitions[partition] = offset return mfr } func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoderWithHeader { fetchRequest := reqBody.(*FetchRequest) res := &FetchResponse{ Version: fetchRequest.Version, } for topic, partitions := range fetchRequest.blocks { for partition, block := range partitions { initialOffset := block.fetchOffset offset := initialOffset maxOffset := 
initialOffset + int64(mfr.getMessageCount(topic, partition)) for i := 0; i < mfr.batchSize && offset < maxOffset; { msg := mfr.getMessage(topic, partition, offset) if msg != nil { res.AddMessage(topic, partition, msg.key, msg.msg, offset) i++ } offset++ } fb := res.GetBlock(topic, partition) if fb == nil { res.AddError(topic, partition, ErrNoError) fb = res.GetBlock(topic, partition) } fb.HighWaterMarkOffset = mfr.getHighWaterMark(topic, partition) } } return res } func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) *mockMessage { mfr.messagesLock.RLock() defer mfr.messagesLock.RUnlock() partitions := mfr.messages[topic] if partitions == nil { return nil } messages := partitions[partition] if messages == nil { return nil } return messages[offset] } func (mfr *MockFetchResponse) getMessageCount(topic string, partition int32) int { mfr.messagesLock.RLock() defer mfr.messagesLock.RUnlock() partitions := mfr.messages[topic] if partitions == nil { return 0 } messages := partitions[partition] if messages == nil { return 0 } return len(messages) } func (mfr *MockFetchResponse) getHighWaterMark(topic string, partition int32) int64 { partitions := mfr.highWaterMarks[topic] if partitions == nil { return 0 } return partitions[partition] } // MockConsumerMetadataResponse is a `ConsumerMetadataResponse` builder. type MockConsumerMetadataResponse struct { coordinators map[string]interface{} t TestReporter } func NewMockConsumerMetadataResponse(t TestReporter) *MockConsumerMetadataResponse { return &MockConsumerMetadataResponse{ coordinators: make(map[string]interface{}), t: t, } } func (mr *MockConsumerMetadataResponse) SetCoordinator(group string, broker *MockBroker) *MockConsumerMetadataResponse { mr.coordinators[group] = broker return mr } func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *MockConsumerMetadataResponse { mr.coordinators[group] = kerror return mr } func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*ConsumerMetadataRequest) group := req.ConsumerGroup res := &ConsumerMetadataResponse{Version: req.version()} v := mr.coordinators[group] switch v := v.(type) { case *MockBroker: res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()} case KError: res.Err = v } return res } // MockFindCoordinatorResponse is a `FindCoordinatorResponse` builder. 
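//
// A hedged example of wiring up a group coordinator (the group name is a
// placeholder and "broker" is an existing *MockBroker):
//
//	NewMockFindCoordinatorResponse(t).
//		SetCoordinator(CoordinatorGroup, "my-group", broker)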
type MockFindCoordinatorResponse struct { groupCoordinators map[string]interface{} transCoordinators map[string]interface{} t TestReporter } func NewMockFindCoordinatorResponse(t TestReporter) *MockFindCoordinatorResponse { return &MockFindCoordinatorResponse{ groupCoordinators: make(map[string]interface{}), transCoordinators: make(map[string]interface{}), t: t, } } func (mr *MockFindCoordinatorResponse) SetCoordinator(coordinatorType CoordinatorType, group string, broker *MockBroker) *MockFindCoordinatorResponse { switch coordinatorType { case CoordinatorGroup: mr.groupCoordinators[group] = broker case CoordinatorTransaction: mr.transCoordinators[group] = broker } return mr } func (mr *MockFindCoordinatorResponse) SetError(coordinatorType CoordinatorType, group string, kerror KError) *MockFindCoordinatorResponse { switch coordinatorType { case CoordinatorGroup: mr.groupCoordinators[group] = kerror case CoordinatorTransaction: mr.transCoordinators[group] = kerror } return mr } func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*FindCoordinatorRequest) res := &FindCoordinatorResponse{Version: req.version()} var v interface{} switch req.CoordinatorType { case CoordinatorGroup: v = mr.groupCoordinators[req.CoordinatorKey] case CoordinatorTransaction: v = mr.transCoordinators[req.CoordinatorKey] } switch v := v.(type) { case *MockBroker: res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()} case KError: res.Err = v } return res } // MockOffsetCommitResponse is a `OffsetCommitResponse` builder. type MockOffsetCommitResponse struct { errors map[string]map[string]map[int32]KError t TestReporter } func NewMockOffsetCommitResponse(t TestReporter) *MockOffsetCommitResponse { return &MockOffsetCommitResponse{t: t} } func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int32, kerror KError) *MockOffsetCommitResponse { if mr.errors == nil { mr.errors = make(map[string]map[string]map[int32]KError) } topics := mr.errors[group] if topics == nil { topics = make(map[string]map[int32]KError) mr.errors[group] = topics } partitions := topics[topic] if partitions == nil { partitions = make(map[int32]KError) topics[topic] = partitions } partitions[partition] = kerror return mr } func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*OffsetCommitRequest) group := req.ConsumerGroup res := &OffsetCommitResponse{Version: req.version()} for topic, partitions := range req.blocks { for partition := range partitions { res.AddError(topic, partition, mr.getError(group, topic, partition)) } } return res } func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int32) KError { topics := mr.errors[group] if topics == nil { return ErrNoError } partitions := topics[topic] if partitions == nil { return ErrNoError } kerror, ok := partitions[partition] if !ok { return ErrNoError } return kerror } // MockProduceResponse is a `ProduceResponse` builder. 
type MockProduceResponse struct { version int16 errors map[string]map[int32]KError t TestReporter } func NewMockProduceResponse(t TestReporter) *MockProduceResponse { return &MockProduceResponse{t: t} } func (mr *MockProduceResponse) SetVersion(version int16) *MockProduceResponse { mr.version = version return mr } func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse { if mr.errors == nil { mr.errors = make(map[string]map[int32]KError) } partitions := mr.errors[topic] if partitions == nil { partitions = make(map[int32]KError) mr.errors[topic] = partitions } partitions[partition] = kerror return mr } func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*ProduceRequest) res := &ProduceResponse{ Version: req.version(), } if mr.version > 0 { res.Version = mr.version } for topic, partitions := range req.records { for partition := range partitions { res.AddTopicPartition(topic, partition, mr.getError(topic, partition)) } } return res } func (mr *MockProduceResponse) getError(topic string, partition int32) KError { partitions := mr.errors[topic] if partitions == nil { return ErrNoError } kerror, ok := partitions[partition] if !ok { return ErrNoError } return kerror } // MockOffsetFetchResponse is a `OffsetFetchResponse` builder. type MockOffsetFetchResponse struct { offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock error KError t TestReporter } func NewMockOffsetFetchResponse(t TestReporter) *MockOffsetFetchResponse { return &MockOffsetFetchResponse{t: t} } func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int32, offset int64, metadata string, kerror KError) *MockOffsetFetchResponse { if mr.offsets == nil { mr.offsets = make(map[string]map[string]map[int32]*OffsetFetchResponseBlock) } topics := mr.offsets[group] if topics == nil { topics = make(map[string]map[int32]*OffsetFetchResponseBlock) mr.offsets[group] = topics } partitions := topics[topic] if partitions == nil { partitions = make(map[int32]*OffsetFetchResponseBlock) topics[topic] = partitions } partitions[partition] = &OffsetFetchResponseBlock{offset, 0, metadata, kerror} return mr } func (mr *MockOffsetFetchResponse) SetError(kerror KError) *MockOffsetFetchResponse { mr.error = kerror return mr } func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*OffsetFetchRequest) group := req.ConsumerGroup res := &OffsetFetchResponse{Version: req.Version} for topic, partitions := range mr.offsets[group] { for partition, block := range partitions { res.AddBlock(topic, partition, block) } } if res.Version >= 2 { res.Err = mr.error } return res } type MockCreateTopicsResponse struct { t TestReporter } func NewMockCreateTopicsResponse(t TestReporter) *MockCreateTopicsResponse { return &MockCreateTopicsResponse{t: t} } func (mr *MockCreateTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*CreateTopicsRequest) res := &CreateTopicsResponse{ Version: req.Version, } res.TopicErrors = make(map[string]*TopicError) for topic := range req.TopicDetails { if res.Version >= 1 && strings.HasPrefix(topic, "_") { msg := "insufficient permissions to create topic with reserved prefix" res.TopicErrors[topic] = &TopicError{ Err: ErrTopicAuthorizationFailed, ErrMsg: &msg, } continue } res.TopicErrors[topic] = &TopicError{Err: ErrNoError} } return res } type MockDeleteTopicsResponse struct { t TestReporter error KError } func NewMockDeleteTopicsResponse(t 
TestReporter) *MockDeleteTopicsResponse { return &MockDeleteTopicsResponse{t: t} } func (mr *MockDeleteTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*DeleteTopicsRequest) res := &DeleteTopicsResponse{Version: req.version()} res.TopicErrorCodes = make(map[string]KError) for _, topic := range req.Topics { res.TopicErrorCodes[topic] = mr.error } res.Version = req.Version return res } func (mr *MockDeleteTopicsResponse) SetError(kerror KError) *MockDeleteTopicsResponse { mr.error = kerror return mr } type MockCreatePartitionsResponse struct { t TestReporter } func NewMockCreatePartitionsResponse(t TestReporter) *MockCreatePartitionsResponse { return &MockCreatePartitionsResponse{t: t} } func (mr *MockCreatePartitionsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*CreatePartitionsRequest) res := &CreatePartitionsResponse{Version: req.version()} res.TopicPartitionErrors = make(map[string]*TopicPartitionError) for topic := range req.TopicPartitions { if strings.HasPrefix(topic, "_") { msg := "insufficient permissions to create partition on topic with reserved prefix" res.TopicPartitionErrors[topic] = &TopicPartitionError{ Err: ErrTopicAuthorizationFailed, ErrMsg: &msg, } continue } res.TopicPartitionErrors[topic] = &TopicPartitionError{Err: ErrNoError} } return res } type MockAlterPartitionReassignmentsResponse struct { t TestReporter } func NewMockAlterPartitionReassignmentsResponse(t TestReporter) *MockAlterPartitionReassignmentsResponse { return &MockAlterPartitionReassignmentsResponse{t: t} } func (mr *MockAlterPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*AlterPartitionReassignmentsRequest) _ = req res := &AlterPartitionReassignmentsResponse{Version: req.version()} return res } type MockListPartitionReassignmentsResponse struct { t TestReporter } func NewMockListPartitionReassignmentsResponse(t TestReporter) *MockListPartitionReassignmentsResponse { return &MockListPartitionReassignmentsResponse{t: t} } func (mr *MockListPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*ListPartitionReassignmentsRequest) _ = req res := &ListPartitionReassignmentsResponse{Version: req.version()} for topic, partitions := range req.blocks { for _, partition := range partitions { res.AddBlock(topic, partition, []int32{0}, []int32{1}, []int32{2}) } } return res } type MockDeleteRecordsResponse struct { t TestReporter } func NewMockDeleteRecordsResponse(t TestReporter) *MockDeleteRecordsResponse { return &MockDeleteRecordsResponse{t: t} } func (mr *MockDeleteRecordsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*DeleteRecordsRequest) res := &DeleteRecordsResponse{Version: req.version()} res.Topics = make(map[string]*DeleteRecordsResponseTopic) for topic, deleteRecordRequestTopic := range req.Topics { partitions := make(map[int32]*DeleteRecordsResponsePartition) for partition := range deleteRecordRequestTopic.PartitionOffsets { partitions[partition] = &DeleteRecordsResponsePartition{Err: ErrNoError} } res.Topics[topic] = &DeleteRecordsResponseTopic{Partitions: partitions} } return res } type MockDescribeConfigsResponse struct { t TestReporter } func NewMockDescribeConfigsResponse(t TestReporter) *MockDescribeConfigsResponse { return &MockDescribeConfigsResponse{t: t} } func (mr *MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*DescribeConfigsRequest) res := 
&DescribeConfigsResponse{ Version: req.Version, } includeSynonyms := req.Version > 0 includeSource := req.Version > 0 for _, r := range req.Resources { var configEntries []*ConfigEntry switch r.Type { case BrokerResource: configEntries = append(configEntries, &ConfigEntry{ Name: "min.insync.replicas", Value: "2", ReadOnly: false, Default: false, }, ) res.Resources = append(res.Resources, &ResourceResponse{ Name: r.Name, Configs: configEntries, }) case BrokerLoggerResource: configEntries = append(configEntries, &ConfigEntry{ Name: "kafka.controller.KafkaController", Value: "DEBUG", ReadOnly: false, Default: false, }, ) res.Resources = append(res.Resources, &ResourceResponse{ Name: r.Name, Configs: configEntries, }) case TopicResource: maxMessageBytes := &ConfigEntry{ Name: "max.message.bytes", Value: "1000000", ReadOnly: false, Default: !includeSource, Sensitive: false, } if includeSource { maxMessageBytes.Source = SourceDefault } if includeSynonyms { maxMessageBytes.Synonyms = []*ConfigSynonym{ { ConfigName: "max.message.bytes", ConfigValue: "500000", }, } } retentionMs := &ConfigEntry{ Name: "retention.ms", Value: "5000", ReadOnly: false, Default: false, Sensitive: false, } if includeSynonyms { retentionMs.Synonyms = []*ConfigSynonym{ { ConfigName: "log.retention.ms", ConfigValue: "2500", }, } } password := &ConfigEntry{ Name: "password", Value: "12345", ReadOnly: false, Default: false, Sensitive: true, } configEntries = append( configEntries, maxMessageBytes, retentionMs, password) res.Resources = append(res.Resources, &ResourceResponse{ Name: r.Name, Configs: configEntries, }) } } return res } type MockDescribeConfigsResponseWithErrorCode struct { t TestReporter } func NewMockDescribeConfigsResponseWithErrorCode(t TestReporter) *MockDescribeConfigsResponseWithErrorCode { return &MockDescribeConfigsResponseWithErrorCode{t: t} } func (mr *MockDescribeConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*DescribeConfigsRequest) res := &DescribeConfigsResponse{ Version: req.Version, } for _, r := range req.Resources { res.Resources = append(res.Resources, &ResourceResponse{ Name: r.Name, Type: r.Type, ErrorCode: 83, ErrorMsg: "", }) } return res } type MockAlterConfigsResponse struct { t TestReporter } func NewMockAlterConfigsResponse(t TestReporter) *MockAlterConfigsResponse { return &MockAlterConfigsResponse{t: t} } func (mr *MockAlterConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*AlterConfigsRequest) res := &AlterConfigsResponse{Version: req.version()} for _, r := range req.Resources { res.Resources = append(res.Resources, &AlterConfigsResourceResponse{ Name: r.Name, Type: r.Type, ErrorMsg: "", }) } return res } type MockAlterConfigsResponseWithErrorCode struct { t TestReporter } func NewMockAlterConfigsResponseWithErrorCode(t TestReporter) *MockAlterConfigsResponseWithErrorCode { return &MockAlterConfigsResponseWithErrorCode{t: t} } func (mr *MockAlterConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*AlterConfigsRequest) res := &AlterConfigsResponse{Version: req.version()} for _, r := range req.Resources { res.Resources = append(res.Resources, &AlterConfigsResourceResponse{ Name: r.Name, Type: r.Type, ErrorCode: 83, ErrorMsg: "", }) } return res } type MockIncrementalAlterConfigsResponse struct { t TestReporter } func NewMockIncrementalAlterConfigsResponse(t TestReporter) *MockIncrementalAlterConfigsResponse { return &MockIncrementalAlterConfigsResponse{t: t} } 
func (mr *MockIncrementalAlterConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*IncrementalAlterConfigsRequest) res := &IncrementalAlterConfigsResponse{Version: req.version()} for _, r := range req.Resources { res.Resources = append(res.Resources, &AlterConfigsResourceResponse{ Name: r.Name, Type: r.Type, ErrorMsg: "", }) } return res } type MockIncrementalAlterConfigsResponseWithErrorCode struct { t TestReporter } func NewMockIncrementalAlterConfigsResponseWithErrorCode(t TestReporter) *MockIncrementalAlterConfigsResponseWithErrorCode { return &MockIncrementalAlterConfigsResponseWithErrorCode{t: t} } func (mr *MockIncrementalAlterConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*IncrementalAlterConfigsRequest) res := &IncrementalAlterConfigsResponse{Version: req.version()} for _, r := range req.Resources { res.Resources = append(res.Resources, &AlterConfigsResourceResponse{ Name: r.Name, Type: r.Type, ErrorCode: 83, ErrorMsg: "", }) } return res } type MockCreateAclsResponse struct { t TestReporter } func NewMockCreateAclsResponse(t TestReporter) *MockCreateAclsResponse { return &MockCreateAclsResponse{t: t} } func (mr *MockCreateAclsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*CreateAclsRequest) res := &CreateAclsResponse{Version: req.version()} for range req.AclCreations { res.AclCreationResponses = append(res.AclCreationResponses, &AclCreationResponse{Err: ErrNoError}) } return res } type MockCreateAclsResponseError struct { t TestReporter } func NewMockCreateAclsResponseWithError(t TestReporter) *MockCreateAclsResponseError { return &MockCreateAclsResponseError{t: t} } func (mr *MockCreateAclsResponseError) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*CreateAclsRequest) res := &CreateAclsResponse{Version: req.version()} for range req.AclCreations { res.AclCreationResponses = append(res.AclCreationResponses, &AclCreationResponse{Err: ErrInvalidRequest}) } return res } type MockListAclsResponse struct { t TestReporter } func NewMockListAclsResponse(t TestReporter) *MockListAclsResponse { return &MockListAclsResponse{t: t} } func (mr *MockListAclsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*DescribeAclsRequest) res := &DescribeAclsResponse{Version: req.version()} res.Err = ErrNoError acl := &ResourceAcls{} if req.ResourceName != nil { acl.Resource.ResourceName = *req.ResourceName } acl.Resource.ResourcePatternType = req.ResourcePatternTypeFilter acl.Resource.ResourceType = req.ResourceType host := "*" if req.Host != nil { host = *req.Host } principal := "User:test" if req.Principal != nil { principal = *req.Principal } permissionType := req.PermissionType if permissionType == AclPermissionAny { permissionType = AclPermissionAllow } acl.Acls = append(acl.Acls, &Acl{Operation: req.Operation, PermissionType: permissionType, Host: host, Principal: principal}) res.ResourceAcls = append(res.ResourceAcls, acl) res.Version = int16(req.Version) return res } type MockSaslAuthenticateResponse struct { t TestReporter kerror KError saslAuthBytes []byte sessionLifetimeMs int64 } func NewMockSaslAuthenticateResponse(t TestReporter) *MockSaslAuthenticateResponse { return &MockSaslAuthenticateResponse{t: t} } func (msar *MockSaslAuthenticateResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*SaslAuthenticateRequest) res := &SaslAuthenticateResponse{ Version: req.version(), Err: msar.kerror, SaslAuthBytes: 
msar.saslAuthBytes, SessionLifetimeMs: msar.sessionLifetimeMs, } return res } func (msar *MockSaslAuthenticateResponse) SetError(kerror KError) *MockSaslAuthenticateResponse { msar.kerror = kerror return msar } func (msar *MockSaslAuthenticateResponse) SetAuthBytes(saslAuthBytes []byte) *MockSaslAuthenticateResponse { msar.saslAuthBytes = saslAuthBytes return msar } func (msar *MockSaslAuthenticateResponse) SetSessionLifetimeMs(sessionLifetimeMs int64) *MockSaslAuthenticateResponse { msar.sessionLifetimeMs = sessionLifetimeMs return msar } type MockDeleteAclsResponse struct { t TestReporter } type MockSaslHandshakeResponse struct { enabledMechanisms []string kerror KError t TestReporter } func NewMockSaslHandshakeResponse(t TestReporter) *MockSaslHandshakeResponse { return &MockSaslHandshakeResponse{t: t} } func (mshr *MockSaslHandshakeResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*SaslHandshakeRequest) res := &SaslHandshakeResponse{Version: req.version()} res.Err = mshr.kerror res.EnabledMechanisms = mshr.enabledMechanisms return res } func (mshr *MockSaslHandshakeResponse) SetError(kerror KError) *MockSaslHandshakeResponse { mshr.kerror = kerror return mshr } func (mshr *MockSaslHandshakeResponse) SetEnabledMechanisms(enabledMechanisms []string) *MockSaslHandshakeResponse { mshr.enabledMechanisms = enabledMechanisms return mshr } func NewMockDeleteAclsResponse(t TestReporter) *MockDeleteAclsResponse { return &MockDeleteAclsResponse{t: t} } func (mr *MockDeleteAclsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*DeleteAclsRequest) res := &DeleteAclsResponse{Version: req.version()} for range req.Filters { response := &FilterResponse{Err: ErrNoError} response.MatchingAcls = append(response.MatchingAcls, &MatchingAcl{Err: ErrNoError}) res.FilterResponses = append(res.FilterResponses, response) } res.Version = int16(req.Version) return res } type MockDeleteGroupsResponse struct { deletedGroups []string } func NewMockDeleteGroupsRequest(t TestReporter) *MockDeleteGroupsResponse { return &MockDeleteGroupsResponse{} } func (m *MockDeleteGroupsResponse) SetDeletedGroups(groups []string) *MockDeleteGroupsResponse { m.deletedGroups = groups return m } func (m *MockDeleteGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*DeleteGroupsRequest) resp := &DeleteGroupsResponse{ Version: req.version(), GroupErrorCodes: map[string]KError{}, } for _, group := range m.deletedGroups { resp.GroupErrorCodes[group] = ErrNoError } return resp } type MockDeleteOffsetResponse struct { errorCode KError topic string partition int32 errorPartition KError } func NewMockDeleteOffsetRequest(t TestReporter) *MockDeleteOffsetResponse { return &MockDeleteOffsetResponse{} } func (m *MockDeleteOffsetResponse) SetDeletedOffset(errorCode KError, topic string, partition int32, errorPartition KError) *MockDeleteOffsetResponse { m.errorCode = errorCode m.topic = topic m.partition = partition m.errorPartition = errorPartition return m } func (m *MockDeleteOffsetResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*DeleteOffsetsRequest) resp := &DeleteOffsetsResponse{ Version: req.version(), ErrorCode: m.errorCode, Errors: map[string]map[int32]KError{ m.topic: {m.partition: m.errorPartition}, }, } return resp } type MockJoinGroupResponse struct { t TestReporter ThrottleTime int32 Err KError GenerationId int32 GroupProtocol string LeaderId string MemberId string Members []GroupMember } func NewMockJoinGroupResponse(t 
TestReporter) *MockJoinGroupResponse { return &MockJoinGroupResponse{ t: t, Members: make([]GroupMember, 0), } } func (m *MockJoinGroupResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*JoinGroupRequest) resp := &JoinGroupResponse{ Version: req.Version, ThrottleTime: m.ThrottleTime, Err: m.Err, GenerationId: m.GenerationId, GroupProtocol: m.GroupProtocol, LeaderId: m.LeaderId, MemberId: m.MemberId, Members: m.Members, } return resp } func (m *MockJoinGroupResponse) SetThrottleTime(t int32) *MockJoinGroupResponse { m.ThrottleTime = t return m } func (m *MockJoinGroupResponse) SetError(kerr KError) *MockJoinGroupResponse { m.Err = kerr return m } func (m *MockJoinGroupResponse) SetGenerationId(id int32) *MockJoinGroupResponse { m.GenerationId = id return m } func (m *MockJoinGroupResponse) SetGroupProtocol(proto string) *MockJoinGroupResponse { m.GroupProtocol = proto return m } func (m *MockJoinGroupResponse) SetLeaderId(id string) *MockJoinGroupResponse { m.LeaderId = id return m } func (m *MockJoinGroupResponse) SetMemberId(id string) *MockJoinGroupResponse { m.MemberId = id return m } func (m *MockJoinGroupResponse) SetMember(id string, meta *ConsumerGroupMemberMetadata) *MockJoinGroupResponse { bin, err := encode(meta, nil) if err != nil { panic(fmt.Sprintf("error encoding member metadata: %v", err)) } m.Members = append(m.Members, GroupMember{MemberId: id, Metadata: bin}) return m } type MockLeaveGroupResponse struct { t TestReporter Err KError } func NewMockLeaveGroupResponse(t TestReporter) *MockLeaveGroupResponse { return &MockLeaveGroupResponse{t: t} } func (m *MockLeaveGroupResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*LeaveGroupRequest) resp := &LeaveGroupResponse{ Version: req.version(), Err: m.Err, } return resp } func (m *MockLeaveGroupResponse) SetError(kerr KError) *MockLeaveGroupResponse { m.Err = kerr return m } type MockSyncGroupResponse struct { t TestReporter Err KError MemberAssignment []byte } func NewMockSyncGroupResponse(t TestReporter) *MockSyncGroupResponse { return &MockSyncGroupResponse{t: t} } func (m *MockSyncGroupResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*SyncGroupRequest) resp := &SyncGroupResponse{ Version: req.version(), Err: m.Err, MemberAssignment: m.MemberAssignment, } return resp } func (m *MockSyncGroupResponse) SetError(kerr KError) *MockSyncGroupResponse { m.Err = kerr return m } func (m *MockSyncGroupResponse) SetMemberAssignment(assignment *ConsumerGroupMemberAssignment) *MockSyncGroupResponse { bin, err := encode(assignment, nil) if err != nil { panic(fmt.Sprintf("error encoding member assignment: %v", err)) } m.MemberAssignment = bin return m } type MockHeartbeatResponse struct { t TestReporter Err KError } func NewMockHeartbeatResponse(t TestReporter) *MockHeartbeatResponse { return &MockHeartbeatResponse{t: t} } func (m *MockHeartbeatResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*HeartbeatRequest) resp := &HeartbeatResponse{ Version: req.version(), } return resp } func (m *MockHeartbeatResponse) SetError(kerr KError) *MockHeartbeatResponse { m.Err = kerr return m } type MockDescribeLogDirsResponse struct { t TestReporter logDirs []DescribeLogDirsResponseDirMetadata } func NewMockDescribeLogDirsResponse(t TestReporter) *MockDescribeLogDirsResponse { return &MockDescribeLogDirsResponse{t: t} } func (m *MockDescribeLogDirsResponse) SetLogDirs(logDirPath string, topicPartitions map[string]int) *MockDescribeLogDirsResponse { var 
topics []DescribeLogDirsResponseTopic for topic := range topicPartitions { var partitions []DescribeLogDirsResponsePartition for i := 0; i < topicPartitions[topic]; i++ { partitions = append(partitions, DescribeLogDirsResponsePartition{ PartitionID: int32(i), IsTemporary: false, OffsetLag: int64(0), Size: int64(1234), }) } topics = append(topics, DescribeLogDirsResponseTopic{ Topic: topic, Partitions: partitions, }) } logDir := DescribeLogDirsResponseDirMetadata{ ErrorCode: ErrNoError, Path: logDirPath, Topics: topics, } m.logDirs = []DescribeLogDirsResponseDirMetadata{logDir} return m } func (m *MockDescribeLogDirsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*DescribeLogDirsRequest) resp := &DescribeLogDirsResponse{ Version: req.version(), LogDirs: m.logDirs, } return resp } type MockApiVersionsResponse struct { t TestReporter apiKeys []ApiVersionsResponseKey } func NewMockApiVersionsResponse(t TestReporter) *MockApiVersionsResponse { return &MockApiVersionsResponse{ t: t, apiKeys: []ApiVersionsResponseKey{ { ApiKey: 0, MinVersion: 5, MaxVersion: 8, }, { ApiKey: 1, MinVersion: 7, MaxVersion: 11, }, }, } } func (m *MockApiVersionsResponse) SetApiKeys(apiKeys []ApiVersionsResponseKey) *MockApiVersionsResponse { m.apiKeys = apiKeys return m } func (m *MockApiVersionsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*ApiVersionsRequest) res := &ApiVersionsResponse{ Version: req.Version, ApiKeys: m.apiKeys, } return res } // MockInitProducerIDResponse is an `InitPorducerIDResponse` builder. type MockInitProducerIDResponse struct { producerID int64 producerEpoch int16 err KError t TestReporter } func NewMockInitProducerIDResponse(t TestReporter) *MockInitProducerIDResponse { return &MockInitProducerIDResponse{ t: t, } } func (m *MockInitProducerIDResponse) SetProducerID(id int) *MockInitProducerIDResponse { m.producerID = int64(id) return m } func (m *MockInitProducerIDResponse) SetProducerEpoch(epoch int) *MockInitProducerIDResponse { m.producerEpoch = int16(epoch) return m } func (m *MockInitProducerIDResponse) SetError(err KError) *MockInitProducerIDResponse { m.err = err return m } func (m *MockInitProducerIDResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*InitProducerIDRequest) res := &InitProducerIDResponse{ Version: req.Version, Err: m.err, ProducerID: m.producerID, ProducerEpoch: m.producerEpoch, } return res } golang-github-ibm-sarama-1.43.2/mocks/000077500000000000000000000000001461256741300174645ustar00rootroot00000000000000golang-github-ibm-sarama-1.43.2/mocks/README.md000066400000000000000000000014341461256741300207450ustar00rootroot00000000000000# sarama/mocks The `mocks` subpackage includes mock implementations that implement the interfaces of the major sarama types. You can use them to test your sarama applications using dependency injection. The following mock objects are available: - [Consumer](https://pkg.go.dev/github.com/IBM/sarama/mocks#Consumer), which will create [PartitionConsumer](https://pkg.go.dev/github.com/IBM/sarama/mocks#PartitionConsumer) mocks. - [AsyncProducer](https://pkg.go.dev/github.com/IBM/sarama/mocks#AsyncProducer) - [SyncProducer](https://pkg.go.dev/github.com/IBM/sarama/mocks#SyncProducer) The mocks allow you to set expectations on them. When you close the mocks, the expectations will be verified, and the results will be reported to the `*testing.T` object you provided when creating the mock. 
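As a quick illustration of that workflow (this sketch is not part of the package itself; the test name and topic are hypothetical, and it assumes `Producer.Return.Successes` is enabled so acknowledgements arrive on the Successes channel):

```go
package example_test

import (
	"testing"

	"github.com/IBM/sarama"
	"github.com/IBM/sarama/mocks"
)

// TestPublishesGreeting is a hypothetical test: set one expectation per message
// the code under test will send, exercise it, then Close to verify.
func TestPublishesGreeting(t *testing.T) {
	config := sarama.NewConfig()
	config.Producer.Return.Successes = true

	mp := mocks.NewAsyncProducer(t, config)
	mp.ExpectInputAndSucceed()

	// The code under test would normally send this message.
	mp.Input() <- &sarama.ProducerMessage{
		Topic: "greetings", // hypothetical topic
		Value: sarama.StringEncoder("hello world"),
	}
	<-mp.Successes()

	// Close flushes the mock and reports any unmet or excess expectations
	// to the *testing.T passed to NewAsyncProducer.
	if err := mp.Close(); err != nil {
		t.Error(err)
	}
}
```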
golang-github-ibm-sarama-1.43.2/mocks/async_producer.go000066400000000000000000000244271461256741300230440ustar00rootroot00000000000000package mocks import ( "errors" "sync" "github.com/IBM/sarama" ) // AsyncProducer implements sarama's Producer interface for testing purposes. // Before you can send messages to it's Input channel, you have to set expectations // so it knows how to handle the input; it returns an error if the number of messages // received is bigger then the number of expectations set. You can also set a // function in each expectation so that the message is checked by this function and // an error is returned if the match fails. type AsyncProducer struct { l sync.Mutex t ErrorReporter expectations []*producerExpectation closed chan struct{} input chan *sarama.ProducerMessage successes chan *sarama.ProducerMessage errors chan *sarama.ProducerError isTransactional bool txnLock sync.Mutex txnStatus sarama.ProducerTxnStatusFlag lastOffset int64 *TopicConfig } // NewAsyncProducer instantiates a new Producer mock. The t argument should // be the *testing.T instance of your test method. An error will be written to it if // an expectation is violated. The config argument is validated and used to determine // whether it should ack successes on the Successes channel and handle partitioning. func NewAsyncProducer(t ErrorReporter, config *sarama.Config) *AsyncProducer { if config == nil { config = sarama.NewConfig() } if err := config.Validate(); err != nil { t.Errorf("Invalid mock configuration provided: %s", err.Error()) } mp := &AsyncProducer{ t: t, closed: make(chan struct{}), expectations: make([]*producerExpectation, 0), input: make(chan *sarama.ProducerMessage, config.ChannelBufferSize), successes: make(chan *sarama.ProducerMessage, config.ChannelBufferSize), errors: make(chan *sarama.ProducerError, config.ChannelBufferSize), isTransactional: config.Producer.Transaction.ID != "", txnStatus: sarama.ProducerTxnFlagReady, TopicConfig: NewTopicConfig(), } go func() { defer func() { close(mp.successes) close(mp.errors) close(mp.closed) }() partitioners := make(map[string]sarama.Partitioner, 1) for msg := range mp.input { mp.txnLock.Lock() if mp.IsTransactional() && mp.txnStatus&sarama.ProducerTxnFlagInTransaction == 0 { mp.t.Errorf("attempt to send message when transaction is not started or is in ending state.") mp.errors <- &sarama.ProducerError{Err: errors.New("attempt to send message when transaction is not started or is in ending state"), Msg: msg} continue } mp.txnLock.Unlock() partitioner := partitioners[msg.Topic] if partitioner == nil { partitioner = config.Producer.Partitioner(msg.Topic) partitioners[msg.Topic] = partitioner } mp.l.Lock() if mp.expectations == nil || len(mp.expectations) == 0 { mp.expectations = nil mp.t.Errorf("No more expectation set on this mock producer to handle the input message.") } else { expectation := mp.expectations[0] mp.expectations = mp.expectations[1:] partition, err := partitioner.Partition(msg, mp.partitions(msg.Topic)) if err != nil { mp.t.Errorf("Partitioner returned an error: %s", err.Error()) mp.errors <- &sarama.ProducerError{Err: err, Msg: msg} } else { msg.Partition = partition if expectation.CheckFunction != nil { err := expectation.CheckFunction(msg) if err != nil { mp.t.Errorf("Check function returned an error: %s", err.Error()) mp.errors <- &sarama.ProducerError{Err: err, Msg: msg} } } if errors.Is(expectation.Result, errProduceSuccess) { mp.lastOffset++ if config.Producer.Return.Successes { msg.Offset = mp.lastOffset 
mp.successes <- msg } } else if config.Producer.Return.Errors { mp.errors <- &sarama.ProducerError{Err: expectation.Result, Msg: msg} } } } mp.l.Unlock() } mp.l.Lock() if len(mp.expectations) > 0 { mp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(mp.expectations)) } mp.l.Unlock() }() return mp } //////////////////////////////////////////////// // Implement Producer interface //////////////////////////////////////////////// // AsyncClose corresponds with the AsyncClose method of sarama's Producer implementation. // By closing a mock producer, you also tell it that no more input will be provided, so it will // write an error to the test state if there's any remaining expectations. func (mp *AsyncProducer) AsyncClose() { close(mp.input) } // Close corresponds with the Close method of sarama's Producer implementation. // By closing a mock producer, you also tell it that no more input will be provided, so it will // write an error to the test state if there's any remaining expectations. func (mp *AsyncProducer) Close() error { mp.AsyncClose() <-mp.closed return nil } // Input corresponds with the Input method of sarama's Producer implementation. // You have to set expectations on the mock producer before writing messages to the Input // channel, so it knows how to handle them. If there is no more remaining expectations and // a messages is written to the Input channel, the mock producer will write an error to the test // state object. func (mp *AsyncProducer) Input() chan<- *sarama.ProducerMessage { return mp.input } // Successes corresponds with the Successes method of sarama's Producer implementation. func (mp *AsyncProducer) Successes() <-chan *sarama.ProducerMessage { return mp.successes } // Errors corresponds with the Errors method of sarama's Producer implementation. func (mp *AsyncProducer) Errors() <-chan *sarama.ProducerError { return mp.errors } func (mp *AsyncProducer) IsTransactional() bool { return mp.isTransactional } func (mp *AsyncProducer) BeginTxn() error { mp.txnLock.Lock() defer mp.txnLock.Unlock() mp.txnStatus = sarama.ProducerTxnFlagInTransaction return nil } func (mp *AsyncProducer) CommitTxn() error { mp.txnLock.Lock() defer mp.txnLock.Unlock() mp.txnStatus = sarama.ProducerTxnFlagReady return nil } func (mp *AsyncProducer) AbortTxn() error { mp.txnLock.Lock() defer mp.txnLock.Unlock() mp.txnStatus = sarama.ProducerTxnFlagReady return nil } func (mp *AsyncProducer) TxnStatus() sarama.ProducerTxnStatusFlag { mp.txnLock.Lock() defer mp.txnLock.Unlock() return mp.txnStatus } func (mp *AsyncProducer) AddOffsetsToTxn(offsets map[string][]*sarama.PartitionOffsetMetadata, groupId string) error { return nil } func (mp *AsyncProducer) AddMessageToTxn(msg *sarama.ConsumerMessage, groupId string, metadata *string) error { return nil } //////////////////////////////////////////////// // Setting expectations //////////////////////////////////////////////// // ExpectInputWithMessageCheckerFunctionAndSucceed sets an expectation on the mock producer that a // message will be provided on the input channel. The mock producer will call the given function to // check the message. If an error is returned it will be made available on the Errors channel // otherwise the mock will handle the message as if it produced successfully, i.e. it will make it // available on the Successes channel if the Producer.Return.Successes setting is set to true. 
func (mp *AsyncProducer) ExpectInputWithMessageCheckerFunctionAndSucceed(cf MessageChecker) *AsyncProducer { mp.l.Lock() defer mp.l.Unlock() mp.expectations = append(mp.expectations, &producerExpectation{Result: errProduceSuccess, CheckFunction: cf}) return mp } // ExpectInputWithMessageCheckerFunctionAndFail sets an expectation on the mock producer that a // message will be provided on the input channel. The mock producer will first call the given // function to check the message. If an error is returned it will be made available on the Errors // channel otherwise the mock will handle the message as if it failed to produce successfully. This // means it will make a ProducerError available on the Errors channel. func (mp *AsyncProducer) ExpectInputWithMessageCheckerFunctionAndFail(cf MessageChecker, err error) *AsyncProducer { mp.l.Lock() defer mp.l.Unlock() mp.expectations = append(mp.expectations, &producerExpectation{Result: err, CheckFunction: cf}) return mp } // ExpectInputWithCheckerFunctionAndSucceed sets an expectation on the mock producer that a message // will be provided on the input channel. The mock producer will call the given function to check // the message value. If an error is returned it will be made available on the Errors channel // otherwise the mock will handle the message as if it produced successfully, i.e. it will make // it available on the Successes channel if the Producer.Return.Successes setting is set to true. func (mp *AsyncProducer) ExpectInputWithCheckerFunctionAndSucceed(cf ValueChecker) *AsyncProducer { mp.ExpectInputWithMessageCheckerFunctionAndSucceed(messageValueChecker(cf)) return mp } // ExpectInputWithCheckerFunctionAndFail sets an expectation on the mock producer that a message // will be provided on the input channel. The mock producer will first call the given function to // check the message value. If an error is returned it will be made available on the Errors channel // otherwise the mock will handle the message as if it failed to produce successfully. This means // it will make a ProducerError available on the Errors channel. func (mp *AsyncProducer) ExpectInputWithCheckerFunctionAndFail(cf ValueChecker, err error) *AsyncProducer { mp.ExpectInputWithMessageCheckerFunctionAndFail(messageValueChecker(cf), err) return mp } // ExpectInputAndSucceed sets an expectation on the mock producer that a message will be provided // on the input channel. The mock producer will handle the message as if it is produced successfully, // i.e. it will make it available on the Successes channel if the Producer.Return.Successes setting // is set to true. func (mp *AsyncProducer) ExpectInputAndSucceed() *AsyncProducer { mp.ExpectInputWithMessageCheckerFunctionAndSucceed(nil) return mp } // ExpectInputAndFail sets an expectation on the mock producer that a message will be provided // on the input channel. The mock producer will handle the message as if it failed to produce // successfully. This means it will make a ProducerError available on the Errors channel. 
func (mp *AsyncProducer) ExpectInputAndFail(err error) *AsyncProducer { mp.ExpectInputWithMessageCheckerFunctionAndFail(nil, err) return mp } golang-github-ibm-sarama-1.43.2/mocks/async_producer_test.go000066400000000000000000000156151461256741300241020ustar00rootroot00000000000000package mocks import ( "errors" "fmt" "regexp" "strings" "testing" "github.com/IBM/sarama" ) func generateRegexpChecker(re string) func([]byte) error { return func(val []byte) error { matched, err := regexp.MatchString(re, string(val)) if err != nil { return errors.New("Error while trying to match the input message with the expected pattern: " + err.Error()) } if !matched { return fmt.Errorf("No match between input value \"%s\" and expected pattern \"%s\"", val, re) } return nil } } type testReporterMock struct { errors []string } func newTestReporterMock() *testReporterMock { return &testReporterMock{errors: make([]string, 0)} } func (trm *testReporterMock) Errorf(format string, args ...interface{}) { trm.errors = append(trm.errors, fmt.Sprintf(format, args...)) } func TestMockAsyncProducerImplementsAsyncProducerInterface(t *testing.T) { var mp interface{} = &AsyncProducer{} if _, ok := mp.(sarama.AsyncProducer); !ok { t.Error("The mock producer should implement the sarama.Producer interface.") } } func TestProducerReturnsExpectationsToChannels(t *testing.T) { config := NewTestConfig() config.Producer.Return.Successes = true mp := NewAsyncProducer(t, config). ExpectInputAndSucceed(). ExpectInputAndSucceed(). ExpectInputAndFail(sarama.ErrOutOfBrokers) mp.Input() <- &sarama.ProducerMessage{Topic: "test 1"} mp.Input() <- &sarama.ProducerMessage{Topic: "test 2"} mp.Input() <- &sarama.ProducerMessage{Topic: "test 3"} msg1 := <-mp.Successes() msg2 := <-mp.Successes() err1 := <-mp.Errors() if msg1.Topic != "test 1" { t.Error("Expected message 1 to be returned first") } if msg2.Topic != "test 2" { t.Error("Expected message 2 to be returned second") } if err1.Msg.Topic != "test 3" || !errors.Is(err1, sarama.ErrOutOfBrokers) { t.Error("Expected message 3 to be returned as error") } if err := mp.Close(); err != nil { t.Error(err) } } func TestProducerWithTooFewExpectations(t *testing.T) { trm := newTestReporterMock() mp := NewAsyncProducer(trm, nil) mp.ExpectInputAndSucceed() mp.Input() <- &sarama.ProducerMessage{Topic: "test"} mp.Input() <- &sarama.ProducerMessage{Topic: "test"} if err := mp.Close(); err != nil { t.Error(err) } if len(trm.errors) != 1 { t.Error("Expected to report an error") } } func TestProducerWithTooManyExpectations(t *testing.T) { trm := newTestReporterMock() mp := NewAsyncProducer(trm, nil). ExpectInputAndSucceed(). 
ExpectInputAndFail(sarama.ErrOutOfBrokers) mp.Input() <- &sarama.ProducerMessage{Topic: "test"} if err := mp.Close(); err != nil { t.Error(err) } if len(trm.errors) != 1 { t.Error("Expected to report an error") } } func TestProducerFailTxn(t *testing.T) { config := NewTestConfig() config.Producer.Transaction.ID = "test" config.Producer.RequiredAcks = sarama.WaitForAll config.Producer.Retry.Backoff = 0 config.Producer.Idempotent = true config.Net.MaxOpenRequests = 1 config.Version = sarama.V0_11_0_0 trm := newTestReporterMock() mp := NewAsyncProducer(trm, config) mp.Input() <- &sarama.ProducerMessage{Topic: "test"} _ = mp.Close() if len(trm.errors) != 1 { t.Error("must have fail with txn begin error") } } func TestProducerWithTxn(t *testing.T) { config := NewTestConfig() config.Producer.Transaction.ID = "test" config.Producer.RequiredAcks = sarama.WaitForAll config.Producer.Retry.Backoff = 0 config.Producer.Idempotent = true config.Net.MaxOpenRequests = 1 config.Version = sarama.V0_11_0_0 trm := newTestReporterMock() mp := NewAsyncProducer(trm, config). ExpectInputAndSucceed() if !mp.IsTransactional() { t.Error("producer must be transactional") } if err := mp.BeginTxn(); err != nil { t.Error(err) } if mp.TxnStatus()&sarama.ProducerTxnFlagInTransaction == 0 { t.Error("transaction must be started") } mp.Input() <- &sarama.ProducerMessage{Topic: "test"} if err := mp.AddMessageToTxn(&sarama.ConsumerMessage{ Topic: "original-topic", Partition: 0, Offset: 123, }, "test-group", nil); err != nil { t.Error(err) } if err := mp.AddOffsetsToTxn(map[string][]*sarama.PartitionOffsetMetadata{ "original-topic": { { Partition: 1, Offset: 321, }, }, }, "test-group"); err != nil { t.Error(err) } if err := mp.CommitTxn(); err != nil { t.Error(err) } if err := mp.Close(); err != nil { t.Error(err) } } func TestProducerWithCheckerFunction(t *testing.T) { trm := newTestReporterMock() mp := NewAsyncProducer(trm, nil). ExpectInputWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes")). ExpectInputWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes$")) mp.Input() <- &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")} mp.Input() <- &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")} if err := mp.Close(); err != nil { t.Error(err) } if len(mp.Errors()) != 1 { t.Error("Expected to report an error") } err1 := <-mp.Errors() if !strings.HasPrefix(err1.Err.Error(), "No match") { t.Error("Expected to report a value check error, found: ", err1.Err) } } func TestProducerWithBrokenPartitioner(t *testing.T) { trm := newTestReporterMock() config := NewTestConfig() config.Producer.Partitioner = func(string) sarama.Partitioner { return brokePartitioner{} } mp := NewAsyncProducer(trm, config) mp.ExpectInputWithMessageCheckerFunctionAndSucceed(func(msg *sarama.ProducerMessage) error { if msg.Partition != 15 { t.Error("Expected partition 15, found: ", msg.Partition) } if msg.Topic != "test" { t.Errorf(`Expected topic "test", found: %q`, msg.Topic) } return nil }) mp.ExpectInputAndSucceed() // should actually fail in partitioning mp.Input() <- &sarama.ProducerMessage{Topic: "test"} mp.Input() <- &sarama.ProducerMessage{Topic: "not-test"} if err := mp.Close(); err != nil { t.Error(err) } if len(trm.errors) != 1 || !strings.Contains(trm.errors[0], "partitioning unavailable") { t.Error("Expected to report partitioning unavailable, found", trm.errors) } } // brokeProducer refuses to partition anything not on the “test” topic, and sends everything on // that topic to partition 15. 
type brokePartitioner struct{} func (brokePartitioner) Partition(msg *sarama.ProducerMessage, n int32) (int32, error) { if msg.Topic == "test" { return 15, nil } return 0, errors.New("partitioning unavailable") } func (brokePartitioner) RequiresConsistency() bool { return false } func TestProducerWithInvalidConfiguration(t *testing.T) { trm := newTestReporterMock() config := NewTestConfig() config.Version = sarama.V0_11_0_2 config.ClientID = "not a valid producer ID" mp := NewAsyncProducer(trm, config) if err := mp.Close(); err != nil { t.Error(err) } if len(trm.errors) != 1 { t.Error("Expected to report a single error") } else if !strings.Contains(trm.errors[0], `ClientID value "not a valid producer ID" is not valid for Kafka versions before 1.0.0`) { t.Errorf("Unexpected error: %s", trm.errors[0]) } } golang-github-ibm-sarama-1.43.2/mocks/consumer.go000066400000000000000000000326331461256741300216550ustar00rootroot00000000000000package mocks import ( "sync" "sync/atomic" "github.com/IBM/sarama" ) // Consumer implements sarama's Consumer interface for testing purposes. // Before you can start consuming from this consumer, you have to register // topic/partitions using ExpectConsumePartition, and set expectations on them. type Consumer struct { l sync.Mutex t ErrorReporter config *sarama.Config partitionConsumers map[string]map[int32]*PartitionConsumer metadata map[string][]int32 } // NewConsumer returns a new mock Consumer instance. The t argument should // be the *testing.T instance of your test method. An error will be written to it if // an expectation is violated. The config argument can be set to nil; if it is // non-nil it is validated. func NewConsumer(t ErrorReporter, config *sarama.Config) *Consumer { if config == nil { config = sarama.NewConfig() } if err := config.Validate(); err != nil { t.Errorf("Invalid mock configuration provided: %s", err.Error()) } c := &Consumer{ t: t, config: config, partitionConsumers: make(map[string]map[int32]*PartitionConsumer), } return c } /////////////////////////////////////////////////// // Consumer interface implementation /////////////////////////////////////////////////// // ConsumePartition implements the ConsumePartition method from the sarama.Consumer interface. // Before you can start consuming a partition, you have to set expectations on it using // ExpectConsumePartition. You can only consume a partition once per consumer. func (c *Consumer) ConsumePartition(topic string, partition int32, offset int64) (sarama.PartitionConsumer, error) { c.l.Lock() defer c.l.Unlock() if c.partitionConsumers[topic] == nil || c.partitionConsumers[topic][partition] == nil { c.t.Errorf("No expectations set for %s/%d", topic, partition) return nil, errOutOfExpectations } pc := c.partitionConsumers[topic][partition] if pc.consumed { return nil, sarama.ConfigurationError("The topic/partition is already being consumed") } if pc.offset != AnyOffset && pc.offset != offset { c.t.Errorf("Unexpected offset when calling ConsumePartition for %s/%d. Expected %d, got %d.", topic, partition, pc.offset, offset) } pc.consumed = true return pc, nil } // Topics returns a list of topics, as registered with SetTopicMetadata func (c *Consumer) Topics() ([]string, error) { c.l.Lock() defer c.l.Unlock() if c.metadata == nil { c.t.Errorf("Unexpected call to Topics. 
Initialize the mock's topic metadata with SetTopicMetadata.")
		return nil, sarama.ErrOutOfBrokers
	}
	var result []string
	for topic := range c.metadata {
		result = append(result, topic)
	}
	return result, nil
}

// Partitions returns the list of partitions for the given topic, as registered with SetTopicMetadata
func (c *Consumer) Partitions(topic string) ([]int32, error) {
	c.l.Lock()
	defer c.l.Unlock()
	if c.metadata == nil {
		c.t.Errorf("Unexpected call to Partitions. Initialize the mock's topic metadata with SetTopicMetadata.")
		return nil, sarama.ErrOutOfBrokers
	}
	if c.metadata[topic] == nil {
		return nil, sarama.ErrUnknownTopicOrPartition
	}
	return c.metadata[topic], nil
}

func (c *Consumer) HighWaterMarks() map[string]map[int32]int64 {
	c.l.Lock()
	defer c.l.Unlock()
	hwms := make(map[string]map[int32]int64, len(c.partitionConsumers))
	for topic, partitionConsumers := range c.partitionConsumers {
		hwm := make(map[int32]int64, len(partitionConsumers))
		for partition, pc := range partitionConsumers {
			hwm[partition] = pc.HighWaterMarkOffset()
		}
		hwms[topic] = hwm
	}
	return hwms
}

// Close implements the Close method from the sarama.Consumer interface. It will close
// all registered PartitionConsumer instances.
func (c *Consumer) Close() error {
	c.l.Lock()
	defer c.l.Unlock()
	for _, partitions := range c.partitionConsumers {
		for _, partitionConsumer := range partitions {
			_ = partitionConsumer.Close()
		}
	}
	return nil
}

// Pause implements Consumer.
func (c *Consumer) Pause(topicPartitions map[string][]int32) {
	c.l.Lock()
	defer c.l.Unlock()
	for topic, partitions := range topicPartitions {
		for _, partition := range partitions {
			if topicConsumers, ok := c.partitionConsumers[topic]; ok {
				if partitionConsumer, ok := topicConsumers[partition]; ok {
					partitionConsumer.Pause()
				}
			}
		}
	}
}

// Resume implements Consumer.
func (c *Consumer) Resume(topicPartitions map[string][]int32) {
	c.l.Lock()
	defer c.l.Unlock()
	for topic, partitions := range topicPartitions {
		for _, partition := range partitions {
			if topicConsumers, ok := c.partitionConsumers[topic]; ok {
				if partitionConsumer, ok := topicConsumers[partition]; ok {
					partitionConsumer.Resume()
				}
			}
		}
	}
}

// PauseAll implements Consumer.
func (c *Consumer) PauseAll() {
	c.l.Lock()
	defer c.l.Unlock()
	for _, partitions := range c.partitionConsumers {
		for _, partitionConsumer := range partitions {
			partitionConsumer.Pause()
		}
	}
}

// ResumeAll implements Consumer.
func (c *Consumer) ResumeAll() {
	c.l.Lock()
	defer c.l.Unlock()
	for _, partitions := range c.partitionConsumers {
		for _, partitionConsumer := range partitions {
			partitionConsumer.Resume()
		}
	}
}

///////////////////////////////////////////////////
// Expectation API
///////////////////////////////////////////////////

// SetTopicMetadata sets the cluster's topic/partition metadata,
// which will be returned by Topics() and Partitions().
func (c *Consumer) SetTopicMetadata(metadata map[string][]int32) {
	c.l.Lock()
	defer c.l.Unlock()
	c.metadata = metadata
}

// ExpectConsumePartition will register a topic/partition, so you can set expectations on it.
// The registered PartitionConsumer will be returned, so you can set expectations
// on it using method chaining. Once a topic/partition is registered, you are
// expected to start consuming it using ConsumePartition. If that doesn't happen,
// an error will be written to the error reporter once the mock consumer is closed.
It also expects // that the message and error channels be written with YieldMessage and YieldError accordingly, // and be fully consumed once the mock consumer is closed if ExpectMessagesDrainedOnClose or // ExpectErrorsDrainedOnClose have been called. func (c *Consumer) ExpectConsumePartition(topic string, partition int32, offset int64) *PartitionConsumer { c.l.Lock() defer c.l.Unlock() if c.partitionConsumers[topic] == nil { c.partitionConsumers[topic] = make(map[int32]*PartitionConsumer) } if c.partitionConsumers[topic][partition] == nil { highWatermarkOffset := offset if offset == sarama.OffsetOldest { highWatermarkOffset = 0 } c.partitionConsumers[topic][partition] = &PartitionConsumer{ highWaterMarkOffset: highWatermarkOffset, t: c.t, topic: topic, partition: partition, offset: offset, messages: make(chan *sarama.ConsumerMessage, c.config.ChannelBufferSize), suppressedMessages: make(chan *sarama.ConsumerMessage, c.config.ChannelBufferSize), errors: make(chan *sarama.ConsumerError, c.config.ChannelBufferSize), } } return c.partitionConsumers[topic][partition] } /////////////////////////////////////////////////// // PartitionConsumer mock type /////////////////////////////////////////////////// // PartitionConsumer implements sarama's PartitionConsumer interface for testing purposes. // It is returned by the mock Consumers ConsumePartitionMethod, but only if it is // registered first using the Consumer's ExpectConsumePartition method. Before consuming the // Errors and Messages channel, you should specify what values will be provided on these // channels using YieldMessage and YieldError. type PartitionConsumer struct { highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG suppressedHighWaterMarkOffset int64 l sync.Mutex t ErrorReporter topic string partition int32 offset int64 messages chan *sarama.ConsumerMessage suppressedMessages chan *sarama.ConsumerMessage errors chan *sarama.ConsumerError singleClose sync.Once consumed bool errorsShouldBeDrained bool messagesShouldBeDrained bool paused bool } /////////////////////////////////////////////////// // PartitionConsumer interface implementation /////////////////////////////////////////////////// // AsyncClose implements the AsyncClose method from the sarama.PartitionConsumer interface. func (pc *PartitionConsumer) AsyncClose() { pc.singleClose.Do(func() { close(pc.suppressedMessages) close(pc.messages) close(pc.errors) }) } // Close implements the Close method from the sarama.PartitionConsumer interface. It will // verify whether the partition consumer was actually started. 
func (pc *PartitionConsumer) Close() error {
	if !pc.consumed {
		pc.t.Errorf("Expectations set on %s/%d, but no partition consumer was started.", pc.topic, pc.partition)
		return errPartitionConsumerNotStarted
	}
	if pc.errorsShouldBeDrained && len(pc.errors) > 0 {
		pc.t.Errorf("Expected the errors channel for %s/%d to be drained on close, but found %d errors.", pc.topic, pc.partition, len(pc.errors))
	}
	if pc.messagesShouldBeDrained && len(pc.messages) > 0 {
		pc.t.Errorf("Expected the messages channel for %s/%d to be drained on close, but found %d messages.", pc.topic, pc.partition, len(pc.messages))
	}
	pc.AsyncClose()

	var (
		closeErr error
		wg       sync.WaitGroup
	)

	wg.Add(1)
	go func() {
		defer wg.Done()
		errs := make(sarama.ConsumerErrors, 0)
		for err := range pc.errors {
			errs = append(errs, err)
		}
		if len(errs) > 0 {
			closeErr = errs
		}
	}()

	wg.Add(1)
	go func() {
		defer wg.Done()
		for range pc.messages {
			// drain
		}
	}()

	wg.Add(1)
	go func() {
		defer wg.Done()
		for range pc.suppressedMessages {
			// drain
		}
	}()

	wg.Wait()
	return closeErr
}

// Errors implements the Errors method from the sarama.PartitionConsumer interface.
func (pc *PartitionConsumer) Errors() <-chan *sarama.ConsumerError {
	return pc.errors
}

// Messages implements the Messages method from the sarama.PartitionConsumer interface.
func (pc *PartitionConsumer) Messages() <-chan *sarama.ConsumerMessage {
	return pc.messages
}

func (pc *PartitionConsumer) HighWaterMarkOffset() int64 {
	return atomic.LoadInt64(&pc.highWaterMarkOffset)
}

// Pause implements the Pause method from the sarama.PartitionConsumer interface.
func (pc *PartitionConsumer) Pause() {
	pc.l.Lock()
	defer pc.l.Unlock()
	pc.suppressedHighWaterMarkOffset = atomic.LoadInt64(&pc.highWaterMarkOffset)
	pc.paused = true
}

// Resume implements the Resume method from the sarama.PartitionConsumer interface.
func (pc *PartitionConsumer) Resume() {
	pc.l.Lock()
	defer pc.l.Unlock()
	pc.highWaterMarkOffset = atomic.LoadInt64(&pc.suppressedHighWaterMarkOffset)
	for len(pc.suppressedMessages) > 0 {
		msg := <-pc.suppressedMessages
		pc.messages <- msg
	}
	pc.paused = false
}

// IsPaused implements the IsPaused method from the sarama.PartitionConsumer interface.
func (pc *PartitionConsumer) IsPaused() bool {
	pc.l.Lock()
	defer pc.l.Unlock()
	return pc.paused
}

///////////////////////////////////////////////////
// Expectation API
///////////////////////////////////////////////////

// YieldMessage will yield a message on the Messages channel of this partition consumer
// when it is consumed. By default, the mock consumer will not verify whether this
// message was consumed from the Messages channel, because there are legitimate
// reasons for this not to happen. You can call ExpectMessagesDrainedOnClose so it will
// verify that the channel is empty on close.
func (pc *PartitionConsumer) YieldMessage(msg *sarama.ConsumerMessage) *PartitionConsumer {
	pc.l.Lock()
	defer pc.l.Unlock()
	msg.Topic = pc.topic
	msg.Partition = pc.partition
	if pc.paused {
		msg.Offset = atomic.AddInt64(&pc.suppressedHighWaterMarkOffset, 1) - 1
		pc.suppressedMessages <- msg
	} else {
		msg.Offset = atomic.AddInt64(&pc.highWaterMarkOffset, 1) - 1
		pc.messages <- msg
	}
	return pc
}

// YieldError will yield an error on the Errors channel of this partition consumer
// when it is consumed. By default, the mock consumer will not verify whether this error was
// consumed from the Errors channel, because there are legitimate reasons for this
// not to happen. You can call ExpectErrorsDrainedOnClose so it will verify that
// the channel is empty on close.
func (pc *PartitionConsumer) YieldError(err error) *PartitionConsumer { pc.errors <- &sarama.ConsumerError{ Topic: pc.topic, Partition: pc.partition, Err: err, } return pc } // ExpectMessagesDrainedOnClose sets an expectation on the partition consumer // that the messages channel will be fully drained when Close is called. If this // expectation is not met, an error is reported to the error reporter. func (pc *PartitionConsumer) ExpectMessagesDrainedOnClose() *PartitionConsumer { pc.messagesShouldBeDrained = true return pc } // ExpectErrorsDrainedOnClose sets an expectation on the partition consumer // that the errors channel will be fully drained when Close is called. If this // expectation is not met, an error is reported to the error reporter. func (pc *PartitionConsumer) ExpectErrorsDrainedOnClose() *PartitionConsumer { pc.errorsShouldBeDrained = true return pc } golang-github-ibm-sarama-1.43.2/mocks/consumer_test.go000066400000000000000000000320321461256741300227050ustar00rootroot00000000000000package mocks import ( "errors" "sort" "strings" "testing" "github.com/IBM/sarama" ) func TestMockConsumerImplementsConsumerInterface(t *testing.T) { var c interface{} = &Consumer{} if _, ok := c.(sarama.Consumer); !ok { t.Error("The mock consumer should implement the sarama.Consumer interface.") } var pc interface{} = &PartitionConsumer{} if _, ok := pc.(sarama.PartitionConsumer); !ok { t.Error("The mock partitionconsumer should implement the sarama.PartitionConsumer interface.") } } func TestConsumerHandlesExpectations(t *testing.T) { consumer := NewConsumer(t, NewTestConfig()) defer func() { if err := consumer.Close(); err != nil { t.Error(err) } }() consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world")}) consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldError(sarama.ErrOutOfBrokers) consumer.ExpectConsumePartition("test", 1, sarama.OffsetOldest).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world again")}) consumer.ExpectConsumePartition("other", 0, AnyOffset).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello other")}) pc_test0, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest) if err != nil { t.Fatal(err) } test0_msg := <-pc_test0.Messages() if test0_msg.Topic != "test" || test0_msg.Partition != 0 || string(test0_msg.Value) != "hello world" { t.Error("Message was not as expected:", test0_msg) } test0_err := <-pc_test0.Errors() if !errors.Is(test0_err, sarama.ErrOutOfBrokers) { t.Error("Expected sarama.ErrOutOfBrokers, found:", test0_err.Err) } pc_test1, err := consumer.ConsumePartition("test", 1, sarama.OffsetOldest) if err != nil { t.Fatal(err) } test1_msg := <-pc_test1.Messages() if test1_msg.Topic != "test" || test1_msg.Partition != 1 || string(test1_msg.Value) != "hello world again" { t.Error("Message was not as expected:", test1_msg) } pc_other0, err := consumer.ConsumePartition("other", 0, sarama.OffsetNewest) if err != nil { t.Fatal(err) } other0_msg := <-pc_other0.Messages() if other0_msg.Topic != "other" || other0_msg.Partition != 0 || string(other0_msg.Value) != "hello other" { t.Error("Message was not as expected:", other0_msg) } } func TestConsumerHandlesExpectationsPausingResuming(t *testing.T) { consumer := NewConsumer(t, NewTestConfig()) defer func() { if err := consumer.Close(); err != nil { t.Error(err) } }() consumePartitionT0P0 := consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest) consumePartitionT0P1 := 
consumer.ExpectConsumePartition("test", 1, sarama.OffsetOldest) consumePartitionT1P0 := consumer.ExpectConsumePartition("other", 0, AnyOffset) consumePartitionT0P0.Pause() consumePartitionT0P0.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world")}) consumePartitionT0P0.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world x")}) consumePartitionT0P0.YieldError(sarama.ErrOutOfBrokers) consumePartitionT0P1.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world again")}) consumePartitionT1P0.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello other")}) pc_test0, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest) if err != nil { t.Fatal(err) } if len(pc_test0.Messages()) > 0 { t.Error("Problem to pause consumption") } test0_err := <-pc_test0.Errors() if !errors.Is(test0_err, sarama.ErrOutOfBrokers) { t.Error("Expected sarama.ErrOutOfBrokers, found:", test0_err.Err) } if pc_test0.HighWaterMarkOffset() != 0 { t.Error("High water mark offset with value different from the expected: ", pc_test0.HighWaterMarkOffset()) } pc_test1, err := consumer.ConsumePartition("test", 1, sarama.OffsetOldest) if err != nil { t.Fatal(err) } test1_msg := <-pc_test1.Messages() if test1_msg.Topic != "test" || test1_msg.Partition != 1 || string(test1_msg.Value) != "hello world again" { t.Error("Message was not as expected:", test1_msg) } if pc_test1.HighWaterMarkOffset() != 1 { t.Error("High water mark offset with value different from the expected: ", pc_test1.HighWaterMarkOffset()) } pc_other0, err := consumer.ConsumePartition("other", 0, sarama.OffsetNewest) if err != nil { t.Fatal(err) } other0_msg := <-pc_other0.Messages() if other0_msg.Topic != "other" || other0_msg.Partition != 0 || string(other0_msg.Value) != "hello other" { t.Error("Message was not as expected:", other0_msg) } if pc_other0.HighWaterMarkOffset() != AnyOffset+1 { t.Error("High water mark offset with value different from the expected: ", pc_other0.HighWaterMarkOffset()) } pc_test0.Resume() test0_msg1 := <-pc_test0.Messages() if test0_msg1.Topic != "test" || test0_msg1.Partition != 0 || string(test0_msg1.Value) != "hello world" || test0_msg1.Offset != 0 { t.Error("Message was not as expected:", test0_msg1) } test0_msg2 := <-pc_test0.Messages() if test0_msg2.Topic != "test" || test0_msg2.Partition != 0 || string(test0_msg2.Value) != "hello world x" || test0_msg2.Offset != 1 { t.Error("Message was not as expected:", test0_msg2) } if pc_test0.HighWaterMarkOffset() != 2 { t.Error("High water mark offset with value different from the expected: ", pc_test0.HighWaterMarkOffset()) } } func TestConsumerReturnsNonconsumedErrorsOnClose(t *testing.T) { consumer := NewConsumer(t, NewTestConfig()) consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldError(sarama.ErrOutOfBrokers) consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldError(sarama.ErrOutOfBrokers) pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest) if err != nil { t.Fatal(err) } select { case <-pc.Messages(): t.Error("Did not expect a message on the messages channel.") case err := <-pc.Errors(): if !errors.Is(err, sarama.ErrOutOfBrokers) { t.Error("Expected sarama.ErrOutOfBrokers, found", err) } } var errs sarama.ConsumerErrors if !errors.As(pc.Close(), &errs) { t.Error("Expected Close to return ConsumerErrors") } if len(errs) != 1 && !errors.Is(errs[0], sarama.ErrOutOfBrokers) { t.Error("Expected Close to return the remaining sarama.ErrOutOfBrokers") } } func 
TestConsumerWithoutExpectationsOnPartition(t *testing.T) { trm := newTestReporterMock() consumer := NewConsumer(trm, NewTestConfig()) _, err := consumer.ConsumePartition("test", 1, sarama.OffsetOldest) if !errors.Is(err, errOutOfExpectations) { t.Error("Expected ConsumePartition to return errOutOfExpectations") } if err := consumer.Close(); err != nil { t.Error("No error expected on close, but found:", err) } if len(trm.errors) != 1 { t.Errorf("Expected an expectation failure to be set on the error reporter.") } } func TestConsumerWithExpectationsOnUnconsumedPartition(t *testing.T) { trm := newTestReporterMock() consumer := NewConsumer(trm, NewTestConfig()) consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world")}) if err := consumer.Close(); err != nil { t.Error("No error expected on close, but found:", err) } if len(trm.errors) != 1 { t.Errorf("Expected an expectation failure to be set on the error reporter.") } } func TestConsumerWithWrongOffsetExpectation(t *testing.T) { trm := newTestReporterMock() consumer := NewConsumer(trm, NewTestConfig()) consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest) _, err := consumer.ConsumePartition("test", 0, sarama.OffsetNewest) if err != nil { t.Error("Did not expect error, found:", err) } if len(trm.errors) != 1 { t.Errorf("Expected an expectation failure to be set on the error reporter.") } if err := consumer.Close(); err != nil { t.Error(err) } } func TestConsumerViolatesMessagesDrainedExpectation(t *testing.T) { trm := newTestReporterMock() consumer := NewConsumer(trm, NewTestConfig()) consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest). YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")}). YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")}). ExpectMessagesDrainedOnClose() pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest) if err != nil { t.Error(err) } // consume first message, not second one <-pc.Messages() if err := consumer.Close(); err != nil { t.Error(err) } if len(trm.errors) != 1 { t.Errorf("Expected an expectation failure to be set on the error reporter.") } } func TestConsumerMeetsErrorsDrainedExpectation(t *testing.T) { trm := newTestReporterMock() consumer := NewConsumer(trm, NewTestConfig()) consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest). YieldError(sarama.ErrInvalidMessage). YieldError(sarama.ErrInvalidMessage). 
ExpectErrorsDrainedOnClose() pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest) if err != nil { t.Error(err) } // consume first and second error, <-pc.Errors() <-pc.Errors() if err := consumer.Close(); err != nil { t.Error(err) } if len(trm.errors) != 0 { t.Errorf("Expected no expectation failures to be set on the error reporter.") } } func TestConsumerTopicMetadata(t *testing.T) { trm := newTestReporterMock() consumer := NewConsumer(trm, NewTestConfig()) consumer.SetTopicMetadata(map[string][]int32{ "test1": {0, 1, 2, 3}, "test2": {0, 1, 2, 3, 4, 5, 6, 7}, }) topics, err := consumer.Topics() if err != nil { t.Error(t) } sortedTopics := sort.StringSlice(topics) sortedTopics.Sort() if len(sortedTopics) != 2 || sortedTopics[0] != "test1" || sortedTopics[1] != "test2" { t.Error("Unexpected topics returned:", sortedTopics) } partitions1, err := consumer.Partitions("test1") if err != nil { t.Error(t) } if len(partitions1) != 4 { t.Error("Unexpected partitions returned:", len(partitions1)) } partitions2, err := consumer.Partitions("test2") if err != nil { t.Error(t) } if len(partitions2) != 8 { t.Error("Unexpected partitions returned:", len(partitions2)) } if len(trm.errors) != 0 { t.Errorf("Expected no expectation failures to be set on the error reporter.") } } func TestConsumerUnexpectedTopicMetadata(t *testing.T) { trm := newTestReporterMock() consumer := NewConsumer(trm, NewTestConfig()) if _, err := consumer.Topics(); !errors.Is(err, sarama.ErrOutOfBrokers) { t.Error("Expected sarama.ErrOutOfBrokers, found", err) } if len(trm.errors) != 1 { t.Errorf("Expected an expectation failure to be set on the error reporter.") } } func TestConsumerOffsetsAreManagedCorrectlyWithOffsetOldest(t *testing.T) { trm := newTestReporterMock() consumer := NewConsumer(trm, NewTestConfig()) pcmock := consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest) pcmock.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")}) pcmock.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")}) pcmock.ExpectMessagesDrainedOnClose() pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest) if err != nil { t.Error(err) } message1 := <-pc.Messages() if message1.Offset != 0 { t.Errorf("Expected offset of first message in the partition to be 0, got %d", message1.Offset) } message2 := <-pc.Messages() if message2.Offset != 1 { t.Errorf("Expected offset of second message in the partition to be 1, got %d", message2.Offset) } if err := consumer.Close(); err != nil { t.Error(err) } if len(trm.errors) != 0 { t.Errorf("Expected to not report any errors, found: %v", trm.errors) } } func TestConsumerOffsetsAreManagedCorrectlyWithSpecifiedOffset(t *testing.T) { startingOffset := int64(123) trm := newTestReporterMock() consumer := NewConsumer(trm, NewTestConfig()) pcmock := consumer.ExpectConsumePartition("test", 0, startingOffset) pcmock.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")}) pcmock.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")}) pcmock.ExpectMessagesDrainedOnClose() pc, err := consumer.ConsumePartition("test", 0, startingOffset) if err != nil { t.Error(err) } message1 := <-pc.Messages() if message1.Offset != startingOffset { t.Errorf("Expected offset of first message to be %d, got %d", startingOffset, message1.Offset) } message2 := <-pc.Messages() if message2.Offset != startingOffset+1 { t.Errorf("Expected offset of second message to be %d, got %d", startingOffset+1, message2.Offset) } if err := consumer.Close(); err != nil { t.Error(err) } if len(trm.errors) 
!= 0 { t.Errorf("Expected to not report any errors, found: %v", trm.errors) } if pc.HighWaterMarkOffset() != message2.Offset+1 { diff := pc.HighWaterMarkOffset() - message2.Offset t.Errorf("Difference between highwatermarkoffset and last message offset greater than 1, got: %v", diff) } } func TestConsumerInvalidConfiguration(t *testing.T) { trm := newTestReporterMock() config := NewTestConfig() config.Version = sarama.V0_11_0_2 config.ClientID = "not a valid consumer ID" consumer := NewConsumer(trm, config) if err := consumer.Close(); err != nil { t.Error(err) } if len(trm.errors) != 1 { t.Error("Expected to report a single error") } else if !strings.Contains(trm.errors[0], `ClientID value "not a valid consumer ID" is not valid for Kafka versions before 1.0.0`) { t.Errorf("Unexpected error: %s", trm.errors[0]) } } golang-github-ibm-sarama-1.43.2/mocks/mocks.go000066400000000000000000000067361461256741300211430ustar00rootroot00000000000000/* Package mocks provides mocks that can be used for testing applications that use Sarama. The mock types provided by this package implement the interfaces Sarama exports, so you can use them for dependency injection in your tests. All mock instances require you to set expectations on them before you can use them. It will determine how the mock will behave. If an expectation is not met, it will make your test fail. NOTE: this package currently does not fall under the API stability guarantee of Sarama as it is still considered experimental. */ package mocks import ( "errors" "fmt" "github.com/IBM/sarama" ) // ErrorReporter is a simple interface that includes the testing.T methods we use to report // expectation violations when using the mock objects. type ErrorReporter interface { Errorf(string, ...interface{}) } // ValueChecker is a function type to be set in each expectation of the producer mocks // to check the value passed. type ValueChecker func(val []byte) error // MessageChecker is a function type to be set in each expectation of the producer mocks // to check the message passed. type MessageChecker func(*sarama.ProducerMessage) error // messageValueChecker wraps a ValueChecker into a MessageChecker. // Failure to encode the message value will return an error and not call // the wrapped ValueChecker. func messageValueChecker(f ValueChecker) MessageChecker { if f == nil { return nil } return func(msg *sarama.ProducerMessage) error { val, err := msg.Value.Encode() if err != nil { return fmt.Errorf("Input message encoding failed: %w", err) } return f(val) } } var ( errProduceSuccess error = nil errOutOfExpectations = errors.New("No more expectations set on mock") errPartitionConsumerNotStarted = errors.New("The partition consumer was never started") ) const AnyOffset int64 = -1000 type producerExpectation struct { Result error CheckFunction MessageChecker } // TopicConfig describes a mock topic structure for the mock producers’ partitioning needs. type TopicConfig struct { overridePartitions map[string]int32 defaultPartitions int32 } // NewTopicConfig makes a configuration which defaults to 32 partitions for every topic. func NewTopicConfig() *TopicConfig { return &TopicConfig{ overridePartitions: make(map[string]int32, 0), defaultPartitions: 32, } } // SetDefaultPartitions sets the number of partitions any topic not explicitly configured otherwise // (by SetPartitions) will have from the perspective of created partitioners. 
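//
// A short sketch of how this is typically reached through a producer mock, which
// embeds *TopicConfig (the topic names and partition counts below are illustrative):
//
//	sp := NewSyncProducer(t, nil) // *TopicConfig methods are promoted onto the mock
//	sp.SetDefaultPartitions(8)
//	sp.SetPartitions(map[string]int32{"hot_topic": 64})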
func (pc *TopicConfig) SetDefaultPartitions(n int32) { pc.defaultPartitions = n } // SetPartitions sets the number of partitions the partitioners will see for specific topics. This // only applies to messages produced after setting them. func (pc *TopicConfig) SetPartitions(partitions map[string]int32) { for p, n := range partitions { pc.overridePartitions[p] = n } } func (pc *TopicConfig) partitions(topic string) int32 { if n, found := pc.overridePartitions[topic]; found { return n } return pc.defaultPartitions } // NewTestConfig returns a config meant to be used by tests. // Due to inconsistencies with the request versions the clients send using the default Kafka version // and the response versions our mocks use, we default to the minimum Kafka version in most tests func NewTestConfig() *sarama.Config { config := sarama.NewConfig() config.Consumer.Retry.Backoff = 0 config.Producer.Retry.Backoff = 0 config.Version = sarama.MinVersion return config } golang-github-ibm-sarama-1.43.2/mocks/sync_producer.go000066400000000000000000000232121461256741300226720ustar00rootroot00000000000000package mocks import ( "errors" "sync" "github.com/IBM/sarama" ) // SyncProducer implements sarama's SyncProducer interface for testing purposes. // Before you can use it, you have to set expectations on the mock SyncProducer // to tell it how to handle calls to SendMessage, so you can easily test success // and failure scenarios. type SyncProducer struct { l sync.Mutex t ErrorReporter expectations []*producerExpectation lastOffset int64 *TopicConfig newPartitioner sarama.PartitionerConstructor partitioners map[string]sarama.Partitioner isTransactional bool txnLock sync.Mutex txnStatus sarama.ProducerTxnStatusFlag } // NewSyncProducer instantiates a new SyncProducer mock. The t argument should // be the *testing.T instance of your test method. An error will be written to it if // an expectation is violated. The config argument is validated and used to handle // partitioning. func NewSyncProducer(t ErrorReporter, config *sarama.Config) *SyncProducer { if config == nil { config = sarama.NewConfig() } if err := config.Validate(); err != nil { t.Errorf("Invalid mock configuration provided: %s", err.Error()) } return &SyncProducer{ t: t, expectations: make([]*producerExpectation, 0), TopicConfig: NewTopicConfig(), newPartitioner: config.Producer.Partitioner, partitioners: make(map[string]sarama.Partitioner, 1), isTransactional: config.Producer.Transaction.ID != "", txnStatus: sarama.ProducerTxnFlagReady, } } //////////////////////////////////////////////// // Implement SyncProducer interface //////////////////////////////////////////////// // SendMessage corresponds with the SendMessage method of sarama's SyncProducer implementation. // You have to set expectations on the mock producer before calling SendMessage, so it knows // how to handle them. You can set a function in each expectation so that the message value // checked by this function and an error is returned if the match fails. // If there is no more remaining expectation when SendMessage is called, // the mock producer will write an error to the test state object. 
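//
// A compact sketch of the expected call pattern (illustrative; the topic, value,
// and offset check are assumptions based on the mock's documented behaviour):
//
//	sp := NewSyncProducer(t, nil)
//	sp.ExpectSendMessageAndSucceed()
//
//	msg := &sarama.ProducerMessage{Topic: "my_topic", Value: sarama.StringEncoder("hello")}
//	partition, offset, err := sp.SendMessage(msg)
//	if err != nil || offset != 1 { // the first successful send is assigned offset 1
//		t.Errorf("unexpected result: partition=%d offset=%d err=%v", partition, offset, err)
//	}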
func (sp *SyncProducer) SendMessage(msg *sarama.ProducerMessage) (partition int32, offset int64, err error) { sp.l.Lock() defer sp.l.Unlock() if sp.IsTransactional() && sp.txnStatus&sarama.ProducerTxnFlagInTransaction == 0 { sp.t.Errorf("attempt to send message when transaction is not started or is in ending state.") return -1, -1, errors.New("attempt to send message when transaction is not started or is in ending state") } if len(sp.expectations) > 0 { expectation := sp.expectations[0] sp.expectations = sp.expectations[1:] topic := msg.Topic partition, err := sp.partitioner(topic).Partition(msg, sp.partitions(topic)) if err != nil { sp.t.Errorf("Partitioner returned an error: %s", err.Error()) return -1, -1, err } msg.Partition = partition if expectation.CheckFunction != nil { errCheck := expectation.CheckFunction(msg) if errCheck != nil { sp.t.Errorf("Check function returned an error: %s", errCheck.Error()) return -1, -1, errCheck } } if errors.Is(expectation.Result, errProduceSuccess) { sp.lastOffset++ msg.Offset = sp.lastOffset return 0, msg.Offset, nil } return -1, -1, expectation.Result } sp.t.Errorf("No more expectation set on this mock producer to handle the input message.") return -1, -1, errOutOfExpectations } // SendMessages corresponds with the SendMessages method of sarama's SyncProducer implementation. // You have to set expectations on the mock producer before calling SendMessages, so it knows // how to handle them. If there is no more remaining expectations when SendMessages is called, // the mock producer will write an error to the test state object. func (sp *SyncProducer) SendMessages(msgs []*sarama.ProducerMessage) error { sp.l.Lock() defer sp.l.Unlock() if len(sp.expectations) >= len(msgs) { expectations := sp.expectations[0:len(msgs)] sp.expectations = sp.expectations[len(msgs):] for i, expectation := range expectations { topic := msgs[i].Topic partition, err := sp.partitioner(topic).Partition(msgs[i], sp.partitions(topic)) if err != nil { sp.t.Errorf("Partitioner returned an error: %s", err.Error()) return err } msgs[i].Partition = partition if expectation.CheckFunction != nil { errCheck := expectation.CheckFunction(msgs[i]) if errCheck != nil { sp.t.Errorf("Check function returned an error: %s", errCheck.Error()) return errCheck } } if !errors.Is(expectation.Result, errProduceSuccess) { return expectation.Result } sp.lastOffset++ msgs[i].Offset = sp.lastOffset } return nil } sp.t.Errorf("Insufficient expectations set on this mock producer to handle the input messages.") return errOutOfExpectations } func (sp *SyncProducer) partitioner(topic string) sarama.Partitioner { partitioner := sp.partitioners[topic] if partitioner == nil { partitioner = sp.newPartitioner(topic) sp.partitioners[topic] = partitioner } return partitioner } // Close corresponds with the Close method of sarama's SyncProducer implementation. // By closing a mock syncproducer, you also tell it that no more SendMessage calls will follow, // so it will write an error to the test state if there's any remaining expectations. func (sp *SyncProducer) Close() error { sp.l.Lock() defer sp.l.Unlock() if len(sp.expectations) > 0 { sp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(sp.expectations)) } return nil } //////////////////////////////////////////////// // Setting expectations //////////////////////////////////////////////// // ExpectSendMessageWithMessageCheckerFunctionAndSucceed sets an expectation on the mock producer // that SendMessage will be called. 
The mock producer will first call the given function to check // the message. It will cascade the error of the function, if any, or handle the message as if it // produced successfully, i.e. by returning a valid partition, and offset, and a nil error. func (sp *SyncProducer) ExpectSendMessageWithMessageCheckerFunctionAndSucceed(cf MessageChecker) *SyncProducer { sp.l.Lock() defer sp.l.Unlock() sp.expectations = append(sp.expectations, &producerExpectation{Result: errProduceSuccess, CheckFunction: cf}) return sp } // ExpectSendMessageWithMessageCheckerFunctionAndFail sets an expectation on the mock producer that // SendMessage will be called. The mock producer will first call the given function to check the // message. It will cascade the error of the function, if any, or handle the message as if it // failed to produce successfully, i.e. by returning the provided error. func (sp *SyncProducer) ExpectSendMessageWithMessageCheckerFunctionAndFail(cf MessageChecker, err error) *SyncProducer { sp.l.Lock() defer sp.l.Unlock() sp.expectations = append(sp.expectations, &producerExpectation{Result: err, CheckFunction: cf}) return sp } // ExpectSendMessageWithCheckerFunctionAndSucceed sets an expectation on the mock producer that SendMessage // will be called. The mock producer will first call the given function to check the message value. // It will cascade the error of the function, if any, or handle the message as if it produced // successfully, i.e. by returning a valid partition, and offset, and a nil error. func (sp *SyncProducer) ExpectSendMessageWithCheckerFunctionAndSucceed(cf ValueChecker) *SyncProducer { sp.ExpectSendMessageWithMessageCheckerFunctionAndSucceed(messageValueChecker(cf)) return sp } // ExpectSendMessageWithCheckerFunctionAndFail sets an expectation on the mock producer that SendMessage will be // called. The mock producer will first call the given function to check the message value. // It will cascade the error of the function, if any, or handle the message as if it failed // to produce successfully, i.e. by returning the provided error. func (sp *SyncProducer) ExpectSendMessageWithCheckerFunctionAndFail(cf ValueChecker, err error) *SyncProducer { sp.ExpectSendMessageWithMessageCheckerFunctionAndFail(messageValueChecker(cf), err) return sp } // ExpectSendMessageAndSucceed sets an expectation on the mock producer that SendMessage will be // called. The mock producer will handle the message as if it produced successfully, i.e. by // returning a valid partition, and offset, and a nil error. func (sp *SyncProducer) ExpectSendMessageAndSucceed() *SyncProducer { sp.ExpectSendMessageWithMessageCheckerFunctionAndSucceed(nil) return sp } // ExpectSendMessageAndFail sets an expectation on the mock producer that SendMessage will be // called. The mock producer will handle the message as if it failed to produce // successfully, i.e. by returning the provided error. 
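//
// A small sketch of exercising a failure path (illustrative; the error value and
// topic are assumptions):
//
//	sp := NewSyncProducer(t, nil)
//	sp.ExpectSendMessageAndFail(sarama.ErrOutOfBrokers)
//
//	_, _, err := sp.SendMessage(&sarama.ProducerMessage{Topic: "my_topic", Value: sarama.StringEncoder("x")})
//	if !errors.Is(err, sarama.ErrOutOfBrokers) {
//		t.Errorf("expected sarama.ErrOutOfBrokers, got %v", err)
//	}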
func (sp *SyncProducer) ExpectSendMessageAndFail(err error) *SyncProducer { sp.ExpectSendMessageWithMessageCheckerFunctionAndFail(nil, err) return sp } func (sp *SyncProducer) IsTransactional() bool { return sp.isTransactional } func (sp *SyncProducer) BeginTxn() error { sp.txnLock.Lock() defer sp.txnLock.Unlock() sp.txnStatus = sarama.ProducerTxnFlagInTransaction return nil } func (sp *SyncProducer) CommitTxn() error { sp.txnLock.Lock() defer sp.txnLock.Unlock() sp.txnStatus = sarama.ProducerTxnFlagReady return nil } func (sp *SyncProducer) AbortTxn() error { sp.txnLock.Lock() defer sp.txnLock.Unlock() sp.txnStatus = sarama.ProducerTxnFlagReady return nil } func (sp *SyncProducer) TxnStatus() sarama.ProducerTxnStatusFlag { return sp.txnStatus } func (sp *SyncProducer) AddOffsetsToTxn(offsets map[string][]*sarama.PartitionOffsetMetadata, groupId string) error { return nil } func (sp *SyncProducer) AddMessageToTxn(msg *sarama.ConsumerMessage, groupId string, metadata *string) error { return nil } golang-github-ibm-sarama-1.43.2/mocks/sync_producer_test.go000066400000000000000000000240641461256741300237370ustar00rootroot00000000000000package mocks import ( "errors" "strings" "testing" "github.com/IBM/sarama" ) func TestMockSyncProducerImplementsSyncProducerInterface(t *testing.T) { var mp interface{} = &SyncProducer{} if _, ok := mp.(sarama.SyncProducer); !ok { t.Error("The mock async producer should implement the sarama.SyncProducer interface.") } } func TestSyncProducerReturnsExpectationsToSendMessage(t *testing.T) { sp := NewSyncProducer(t, nil) defer func() { if err := sp.Close(); err != nil { t.Error(err) } }() sp.ExpectSendMessageAndSucceed() sp.ExpectSendMessageAndSucceed() sp.ExpectSendMessageAndFail(sarama.ErrOutOfBrokers) msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")} _, offset, err := sp.SendMessage(msg) if err != nil { t.Errorf("The first message should have been produced successfully, but got %s", err) } if offset != 1 || offset != msg.Offset { t.Errorf("The first message should have been assigned offset 1, but got %d", msg.Offset) } _, offset, err = sp.SendMessage(msg) if err != nil { t.Errorf("The second message should have been produced successfully, but got %s", err) } if offset != 2 || offset != msg.Offset { t.Errorf("The second message should have been assigned offset 2, but got %d", offset) } _, _, err = sp.SendMessage(msg) if !errors.Is(err, sarama.ErrOutOfBrokers) { t.Errorf("The third message should not have been produced successfully") } if err := sp.Close(); err != nil { t.Error(err) } } func TestSyncProducerFailTxn(t *testing.T) { config := NewTestConfig() config.Producer.Transaction.ID = "test" config.Producer.RequiredAcks = sarama.WaitForAll config.Producer.Retry.Backoff = 0 config.Producer.Idempotent = true config.Net.MaxOpenRequests = 1 config.Version = sarama.V0_11_0_0 tfm := newTestReporterMock() sp := NewSyncProducer(tfm, config) defer func() { if err := sp.Close(); err != nil { t.Error(err) } }() msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")} _, _, err := sp.SendMessage(msg) if err == nil { t.Errorf("must have failed with txn begin error") } if len(tfm.errors) != 1 { t.Errorf("must have failed with txn begin error") } } func TestSyncProducerUseTxn(t *testing.T) { config := NewTestConfig() config.Producer.Transaction.ID = "test" config.Producer.RequiredAcks = sarama.WaitForAll config.Producer.Retry.Backoff = 0 config.Producer.Idempotent = true config.Net.MaxOpenRequests = 1 config.Version 
= sarama.V0_11_0_0 sp := NewSyncProducer(t, config) defer func() { if err := sp.Close(); err != nil { t.Error(err) } }() if !sp.IsTransactional() { t.Error("producer must be transactional") } sp.ExpectSendMessageAndSucceed() msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")} err := sp.BeginTxn() if err != nil { t.Errorf("txn can't be started, got %s", err) } if sp.TxnStatus()&sarama.ProducerTxnFlagInTransaction == 0 { t.Error("transaction must be started") } _, offset, err := sp.SendMessage(msg) if err != nil { t.Errorf("The first message should have been produced successfully, but got %s", err) } if offset != 1 || offset != msg.Offset { t.Errorf("The first message should have been assigned offset 1, but got %d", msg.Offset) } if err := sp.AddMessageToTxn(&sarama.ConsumerMessage{ Topic: "original-topic", Partition: 0, Offset: 123, }, "test-group", nil); err != nil { t.Error(err) } if err := sp.AddOffsetsToTxn(map[string][]*sarama.PartitionOffsetMetadata{ "original-topic": { { Partition: 1, Offset: 321, }, }, }, "test-group"); err != nil { t.Error(err) } err = sp.CommitTxn() if err != nil { t.Errorf("txn can't be committed, got %s", err) } if err := sp.Close(); err != nil { t.Error(err) } } func TestSyncProducerWithTooManyExpectations(t *testing.T) { trm := newTestReporterMock() sp := NewSyncProducer(trm, nil). ExpectSendMessageAndSucceed(). ExpectSendMessageAndFail(sarama.ErrOutOfBrokers) msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")} if _, _, err := sp.SendMessage(msg); err != nil { t.Error("No error expected on first SendMessage call", err) } if err := sp.Close(); err != nil { t.Error(err) } if len(trm.errors) != 1 { t.Error("Expected to report an error") } } func TestSyncProducerWithTooFewExpectations(t *testing.T) { trm := newTestReporterMock() sp := NewSyncProducer(trm, nil).ExpectSendMessageAndSucceed() msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")} if _, _, err := sp.SendMessage(msg); err != nil { t.Error("No error expected on first SendMessage call", err) } if _, _, err := sp.SendMessage(msg); !errors.Is(err, errOutOfExpectations) { t.Error("errOutOfExpectations expected on second SendMessage call, found:", err) } if err := sp.Close(); err != nil { t.Error(err) } if len(trm.errors) != 1 { t.Error("Expected to report an error") } } func TestSyncProducerWithCheckerFunction(t *testing.T) { trm := newTestReporterMock() sp := NewSyncProducer(trm, nil). ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes")). ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes$")) msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")} if _, _, err := sp.SendMessage(msg); err != nil { t.Error("No error expected on first SendMessage call, found: ", err) } msg = &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")} if _, _, err := sp.SendMessage(msg); err == nil || !strings.HasPrefix(err.Error(), "No match") { t.Error("Error during value check expected on second SendMessage call, found:", err) } if err := sp.Close(); err != nil { t.Error(err) } if len(trm.errors) != 1 { t.Error("Expected to report an error") } } func TestSyncProducerWithCheckerFunctionForSendMessagesWithError(t *testing.T) { trm := newTestReporterMock() sp := NewSyncProducer(trm, nil). ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes")). 
ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes$")) msg1 := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")} msg2 := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")} msgs := []*sarama.ProducerMessage{msg1, msg2} if err := sp.SendMessages(msgs); err == nil || !strings.HasPrefix(err.Error(), "No match") { t.Error("Error during value check expected on second message, found: ", err) } if err := sp.Close(); err != nil { t.Error(err) } if len(trm.errors) != 1 { t.Error("Expected to report an error") } } func TestSyncProducerWithCheckerFunctionForSendMessagesWithoutError(t *testing.T) { trm := newTestReporterMock() sp := NewSyncProducer(trm, nil). ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes")) msg1 := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")} msgs := []*sarama.ProducerMessage{msg1} if err := sp.SendMessages(msgs); err != nil { t.Error("No error expected on SendMessages call, found: ", err) } for i, msg := range msgs { offset := int64(i + 1) if offset != msg.Offset { t.Errorf("The message should have been assigned offset %d, but got %d", offset, msg.Offset) } } if err := sp.Close(); err != nil { t.Error(err) } if len(trm.errors) != 0 { t.Errorf("Expected to not report any errors, found: %v", trm.errors) } } func TestSyncProducerSendMessagesExpectationsMismatchTooFew(t *testing.T) { trm := newTestReporterMock() sp := NewSyncProducer(trm, nil). ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes")) msg1 := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")} msg2 := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")} msgs := []*sarama.ProducerMessage{msg1, msg2} if err := sp.SendMessages(msgs); err == nil { t.Error("Error during value check expected on second message, found: ", err) } if err := sp.Close(); err != nil { t.Error(err) } if len(trm.errors) != 2 { t.Error("Expected to report 2 errors") } } func TestSyncProducerSendMessagesExpectationsMismatchTooMany(t *testing.T) { trm := newTestReporterMock() sp := NewSyncProducer(trm, nil). ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes")). ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes")) msg1 := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")} msgs := []*sarama.ProducerMessage{msg1} if err := sp.SendMessages(msgs); err != nil { t.Error("No error expected on SendMessages call, found: ", err) } if err := sp.Close(); err != nil { t.Error(err) } if len(trm.errors) != 1 { t.Error("Expected to report 1 errors") } } func TestSyncProducerSendMessagesFaultyEncoder(t *testing.T) { trm := newTestReporterMock() sp := NewSyncProducer(trm, nil). 
ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes")) msg1 := &sarama.ProducerMessage{Topic: "test", Value: faultyEncoder("123")} msgs := []*sarama.ProducerMessage{msg1} if err := sp.SendMessages(msgs); err == nil || !strings.Contains(err.Error(), "encode error") { t.Error("Encoding error expected, found: ", err) } if err := sp.Close(); err != nil { t.Error(err) } if len(trm.errors) != 1 { t.Error("Expected to report 1 errors") } } type faultyEncoder []byte func (f faultyEncoder) Encode() ([]byte, error) { return nil, errors.New("encode error") } func (f faultyEncoder) Length() int { return len(f) } func TestSyncProducerInvalidConfiguration(t *testing.T) { trm := newTestReporterMock() config := NewTestConfig() config.Version = sarama.V0_11_0_2 config.ClientID = "not a valid producer ID" mp := NewSyncProducer(trm, config) if err := mp.Close(); err != nil { t.Error(err) } if len(trm.errors) != 1 { t.Error("Expected to report a single error") } else if !strings.Contains(trm.errors[0], `ClientID value "not a valid producer ID" is not valid for Kafka versions before 1.0.0`) { t.Errorf("Unexpected error: %s", trm.errors[0]) } } golang-github-ibm-sarama-1.43.2/offset_commit_request.go000066400000000000000000000146661461256741300233220ustar00rootroot00000000000000package sarama import "errors" // ReceiveTime is a special value for the timestamp field of Offset Commit Requests which // tells the broker to set the timestamp to the time at which the request was received. // The timestamp is only used if message version 1 is used, which requires kafka 0.8.2. const ReceiveTime int64 = -1 // GroupGenerationUndefined is a special value for the group generation field of // Offset Commit Requests that should be used when a consumer group does not rely // on Kafka for partition management. 
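//
// A minimal sketch of a commit request for a group that manages partition
// assignment outside Kafka (the field values are illustrative assumptions):
//
//	req := &OffsetCommitRequest{
//		Version:                 1,
//		ConsumerGroup:           "my-group",
//		ConsumerGroupGeneration: GroupGenerationUndefined,
//	}
//	req.AddBlock("my_topic", 0, 42, ReceiveTime, "")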
const GroupGenerationUndefined = -1 type offsetCommitRequestBlock struct { offset int64 timestamp int64 committedLeaderEpoch int32 metadata string } func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error { pe.putInt64(b.offset) if version == 1 { pe.putInt64(b.timestamp) } else if b.timestamp != 0 { Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored") } if version >= 6 { pe.putInt32(b.committedLeaderEpoch) } return pe.putString(b.metadata) } func (b *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) { if b.offset, err = pd.getInt64(); err != nil { return err } if version == 1 { if b.timestamp, err = pd.getInt64(); err != nil { return err } } if version >= 6 { if b.committedLeaderEpoch, err = pd.getInt32(); err != nil { return err } } b.metadata, err = pd.getString() return err } type OffsetCommitRequest struct { ConsumerGroup string ConsumerGroupGeneration int32 // v1 or later ConsumerID string // v1 or later GroupInstanceId *string // v7 or later RetentionTime int64 // v2 or later // Version can be: // - 0 (kafka 0.8.1 and later) // - 1 (kafka 0.8.2 and later) // - 2 (kafka 0.9.0 and later) // - 3 (kafka 0.11.0 and later) // - 4 (kafka 2.0.0 and later) // - 5&6 (kafka 2.1.0 and later) // - 7 (kafka 2.3.0 and later) Version int16 blocks map[string]map[int32]*offsetCommitRequestBlock } func (r *OffsetCommitRequest) encode(pe packetEncoder) error { if r.Version < 0 || r.Version > 7 { return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"} } if err := pe.putString(r.ConsumerGroup); err != nil { return err } if r.Version >= 1 { pe.putInt32(r.ConsumerGroupGeneration) if err := pe.putString(r.ConsumerID); err != nil { return err } } else { if r.ConsumerGroupGeneration != 0 { Logger.Println("Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored") } if r.ConsumerID != "" { Logger.Println("Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored") } } // Version 5 removes RetentionTime, which is now controlled only by a broker configuration. if r.Version >= 2 && r.Version <= 4 { pe.putInt64(r.RetentionTime) } else if r.RetentionTime != 0 { Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored") } if r.Version >= 7 { if err := pe.putNullableString(r.GroupInstanceId); err != nil { return err } } if err := pe.putArrayLength(len(r.blocks)); err != nil { return err } for topic, partitions := range r.blocks { if err := pe.putString(topic); err != nil { return err } if err := pe.putArrayLength(len(partitions)); err != nil { return err } for partition, block := range partitions { pe.putInt32(partition) if err := block.encode(pe, r.Version); err != nil { return err } } } return nil } func (r *OffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) { r.Version = version if r.ConsumerGroup, err = pd.getString(); err != nil { return err } if r.Version >= 1 { if r.ConsumerGroupGeneration, err = pd.getInt32(); err != nil { return err } if r.ConsumerID, err = pd.getString(); err != nil { return err } } // Version 5 removes RetentionTime, which is now controlled only by a broker configuration. 
if r.Version >= 2 && r.Version <= 4 { if r.RetentionTime, err = pd.getInt64(); err != nil { return err } } if r.Version >= 7 { if r.GroupInstanceId, err = pd.getNullableString(); err != nil { return err } } topicCount, err := pd.getArrayLength() if err != nil { return err } if topicCount == 0 { return nil } r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock) for i := 0; i < topicCount; i++ { topic, err := pd.getString() if err != nil { return err } partitionCount, err := pd.getArrayLength() if err != nil { return err } r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock) for j := 0; j < partitionCount; j++ { partition, err := pd.getInt32() if err != nil { return err } block := &offsetCommitRequestBlock{} if err := block.decode(pd, r.Version); err != nil { return err } r.blocks[topic][partition] = block } } return nil } func (r *OffsetCommitRequest) key() int16 { return 8 } func (r *OffsetCommitRequest) version() int16 { return r.Version } func (r *OffsetCommitRequest) headerVersion() int16 { return 1 } func (r *OffsetCommitRequest) isValidVersion() bool { return r.Version >= 0 && r.Version <= 7 } func (r *OffsetCommitRequest) requiredVersion() KafkaVersion { switch r.Version { case 7: return V2_3_0_0 case 5, 6: return V2_1_0_0 case 4: return V2_0_0_0 case 3: return V0_11_0_0 case 2: return V0_9_0_0 case 0, 1: return V0_8_2_0 default: return V2_4_0_0 } } func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) { r.AddBlockWithLeaderEpoch(topic, partitionID, offset, 0, timestamp, metadata) } func (r *OffsetCommitRequest) AddBlockWithLeaderEpoch(topic string, partitionID int32, offset int64, leaderEpoch int32, timestamp int64, metadata string) { if r.blocks == nil { r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock) } if r.blocks[topic] == nil { r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock) } r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, leaderEpoch, metadata} } func (r *OffsetCommitRequest) Offset(topic string, partitionID int32) (int64, string, error) { partitions := r.blocks[topic] if partitions == nil { return 0, "", errors.New("no such offset") } block := partitions[partitionID] if block == nil { return 0, "", errors.New("no such offset") } return block.offset, block.metadata, nil } golang-github-ibm-sarama-1.43.2/offset_commit_request_test.go000066400000000000000000000137511461256741300243530ustar00rootroot00000000000000package sarama import ( "fmt" "reflect" "testing" ) var ( offsetCommitRequestNoBlocksV0 = []byte{ 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r', 0x00, 0x00, 0x00, 0x00, } offsetCommitRequestNoBlocksV1 = []byte{ 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r', 0x00, 0x00, 0x11, 0x22, 0x00, 0x04, 'c', 'o', 'n', 's', 0x00, 0x00, 0x00, 0x00, } offsetCommitRequestNoBlocksV2 = []byte{ 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r', 0x00, 0x00, 0x11, 0x22, 0x00, 0x04, 'c', 'o', 'n', 's', 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x33, 0x00, 0x00, 0x00, 0x00, } offsetCommitRequestOneBlockV0 = []byte{ 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r', 0x00, 0x00, 0x00, 0x01, 0x00, 0x05, 't', 'o', 'p', 'i', 'c', 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x52, 0x21, 0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF, 0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a', } offsetCommitRequestOneBlockV1 = []byte{ 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r', 0x00, 0x00, 0x11, 0x22, 0x00, 0x04, 'c', 'o', 'n', 's', 0x00, 0x00, 0x00, 0x01, 0x00, 0x05, 't', 'o', 'p', 'i', 'c', 0x00, 0x00, 
0x00, 0x01, 0x00, 0x00, 0x52, 0x21, 0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a', } offsetCommitRequestOneBlockV2 = []byte{ 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r', 0x00, 0x00, 0x11, 0x22, 0x00, 0x04, 'c', 'o', 'n', 's', 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x33, 0x00, 0x00, 0x00, 0x01, 0x00, 0x05, 't', 'o', 'p', 'i', 'c', 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x52, 0x21, 0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF, 0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a', } ) func TestOffsetCommitRequestV0(t *testing.T) { request := new(OffsetCommitRequest) request.Version = 0 request.ConsumerGroup = "foobar" testRequest(t, "no blocks v0", request, offsetCommitRequestNoBlocksV0) request.AddBlock("topic", 0x5221, 0xDEADBEEF, 0, "metadata") testRequest(t, "one block v0", request, offsetCommitRequestOneBlockV0) } func TestOffsetCommitRequestV1(t *testing.T) { request := new(OffsetCommitRequest) request.ConsumerGroup = "foobar" request.ConsumerID = "cons" request.ConsumerGroupGeneration = 0x1122 request.Version = 1 testRequest(t, "no blocks v1", request, offsetCommitRequestNoBlocksV1) request.AddBlock("topic", 0x5221, 0xDEADBEEF, ReceiveTime, "metadata") testRequest(t, "one block v1", request, offsetCommitRequestOneBlockV1) } func TestOffsetCommitRequestV2ToV4(t *testing.T) { for version := 2; version <= 4; version++ { request := new(OffsetCommitRequest) request.ConsumerGroup = "foobar" request.ConsumerID = "cons" request.ConsumerGroupGeneration = 0x1122 request.RetentionTime = 0x4433 request.Version = int16(version) testRequest(t, fmt.Sprintf("no blocks v%d", version), request, offsetCommitRequestNoBlocksV2) request.AddBlock("topic", 0x5221, 0xDEADBEEF, 0, "metadata") testRequest(t, fmt.Sprintf("one block v%d", version), request, offsetCommitRequestOneBlockV2) } } var ( offsetCommitRequestOneBlockV5 = []byte{ 0, 3, 'f', 'o', 'o', // GroupId 0x00, 0x00, 0x00, 0x01, // GenerationId 0, 3, 'm', 'i', 'd', // MemberId 0, 0, 0, 1, // One Topic 0, 5, 't', 'o', 'p', 'i', 'c', // Name 0, 0, 0, 1, // One Partition 0, 0, 0, 1, // PartitionIndex 0, 0, 0, 0, 0, 0, 0, 2, // CommittedOffset 0, 4, 'm', 'e', 't', 'a', // CommittedMetadata } offsetCommitRequestOneBlockV6 = []byte{ 0, 3, 'f', 'o', 'o', // GroupId 0x00, 0x00, 0x00, 0x01, // GenerationId 0, 3, 'm', 'i', 'd', // MemberId 0, 0, 0, 1, // One Topic 0, 5, 't', 'o', 'p', 'i', 'c', // Name 0, 0, 0, 1, // One Partition 0, 0, 0, 1, // PartitionIndex 0, 0, 0, 0, 0, 0, 0, 2, // CommittedOffset 0, 0, 0, 3, // CommittedEpoch 0, 4, 'm', 'e', 't', 'a', // CommittedMetadata } offsetCommitRequestOneBlockV7 = []byte{ 0, 3, 'f', 'o', 'o', // GroupId 0x00, 0x00, 0x00, 0x01, // GenerationId 0, 3, 'm', 'i', 'd', // MemberId 0, 3, 'g', 'i', 'd', // MemberId 0, 0, 0, 1, // One Topic 0, 5, 't', 'o', 'p', 'i', 'c', // Name 0, 0, 0, 1, // One Partition 0, 0, 0, 1, // PartitionIndex 0, 0, 0, 0, 0, 0, 0, 2, // CommittedOffset 0, 0, 0, 3, // CommittedEpoch 0, 4, 'm', 'e', 't', 'a', // CommittedMetadata } ) func TestOffsetCommitRequestV5AndPlus(t *testing.T) { groupInstanceId := "gid" tests := []struct { CaseName string Version int16 MessageBytes []byte Message *OffsetCommitRequest }{ { "v5", 5, offsetCommitRequestOneBlockV5, &OffsetCommitRequest{ Version: 5, ConsumerGroup: "foo", ConsumerGroupGeneration: 1, ConsumerID: "mid", blocks: map[string]map[int32]*offsetCommitRequestBlock{ "topic": { 1: &offsetCommitRequestBlock{offset: 2, metadata: "meta"}, }, }, }, }, { "v6", 6, 
offsetCommitRequestOneBlockV6, &OffsetCommitRequest{ Version: 6, ConsumerGroup: "foo", ConsumerGroupGeneration: 1, ConsumerID: "mid", blocks: map[string]map[int32]*offsetCommitRequestBlock{ "topic": { 1: &offsetCommitRequestBlock{offset: 2, metadata: "meta", committedLeaderEpoch: 3}, }, }, }, }, { "v7", 7, offsetCommitRequestOneBlockV7, &OffsetCommitRequest{ Version: 7, ConsumerGroup: "foo", ConsumerGroupGeneration: 1, ConsumerID: "mid", GroupInstanceId: &groupInstanceId, blocks: map[string]map[int32]*offsetCommitRequestBlock{ "topic": { 1: &offsetCommitRequestBlock{offset: 2, metadata: "meta", committedLeaderEpoch: 3}, }, }, }, }, } for _, c := range tests { request := new(OffsetCommitRequest) testVersionDecodable(t, c.CaseName, request, c.MessageBytes, c.Version) if !reflect.DeepEqual(c.Message, request) { t.Errorf("case %s decode failed, expected:%+v got %+v", c.CaseName, c.Message, request) } testEncodable(t, c.CaseName, c.Message, c.MessageBytes) } } golang-github-ibm-sarama-1.43.2/offset_commit_response.go000066400000000000000000000047101461256741300234550ustar00rootroot00000000000000package sarama import "time" type OffsetCommitResponse struct { Version int16 ThrottleTimeMs int32 Errors map[string]map[int32]KError } func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) { if r.Errors == nil { r.Errors = make(map[string]map[int32]KError) } partitions := r.Errors[topic] if partitions == nil { partitions = make(map[int32]KError) r.Errors[topic] = partitions } partitions[partition] = kerror } func (r *OffsetCommitResponse) encode(pe packetEncoder) error { if r.Version >= 3 { pe.putInt32(r.ThrottleTimeMs) } if err := pe.putArrayLength(len(r.Errors)); err != nil { return err } for topic, partitions := range r.Errors { if err := pe.putString(topic); err != nil { return err } if err := pe.putArrayLength(len(partitions)); err != nil { return err } for partition, kerror := range partitions { pe.putInt32(partition) pe.putInt16(int16(kerror)) } } return nil } func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) { r.Version = version if version >= 3 { r.ThrottleTimeMs, err = pd.getInt32() if err != nil { return err } } numTopics, err := pd.getArrayLength() if err != nil || numTopics == 0 { return err } r.Errors = make(map[string]map[int32]KError, numTopics) for i := 0; i < numTopics; i++ { name, err := pd.getString() if err != nil { return err } numErrors, err := pd.getArrayLength() if err != nil { return err } r.Errors[name] = make(map[int32]KError, numErrors) for j := 0; j < numErrors; j++ { id, err := pd.getInt32() if err != nil { return err } tmp, err := pd.getInt16() if err != nil { return err } r.Errors[name][id] = KError(tmp) } } return nil } func (r *OffsetCommitResponse) key() int16 { return 8 } func (r *OffsetCommitResponse) version() int16 { return r.Version } func (r *OffsetCommitResponse) headerVersion() int16 { return 0 } func (r *OffsetCommitResponse) isValidVersion() bool { return r.Version >= 0 && r.Version <= 7 } func (r *OffsetCommitResponse) requiredVersion() KafkaVersion { switch r.Version { case 7: return V2_3_0_0 case 5, 6: return V2_1_0_0 case 4: return V2_0_0_0 case 3: return V0_11_0_0 case 2: return V0_9_0_0 case 0, 1: return V0_8_2_0 default: return V2_4_0_0 } } func (r *OffsetCommitResponse) throttleTime() time.Duration { return time.Duration(r.ThrottleTimeMs) * time.Millisecond } 
golang-github-ibm-sarama-1.43.2/offset_commit_response_test.go000066400000000000000000000047471461256741300245260ustar00rootroot00000000000000package sarama import ( "fmt" "reflect" "testing" ) var ( emptyOffsetCommitResponseV0 = []byte{ 0x00, 0x00, 0x00, 0x00, // Empty topic } noEmptyOffsetCommitResponseV0 = []byte{ 0, 0, 0, 1, // Topic Len 0, 5, 't', 'o', 'p', 'i', 'c', // Name 0, 0, 0, 1, // Partition Len 0, 0, 0, 3, // PartitionIndex 0, 0, // ErrorCode } noEmptyOffsetCommitResponseV3 = []byte{ 0, 0, 0, 100, // ThrottleTimeMs 0, 0, 0, 1, // Topic Len 0, 5, 't', 'o', 'p', 'i', 'c', // Name 0, 0, 0, 1, // Partition Len 0, 0, 0, 3, // PartitionIndex 0, 0, // ErrorCode } ) func TestEmptyOffsetCommitResponse(t *testing.T) { // groupInstanceId := "gid" tests := []struct { CaseName string Version int16 MessageBytes []byte Message *OffsetCommitResponse }{ { "v0-empty", 0, emptyOffsetCommitResponseV0, &OffsetCommitResponse{ Version: 0, }, }, { "v0-two-partition", 0, noEmptyOffsetCommitResponseV0, &OffsetCommitResponse{ Version: 0, Errors: map[string]map[int32]KError{ "topic": { 3: ErrNoError, }, }, }, }, { "v3", 3, noEmptyOffsetCommitResponseV3, &OffsetCommitResponse{ ThrottleTimeMs: 100, Version: 3, Errors: map[string]map[int32]KError{ "topic": { 3: ErrNoError, }, }, }, }, } for _, c := range tests { response := new(OffsetCommitResponse) testVersionDecodable(t, c.CaseName, response, c.MessageBytes, c.Version) if !reflect.DeepEqual(c.Message, response) { t.Errorf("case %s decode failed, expected:%+v got %+v", c.CaseName, c.Message, response) } testEncodable(t, c.CaseName, c.Message, c.MessageBytes) } } func TestNormalOffsetCommitResponse(t *testing.T) { response := OffsetCommitResponse{} response.AddError("t", 0, ErrNotLeaderForPartition) response.Errors["m"] = make(map[int32]KError) // The response encoded form cannot be checked for it varies due to // unpredictable map traversal order. testResponse(t, "normal", &response, nil) } func TestOffsetCommitResponseWithThrottleTime(t *testing.T) { for version := 3; version <= 4; version++ { response := OffsetCommitResponse{ Version: int16(version), ThrottleTimeMs: 123, } response.AddError("t", 0, ErrNotLeaderForPartition) response.Errors["m"] = make(map[int32]KError) // The response encoded form cannot be checked for it varies due to // unpredictable map traversal order. testResponse(t, fmt.Sprintf("v%d with throttle time", version), &response, nil) } } golang-github-ibm-sarama-1.43.2/offset_fetch_request.go000066400000000000000000000116741461256741300231170ustar00rootroot00000000000000package sarama type OffsetFetchRequest struct { Version int16 ConsumerGroup string RequireStable bool // requires v7+ partitions map[string][]int32 } func NewOffsetFetchRequest( version KafkaVersion, group string, partitions map[string][]int32, ) *OffsetFetchRequest { request := &OffsetFetchRequest{ ConsumerGroup: group, partitions: partitions, } if version.IsAtLeast(V2_5_0_0) { // Version 7 is adding the require stable flag. request.Version = 7 } else if version.IsAtLeast(V2_4_0_0) { // Version 6 is the first flexible version. request.Version = 6 } else if version.IsAtLeast(V2_1_0_0) { // Version 3, 4, and 5 are the same as version 2. request.Version = 5 } else if version.IsAtLeast(V2_0_0_0) { request.Version = 4 } else if version.IsAtLeast(V0_11_0_0) { request.Version = 3 } else if version.IsAtLeast(V0_10_2_0) { // Starting in version 2, the request can contain a null topics array to indicate that offsets // for all topics should be fetched. 
It also returns a top level error code // for group or coordinator level errors. request.Version = 2 } else if version.IsAtLeast(V0_8_2_0) { // In version 0, the request read offsets from ZK. // // Starting in version 1, the broker supports fetching offsets from the internal __consumer_offsets topic. request.Version = 1 } return request } func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) { if r.Version < 0 || r.Version > 7 { return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"} } isFlexible := r.Version >= 6 if isFlexible { err = pe.putCompactString(r.ConsumerGroup) } else { err = pe.putString(r.ConsumerGroup) } if err != nil { return err } if isFlexible { if r.partitions == nil { pe.putUVarint(0) } else { pe.putCompactArrayLength(len(r.partitions)) } } else { if r.partitions == nil && r.Version >= 2 { pe.putInt32(-1) } else { if err = pe.putArrayLength(len(r.partitions)); err != nil { return err } } } for topic, partitions := range r.partitions { if isFlexible { err = pe.putCompactString(topic) } else { err = pe.putString(topic) } if err != nil { return err } // if isFlexible { err = pe.putCompactInt32Array(partitions) } else { err = pe.putInt32Array(partitions) } if err != nil { return err } if isFlexible { pe.putEmptyTaggedFieldArray() } } if r.RequireStable && r.Version < 7 { return PacketEncodingError{"requireStable is not supported. use version 7 or later"} } if r.Version >= 7 { pe.putBool(r.RequireStable) } if isFlexible { pe.putEmptyTaggedFieldArray() } return nil } func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) { r.Version = version isFlexible := r.Version >= 6 if isFlexible { r.ConsumerGroup, err = pd.getCompactString() } else { r.ConsumerGroup, err = pd.getString() } if err != nil { return err } var partitionCount int if isFlexible { partitionCount, err = pd.getCompactArrayLength() } else { partitionCount, err = pd.getArrayLength() } if err != nil { return err } if (partitionCount == 0 && version < 2) || partitionCount < 0 { return nil } r.partitions = make(map[string][]int32, partitionCount) for i := 0; i < partitionCount; i++ { var topic string if isFlexible { topic, err = pd.getCompactString() } else { topic, err = pd.getString() } if err != nil { return err } var partitions []int32 if isFlexible { partitions, err = pd.getCompactInt32Array() } else { partitions, err = pd.getInt32Array() } if err != nil { return err } if isFlexible { _, err = pd.getEmptyTaggedFieldArray() if err != nil { return err } } r.partitions[topic] = partitions } if r.Version >= 7 { r.RequireStable, err = pd.getBool() if err != nil { return err } } if isFlexible { _, err = pd.getEmptyTaggedFieldArray() if err != nil { return err } } return nil } func (r *OffsetFetchRequest) key() int16 { return 9 } func (r *OffsetFetchRequest) version() int16 { return r.Version } func (r *OffsetFetchRequest) headerVersion() int16 { if r.Version >= 6 { return 2 } return 1 } func (r *OffsetFetchRequest) isValidVersion() bool { return r.Version >= 0 && r.Version <= 7 } func (r *OffsetFetchRequest) requiredVersion() KafkaVersion { switch r.Version { case 7: return V2_5_0_0 case 6: return V2_4_0_0 case 5: return V2_1_0_0 case 4: return V2_0_0_0 case 3: return V0_11_0_0 case 2: return V0_10_2_0 case 1: return V0_8_2_0 case 0: return V0_8_2_0 default: return V2_5_0_0 } } func (r *OffsetFetchRequest) ZeroPartitions() { if r.partitions == nil && r.Version >= 2 { r.partitions = make(map[string][]int32) } } func (r *OffsetFetchRequest) 
AddPartition(topic string, partitionID int32) { if r.partitions == nil { r.partitions = make(map[string][]int32) } r.partitions[topic] = append(r.partitions[topic], partitionID) } golang-github-ibm-sarama-1.43.2/offset_fetch_request_test.go000066400000000000000000000065351461256741300241560ustar00rootroot00000000000000package sarama import ( "fmt" "testing" ) var ( offsetFetchRequestNoGroupNoPartitions = []byte{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, } offsetFetchRequestNoPartitionsV6 = []byte{ 0x05, 'b', 'l', 'a', 'h', 0x01, 0x00, } offsetFetchRequestNoPartitionsV7 = []byte{ 0x05, 'b', 'l', 'a', 'h', 0x01, 0x01, 0x00, } offsetFetchRequestNoPartitions = []byte{ 0x00, 0x04, 'b', 'l', 'a', 'h', 0x00, 0x00, 0x00, 0x00, } offsetFetchRequestOnePartition = []byte{ 0x00, 0x04, 'b', 'l', 'a', 'h', 0x00, 0x00, 0x00, 0x01, 0x00, 0x0D, 't', 'o', 'p', 'i', 'c', 'T', 'h', 'e', 'F', 'i', 'r', 's', 't', 0x00, 0x00, 0x00, 0x01, 0x4F, 0x4F, 0x4F, 0x4F, } offsetFetchRequestOnePartitionV6 = []byte{ 0x05, 'b', 'l', 'a', 'h', 0x02, 0x0E, 't', 'o', 'p', 'i', 'c', 'T', 'h', 'e', 'F', 'i', 'r', 's', 't', 0x02, 0x4F, 0x4F, 0x4F, 0x4F, 0x00, 0x00, } offsetFetchRequestOnePartitionV7 = []byte{ 0x05, 'b', 'l', 'a', 'h', 0x02, 0x0E, 't', 'o', 'p', 'i', 'c', 'T', 'h', 'e', 'F', 'i', 'r', 's', 't', 0x02, 0x4F, 0x4F, 0x4F, 0x4F, 0x00, 0x00, 0x00, } offsetFetchRequestAllPartitions = []byte{ 0x00, 0x04, 'b', 'l', 'a', 'h', 0xff, 0xff, 0xff, 0xff, } ) func TestOffsetFetchRequestNoPartitions(t *testing.T) { for version := 0; version <= 5; version++ { request := new(OffsetFetchRequest) request.Version = int16(version) request.ZeroPartitions() testRequest(t, fmt.Sprintf("no group, no partitions %d", version), request, offsetFetchRequestNoGroupNoPartitions) request.ConsumerGroup = "blah" testRequest(t, fmt.Sprintf("no partitions %d", version), request, offsetFetchRequestNoPartitions) } { // v6 version := 6 request := new(OffsetFetchRequest) request.Version = int16(version) request.ConsumerGroup = "blah" request.ZeroPartitions() testRequest(t, fmt.Sprintf("no partitions %d", version), request, offsetFetchRequestNoPartitionsV6) } { // v7 version := 7 request := new(OffsetFetchRequest) request.Version = int16(version) request.ConsumerGroup = "blah" request.RequireStable = true request.ZeroPartitions() testRequest(t, fmt.Sprintf("no partitions %d", version), request, offsetFetchRequestNoPartitionsV7) } } func TestOffsetFetchRequest(t *testing.T) { for version := 0; version <= 5; version++ { request := new(OffsetFetchRequest) request.Version = int16(version) request.ConsumerGroup = "blah" request.AddPartition("topicTheFirst", 0x4F4F4F4F) testRequest(t, fmt.Sprintf("one partition %d", version), request, offsetFetchRequestOnePartition) } { // v6 version := 6 request := new(OffsetFetchRequest) request.Version = int16(version) request.ConsumerGroup = "blah" request.AddPartition("topicTheFirst", 0x4F4F4F4F) testRequest(t, fmt.Sprintf("one partition %d", version), request, offsetFetchRequestOnePartitionV6) } { // v7 version := 7 request := new(OffsetFetchRequest) request.Version = int16(version) request.ConsumerGroup = "blah" request.AddPartition("topicTheFirst", 0x4F4F4F4F) testRequest(t, fmt.Sprintf("one partition %d", version), request, offsetFetchRequestOnePartitionV7) } } func TestOffsetFetchRequestAllPartitions(t *testing.T) { for version := 2; version <= 5; version++ { request := &OffsetFetchRequest{Version: int16(version), ConsumerGroup: "blah"} testRequest(t, fmt.Sprintf("all partitions %d", version), request, 
offsetFetchRequestAllPartitions) } } golang-github-ibm-sarama-1.43.2/offset_fetch_response.go000066400000000000000000000123461461256741300232620ustar00rootroot00000000000000package sarama import "time" type OffsetFetchResponseBlock struct { Offset int64 LeaderEpoch int32 Metadata string Err KError } func (b *OffsetFetchResponseBlock) decode(pd packetDecoder, version int16) (err error) { isFlexible := version >= 6 b.Offset, err = pd.getInt64() if err != nil { return err } if version >= 5 { b.LeaderEpoch, err = pd.getInt32() if err != nil { return err } } else { b.LeaderEpoch = -1 } if isFlexible { b.Metadata, err = pd.getCompactString() } else { b.Metadata, err = pd.getString() } if err != nil { return err } tmp, err := pd.getInt16() if err != nil { return err } b.Err = KError(tmp) if isFlexible { if _, err := pd.getEmptyTaggedFieldArray(); err != nil { return err } } return nil } func (b *OffsetFetchResponseBlock) encode(pe packetEncoder, version int16) (err error) { isFlexible := version >= 6 pe.putInt64(b.Offset) if version >= 5 { pe.putInt32(b.LeaderEpoch) } if isFlexible { err = pe.putCompactString(b.Metadata) } else { err = pe.putString(b.Metadata) } if err != nil { return err } pe.putInt16(int16(b.Err)) if isFlexible { pe.putEmptyTaggedFieldArray() } return nil } type OffsetFetchResponse struct { Version int16 ThrottleTimeMs int32 Blocks map[string]map[int32]*OffsetFetchResponseBlock Err KError } func (r *OffsetFetchResponse) encode(pe packetEncoder) (err error) { isFlexible := r.Version >= 6 if r.Version >= 3 { pe.putInt32(r.ThrottleTimeMs) } if isFlexible { pe.putCompactArrayLength(len(r.Blocks)) } else { err = pe.putArrayLength(len(r.Blocks)) } if err != nil { return err } for topic, partitions := range r.Blocks { if isFlexible { err = pe.putCompactString(topic) } else { err = pe.putString(topic) } if err != nil { return err } if isFlexible { pe.putCompactArrayLength(len(partitions)) } else { err = pe.putArrayLength(len(partitions)) } if err != nil { return err } for partition, block := range partitions { pe.putInt32(partition) if err := block.encode(pe, r.Version); err != nil { return err } } if isFlexible { pe.putEmptyTaggedFieldArray() } } if r.Version >= 2 { pe.putInt16(int16(r.Err)) } if isFlexible { pe.putEmptyTaggedFieldArray() } return nil } func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) { r.Version = version isFlexible := version >= 6 if version >= 3 { r.ThrottleTimeMs, err = pd.getInt32() if err != nil { return err } } var numTopics int if isFlexible { numTopics, err = pd.getCompactArrayLength() } else { numTopics, err = pd.getArrayLength() } if err != nil { return err } if numTopics > 0 { r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics) for i := 0; i < numTopics; i++ { var name string if isFlexible { name, err = pd.getCompactString() } else { name, err = pd.getString() } if err != nil { return err } var numBlocks int if isFlexible { numBlocks, err = pd.getCompactArrayLength() } else { numBlocks, err = pd.getArrayLength() } if err != nil { return err } r.Blocks[name] = nil if numBlocks > 0 { r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks) } for j := 0; j < numBlocks; j++ { id, err := pd.getInt32() if err != nil { return err } block := new(OffsetFetchResponseBlock) err = block.decode(pd, version) if err != nil { return err } r.Blocks[name][id] = block } if isFlexible { if _, err := pd.getEmptyTaggedFieldArray(); err != nil { return err } } } } if version >= 2 { kerr, err := pd.getInt16() if 
err != nil { return err } r.Err = KError(kerr) } if isFlexible { if _, err := pd.getEmptyTaggedFieldArray(); err != nil { return err } } return nil } func (r *OffsetFetchResponse) key() int16 { return 9 } func (r *OffsetFetchResponse) version() int16 { return r.Version } func (r *OffsetFetchResponse) headerVersion() int16 { if r.Version >= 6 { return 1 } return 0 } func (r *OffsetFetchResponse) isValidVersion() bool { return r.Version >= 0 && r.Version <= 7 } func (r *OffsetFetchResponse) requiredVersion() KafkaVersion { switch r.Version { case 7: return V2_5_0_0 case 6: return V2_4_0_0 case 5: return V2_1_0_0 case 4: return V2_0_0_0 case 3: return V0_11_0_0 case 2: return V0_10_2_0 case 1: return V0_8_2_0 case 0: return V0_8_2_0 default: return V2_5_0_0 } } func (r *OffsetFetchResponse) throttleTime() time.Duration { return time.Duration(r.ThrottleTimeMs) * time.Millisecond } func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock { if r.Blocks == nil { return nil } if r.Blocks[topic] == nil { return nil } return r.Blocks[topic][partition] } func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) { if r.Blocks == nil { r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock) } partitions := r.Blocks[topic] if partitions == nil { partitions = make(map[int32]*OffsetFetchResponseBlock) r.Blocks[topic] = partitions } partitions[partition] = block } golang-github-ibm-sarama-1.43.2/offset_fetch_response_test.go000066400000000000000000000044661461256741300243250ustar00rootroot00000000000000package sarama import ( "fmt" "testing" ) var ( emptyOffsetFetchResponse = []byte{ 0x00, 0x00, 0x00, 0x00, } emptyOffsetFetchResponseV2 = []byte{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x2A, } emptyOffsetFetchResponseV3 = []byte{ 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2A, } ) func TestEmptyOffsetFetchResponse(t *testing.T) { for version := 0; version <= 1; version++ { response := OffsetFetchResponse{Version: int16(version)} testResponse(t, fmt.Sprintf("empty v%d", version), &response, emptyOffsetFetchResponse) } responseV2 := OffsetFetchResponse{Version: 2, Err: ErrInvalidRequest} testResponse(t, "empty V2", &responseV2, emptyOffsetFetchResponseV2) for version := 3; version <= 5; version++ { responseV3 := OffsetFetchResponse{Version: int16(version), Err: ErrInvalidRequest, ThrottleTimeMs: 9} testResponse(t, fmt.Sprintf("empty v%d", version), &responseV3, emptyOffsetFetchResponseV3) } } func TestNormalOffsetFetchResponse(t *testing.T) { // The response encoded form cannot be checked for it varies due to // unpredictable map traversal order. 
// Hence the 'nil' as byte[] parameter in the 'testResponse(..)' calls for version := 0; version <= 1; version++ { response := OffsetFetchResponse{Version: int16(version)} response.AddBlock("t", 0, &OffsetFetchResponseBlock{0, -1, "md", ErrRequestTimedOut}) response.Blocks["m"] = nil testResponse(t, fmt.Sprintf("Normal v%d", version), &response, nil) } responseV2 := OffsetFetchResponse{Version: 2, Err: ErrInvalidRequest} responseV2.AddBlock("t", 0, &OffsetFetchResponseBlock{0, -1, "md", ErrRequestTimedOut}) responseV2.Blocks["m"] = nil testResponse(t, "normal V2", &responseV2, nil) for version := 3; version <= 4; version++ { responseV3 := OffsetFetchResponse{Version: int16(version), Err: ErrInvalidRequest, ThrottleTimeMs: 9} responseV3.AddBlock("t", 0, &OffsetFetchResponseBlock{0, -1, "md", ErrRequestTimedOut}) responseV3.Blocks["m"] = nil testResponse(t, fmt.Sprintf("Normal v%d", version), &responseV3, nil) } responseV5 := OffsetFetchResponse{Version: 5, Err: ErrInvalidRequest, ThrottleTimeMs: 9} responseV5.AddBlock("t", 0, &OffsetFetchResponseBlock{Offset: 10, LeaderEpoch: 100, Metadata: "md", Err: ErrRequestTimedOut}) responseV5.Blocks["m"] = nil testResponse(t, "normal V5", &responseV5, nil) } golang-github-ibm-sarama-1.43.2/offset_manager.go000066400000000000000000000420321461256741300216600ustar00rootroot00000000000000package sarama import ( "sync" "time" ) // Offset Manager // OffsetManager uses Kafka to store and fetch consumed partition offsets. type OffsetManager interface { // ManagePartition creates a PartitionOffsetManager on the given topic/partition. // It will return an error if this OffsetManager is already managing the given // topic/partition. ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) // Close stops the OffsetManager from managing offsets. It is required to call // this function before an OffsetManager object passes out of scope, as it // will otherwise leak memory. You must call this after all the // PartitionOffsetManagers are closed. Close() error // Commit commits the offsets. This method can be used if AutoCommit.Enable is // set to false. Commit() } type offsetManager struct { client Client conf *Config group string ticker *time.Ticker sessionCanceler func() memberID string groupInstanceId *string generation int32 broker *Broker brokerLock sync.RWMutex poms map[string]map[int32]*partitionOffsetManager pomsLock sync.RWMutex closeOnce sync.Once closing chan none closed chan none } // NewOffsetManagerFromClient creates a new OffsetManager from the given client. // It is still necessary to call Close() on the underlying client when finished with the partition manager. 
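// A minimal usage sketch (illustrative only; the broker address, group name,
// topic name and offset value below are placeholders, not part of the
// upstream documentation):
//
//	config := NewConfig()
//	config.Consumer.Offsets.AutoCommit.Enable = false // commit explicitly via om.Commit()
//	client, err := NewClient([]string{"localhost:9092"}, config)
//	if err != nil {
//		panic(err)
//	}
//	defer client.Close()
//
//	om, err := NewOffsetManagerFromClient("example-group", client)
//	if err != nil {
//		panic(err)
//	}
//	pom, err := om.ManagePartition("example-topic", 0)
//	if err != nil {
//		panic(err)
//	}
//
//	pom.MarkOffset(42, "") // mark the offset of the *next* message to read
//	om.Commit()            // flush marked offsets to the group coordinator
//
// See the Close and AsyncClose documentation on OffsetManager and
// PartitionOffsetManager for the required shutdown ordering of the partition
// offset managers, the offset manager and the client.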
func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) { return newOffsetManagerFromClient(group, "", GroupGenerationUndefined, client, nil) } func newOffsetManagerFromClient(group, memberID string, generation int32, client Client, sessionCanceler func()) (*offsetManager, error) { // Check that we are not dealing with a closed Client before processing any other arguments if client.Closed() { return nil, ErrClosedClient } conf := client.Config() om := &offsetManager{ client: client, conf: conf, group: group, poms: make(map[string]map[int32]*partitionOffsetManager), sessionCanceler: sessionCanceler, memberID: memberID, generation: generation, closing: make(chan none), closed: make(chan none), } if conf.Consumer.Group.InstanceId != "" { om.groupInstanceId = &conf.Consumer.Group.InstanceId } if conf.Consumer.Offsets.AutoCommit.Enable { om.ticker = time.NewTicker(conf.Consumer.Offsets.AutoCommit.Interval) go withRecover(om.mainLoop) } return om, nil } func (om *offsetManager) ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) { pom, err := om.newPartitionOffsetManager(topic, partition) if err != nil { return nil, err } om.pomsLock.Lock() defer om.pomsLock.Unlock() topicManagers := om.poms[topic] if topicManagers == nil { topicManagers = make(map[int32]*partitionOffsetManager) om.poms[topic] = topicManagers } if topicManagers[partition] != nil { return nil, ConfigurationError("That topic/partition is already being managed") } topicManagers[partition] = pom return pom, nil } func (om *offsetManager) Close() error { om.closeOnce.Do(func() { // exit the mainLoop close(om.closing) if om.conf.Consumer.Offsets.AutoCommit.Enable { <-om.closed } // mark all POMs as closed om.asyncClosePOMs() // flush one last time if om.conf.Consumer.Offsets.AutoCommit.Enable { for attempt := 0; attempt <= om.conf.Consumer.Offsets.Retry.Max; attempt++ { om.flushToBroker() if om.releasePOMs(false) == 0 { break } } } om.releasePOMs(true) om.brokerLock.Lock() om.broker = nil om.brokerLock.Unlock() }) return nil } func (om *offsetManager) computeBackoff(retries int) time.Duration { if om.conf.Metadata.Retry.BackoffFunc != nil { return om.conf.Metadata.Retry.BackoffFunc(retries, om.conf.Metadata.Retry.Max) } else { return om.conf.Metadata.Retry.Backoff } } func (om *offsetManager) fetchInitialOffset(topic string, partition int32, retries int) (int64, int32, string, error) { broker, err := om.coordinator() if err != nil { if retries <= 0 { return 0, 0, "", err } return om.fetchInitialOffset(topic, partition, retries-1) } partitions := map[string][]int32{topic: {partition}} req := NewOffsetFetchRequest(om.conf.Version, om.group, partitions) resp, err := broker.FetchOffset(req) if err != nil { if retries <= 0 { return 0, 0, "", err } om.releaseCoordinator(broker) return om.fetchInitialOffset(topic, partition, retries-1) } block := resp.GetBlock(topic, partition) if block == nil { return 0, 0, "", ErrIncompleteResponse } switch block.Err { case ErrNoError: return block.Offset, block.LeaderEpoch, block.Metadata, nil case ErrNotCoordinatorForConsumer: if retries <= 0 { return 0, 0, "", block.Err } om.releaseCoordinator(broker) return om.fetchInitialOffset(topic, partition, retries-1) case ErrOffsetsLoadInProgress: if retries <= 0 { return 0, 0, "", block.Err } backoff := om.computeBackoff(retries) select { case <-om.closing: return 0, 0, "", block.Err case <-time.After(backoff): } return om.fetchInitialOffset(topic, partition, retries-1) default: return 0, 0, "", block.Err } 
} func (om *offsetManager) coordinator() (*Broker, error) { om.brokerLock.RLock() broker := om.broker om.brokerLock.RUnlock() if broker != nil { return broker, nil } om.brokerLock.Lock() defer om.brokerLock.Unlock() if broker := om.broker; broker != nil { return broker, nil } if err := om.client.RefreshCoordinator(om.group); err != nil { return nil, err } broker, err := om.client.Coordinator(om.group) if err != nil { return nil, err } om.broker = broker return broker, nil } func (om *offsetManager) releaseCoordinator(b *Broker) { om.brokerLock.Lock() if om.broker == b { om.broker = nil } om.brokerLock.Unlock() } func (om *offsetManager) mainLoop() { defer om.ticker.Stop() defer close(om.closed) for { select { case <-om.ticker.C: om.Commit() case <-om.closing: return } } } func (om *offsetManager) Commit() { om.flushToBroker() om.releasePOMs(false) } func (om *offsetManager) flushToBroker() { req := om.constructRequest() if req == nil { return } broker, err := om.coordinator() if err != nil { om.handleError(err) return } resp, err := broker.CommitOffset(req) if err != nil { om.handleError(err) om.releaseCoordinator(broker) _ = broker.Close() return } om.handleResponse(broker, req, resp) } func (om *offsetManager) constructRequest() *OffsetCommitRequest { r := &OffsetCommitRequest{ Version: 1, ConsumerGroup: om.group, ConsumerID: om.memberID, ConsumerGroupGeneration: om.generation, } // Version 1 adds timestamp and group membership information, as well as the commit timestamp. // // Version 2 adds retention time. It removes the commit timestamp added in version 1. if om.conf.Version.IsAtLeast(V0_9_0_0) { r.Version = 2 } // Version 3 and 4 are the same as version 2. if om.conf.Version.IsAtLeast(V0_11_0_0) { r.Version = 3 } if om.conf.Version.IsAtLeast(V2_0_0_0) { r.Version = 4 } // Version 5 removes the retention time, which is now controlled only by a broker configuration. // // Version 6 adds the leader epoch for fencing. if om.conf.Version.IsAtLeast(V2_1_0_0) { r.Version = 6 } // version 7 adds a new field called groupInstanceId to indicate member identity across restarts. if om.conf.Version.IsAtLeast(V2_3_0_0) { r.Version = 7 r.GroupInstanceId = om.groupInstanceId } // commit timestamp was only briefly supported in V1 where we set it to // ReceiveTime (-1) to tell the broker to set it to the time when the commit // request was received var commitTimestamp int64 if r.Version == 1 { commitTimestamp = ReceiveTime } // request controlled retention was only supported from V2-V4 (it became // broker-only after that) so if the user has set the config options then // flow those through as retention time on the commit request. 
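// For example (illustrative): Consumer.Offsets.Retention = time.Hour is sent as
// RetentionTime = 3600000 (milliseconds) on a v2-v4 OffsetCommitRequest.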
if r.Version >= 2 && r.Version < 5 { // Map Sarama's default of 0 to Kafka's default of -1 r.RetentionTime = -1 if om.conf.Consumer.Offsets.Retention > 0 { r.RetentionTime = int64(om.conf.Consumer.Offsets.Retention / time.Millisecond) } } om.pomsLock.RLock() defer om.pomsLock.RUnlock() for _, topicManagers := range om.poms { for _, pom := range topicManagers { pom.lock.Lock() if pom.dirty { r.AddBlockWithLeaderEpoch(pom.topic, pom.partition, pom.offset, pom.leaderEpoch, commitTimestamp, pom.metadata) } pom.lock.Unlock() } } if len(r.blocks) > 0 { return r } return nil } func (om *offsetManager) handleResponse(broker *Broker, req *OffsetCommitRequest, resp *OffsetCommitResponse) { om.pomsLock.RLock() defer om.pomsLock.RUnlock() for _, topicManagers := range om.poms { for _, pom := range topicManagers { if req.blocks[pom.topic] == nil || req.blocks[pom.topic][pom.partition] == nil { continue } var err KError var ok bool if resp.Errors[pom.topic] == nil { pom.handleError(ErrIncompleteResponse) continue } if err, ok = resp.Errors[pom.topic][pom.partition]; !ok { pom.handleError(ErrIncompleteResponse) continue } switch err { case ErrNoError: block := req.blocks[pom.topic][pom.partition] pom.updateCommitted(block.offset, block.metadata) case ErrNotLeaderForPartition, ErrLeaderNotAvailable, ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer: // not a critical error, we just need to redispatch om.releaseCoordinator(broker) case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize: // nothing we can do about this, just tell the user and carry on pom.handleError(err) case ErrOffsetsLoadInProgress: // nothing wrong but we didn't commit, we'll get it next time round case ErrFencedInstancedId: pom.handleError(err) // TODO close the whole consumer for instance fenced.... 
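// ErrFencedInstancedId indicates the coordinator fenced this static member
// (another consumer registered the same group.instance.id), so cancel the
// session instead of retrying the commit.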
om.tryCancelSession() case ErrUnknownTopicOrPartition: // let the user know *and* try redispatching - if topic-auto-create is // enabled, redispatching should trigger a metadata req and create the // topic; if not then re-dispatching won't help, but we've let the user // know and it shouldn't hurt either (see https://github.com/IBM/sarama/issues/706) fallthrough default: // dunno, tell the user and try redispatching pom.handleError(err) om.releaseCoordinator(broker) } } } } func (om *offsetManager) handleError(err error) { om.pomsLock.RLock() defer om.pomsLock.RUnlock() for _, topicManagers := range om.poms { for _, pom := range topicManagers { pom.handleError(err) } } } func (om *offsetManager) asyncClosePOMs() { om.pomsLock.RLock() defer om.pomsLock.RUnlock() for _, topicManagers := range om.poms { for _, pom := range topicManagers { pom.AsyncClose() } } } // Releases/removes closed POMs once they are clean (or when forced) func (om *offsetManager) releasePOMs(force bool) (remaining int) { om.pomsLock.Lock() defer om.pomsLock.Unlock() for topic, topicManagers := range om.poms { for partition, pom := range topicManagers { pom.lock.Lock() releaseDue := pom.done && (force || !pom.dirty) pom.lock.Unlock() if releaseDue { pom.release() delete(om.poms[topic], partition) if len(om.poms[topic]) == 0 { delete(om.poms, topic) } } } remaining += len(om.poms[topic]) } return } func (om *offsetManager) findPOM(topic string, partition int32) *partitionOffsetManager { om.pomsLock.RLock() defer om.pomsLock.RUnlock() if partitions, ok := om.poms[topic]; ok { if pom, ok := partitions[partition]; ok { return pom } } return nil } func (om *offsetManager) tryCancelSession() { if om.sessionCanceler != nil { om.sessionCanceler() } } // Partition Offset Manager // PartitionOffsetManager uses Kafka to store and fetch consumed partition offsets. You MUST call Close() // on a partition offset manager to avoid leaks, it will not be garbage-collected automatically when it passes // out of scope. type PartitionOffsetManager interface { // NextOffset returns the next offset that should be consumed for the managed // partition, accompanied by metadata which can be used to reconstruct the state // of the partition consumer when it resumes. NextOffset() will return // `config.Consumer.Offsets.Initial` and an empty metadata string if no offset // was committed for this partition yet. NextOffset() (int64, string) // MarkOffset marks the provided offset, alongside a metadata string // that represents the state of the partition consumer at that point in time. The // metadata string can be used by another consumer to restore that state, so it // can resume consumption. // // To follow upstream conventions, you are expected to mark the offset of the // next message to read, not the last message read. Thus, when calling `MarkOffset` // you should typically add one to the offset of the last consumed message. // // Note: calling MarkOffset does not necessarily commit the offset to the backend // store immediately for efficiency reasons, and it may never be committed if // your application crashes. This means that you may end up processing the same // message twice, and your processing should ideally be idempotent. MarkOffset(offset int64, metadata string) // ResetOffset resets to the provided offset, alongside a metadata string that // represents the state of the partition consumer at that point in time. 
Reset // acts as a counterpart to MarkOffset, the difference being that it allows to // reset an offset to an earlier or smaller value, where MarkOffset only // allows incrementing the offset. cf MarkOffset for more details. ResetOffset(offset int64, metadata string) // Errors returns a read channel of errors that occur during offset management, if // enabled. By default, errors are logged and not returned over this channel. If // you want to implement any custom error handling, set your config's // Consumer.Return.Errors setting to true, and read from this channel. Errors() <-chan *ConsumerError // AsyncClose initiates a shutdown of the PartitionOffsetManager. This method will // return immediately, after which you should wait until the 'errors' channel has // been drained and closed. It is required to call this function, or Close before // a consumer object passes out of scope, as it will otherwise leak memory. You // must call this before calling Close on the underlying client. AsyncClose() // Close stops the PartitionOffsetManager from managing offsets. It is required to // call this function (or AsyncClose) before a PartitionOffsetManager object // passes out of scope, as it will otherwise leak memory. You must call this // before calling Close on the underlying client. Close() error } type partitionOffsetManager struct { parent *offsetManager topic string partition int32 leaderEpoch int32 lock sync.Mutex offset int64 metadata string dirty bool done bool releaseOnce sync.Once errors chan *ConsumerError } func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) { offset, leaderEpoch, metadata, err := om.fetchInitialOffset(topic, partition, om.conf.Metadata.Retry.Max) if err != nil { return nil, err } return &partitionOffsetManager{ parent: om, topic: topic, partition: partition, leaderEpoch: leaderEpoch, errors: make(chan *ConsumerError, om.conf.ChannelBufferSize), offset: offset, metadata: metadata, }, nil } func (pom *partitionOffsetManager) Errors() <-chan *ConsumerError { return pom.errors } func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) { pom.lock.Lock() defer pom.lock.Unlock() if offset > pom.offset { pom.offset = offset pom.metadata = metadata pom.dirty = true } } func (pom *partitionOffsetManager) ResetOffset(offset int64, metadata string) { pom.lock.Lock() defer pom.lock.Unlock() if offset <= pom.offset { pom.offset = offset pom.metadata = metadata pom.dirty = true } } func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) { pom.lock.Lock() defer pom.lock.Unlock() if pom.offset == offset && pom.metadata == metadata { pom.dirty = false } } func (pom *partitionOffsetManager) NextOffset() (int64, string) { pom.lock.Lock() defer pom.lock.Unlock() if pom.offset >= 0 { return pom.offset, pom.metadata } return pom.parent.conf.Consumer.Offsets.Initial, "" } func (pom *partitionOffsetManager) AsyncClose() { pom.lock.Lock() pom.done = true pom.lock.Unlock() } func (pom *partitionOffsetManager) Close() error { pom.AsyncClose() var errors ConsumerErrors for err := range pom.errors { errors = append(errors, err) } if len(errors) > 0 { return errors } return nil } func (pom *partitionOffsetManager) handleError(err error) { cErr := &ConsumerError{ Topic: pom.topic, Partition: pom.partition, Err: err, } if pom.parent.conf.Consumer.Return.Errors { pom.errors <- cErr } else { Logger.Println(cErr) } } func (pom *partitionOffsetManager) release() { pom.releaseOnce.Do(func() { 
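// Closing the errors channel unblocks the range loop in Close(), letting it
// drain any remaining ConsumerErrors and return.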
close(pom.errors) }) } golang-github-ibm-sarama-1.43.2/offset_manager_test.go000066400000000000000000000440721461256741300227250ustar00rootroot00000000000000package sarama import ( "errors" "fmt" "sync/atomic" "testing" "time" ) func initOffsetManagerWithBackoffFunc( t *testing.T, retention time.Duration, backoffFunc func(retries, maxRetries int) time.Duration, config *Config, ) (om OffsetManager, testClient Client, broker, coordinator *MockBroker) { config.Metadata.Retry.Max = 1 if backoffFunc != nil { config.Metadata.Retry.BackoffFunc = backoffFunc } config.Consumer.Offsets.AutoCommit.Interval = 1 * time.Millisecond config.Version = V0_9_0_0 if retention > 0 { config.Consumer.Offsets.Retention = retention } broker = NewMockBroker(t, 1) coordinator = NewMockBroker(t, 2) seedMeta := new(MetadataResponse) seedMeta.AddBroker(coordinator.Addr(), coordinator.BrokerID()) seedMeta.AddTopicPartition("my_topic", 0, 1, []int32{}, []int32{}, []int32{}, ErrNoError) seedMeta.AddTopicPartition("my_topic", 1, 1, []int32{}, []int32{}, []int32{}, ErrNoError) broker.Returns(seedMeta) var err error testClient, err = NewClient([]string{broker.Addr()}, config) if err != nil { t.Fatal(err) } coordinator.Returns(&ConsumerMetadataResponse{ CoordinatorID: coordinator.BrokerID(), CoordinatorHost: "127.0.0.1", CoordinatorPort: coordinator.Port(), }) om, err = NewOffsetManagerFromClient("group", testClient) if err != nil { t.Fatal(err) } return om, testClient, broker, coordinator } func initOffsetManager(t *testing.T, retention time.Duration) (om OffsetManager, testClient Client, broker, coordinator *MockBroker, ) { return initOffsetManagerWithBackoffFunc(t, retention, nil, NewTestConfig()) } func initPartitionOffsetManager(t *testing.T, om OffsetManager, coordinator *MockBroker, initialOffset int64, metadata string, ) PartitionOffsetManager { fetchResponse := new(OffsetFetchResponse) fetchResponse.AddBlock("my_topic", 0, &OffsetFetchResponseBlock{ Err: ErrNoError, Offset: initialOffset, Metadata: metadata, }) coordinator.Returns(fetchResponse) pom, err := om.ManagePartition("my_topic", 0) if err != nil { t.Fatal(err) } return pom } func TestNewOffsetManager(t *testing.T) { seedBroker := NewMockBroker(t, 1) metadataResponse := new(MetadataResponse) metadataResponse.AddBroker(seedBroker.Addr(), seedBroker.BrokerID()) seedBroker.Returns(metadataResponse) defer seedBroker.Close() testClient, err := NewClient([]string{seedBroker.Addr()}, NewTestConfig()) if err != nil { t.Fatal(err) } om, err := NewOffsetManagerFromClient("group", testClient) if err != nil { t.Error(err) } safeClose(t, om) safeClose(t, testClient) _, err = NewOffsetManagerFromClient("group", testClient) if !errors.Is(err, ErrClosedClient) { t.Errorf("Error expected for closed client; actual value: %v", err) } } var offsetsautocommitTestTable = []struct { name string set bool // if given will override default configuration for Consumer.Offsets.AutoCommit.Enable enable bool }{ { "AutoCommit (default)", false, // use default true, }, { "AutoCommit Enabled", true, true, }, { "AutoCommit Disabled", true, false, }, } func TestNewOffsetManagerOffsetsAutoCommit(t *testing.T) { // Tests to validate configuration of `Consumer.Offsets.AutoCommit.Enable` for _, tt := range offsetsautocommitTestTable { tt := tt t.Run(tt.name, func(t *testing.T) { config := NewTestConfig() if tt.set { config.Consumer.Offsets.AutoCommit.Enable = tt.enable } om, testClient, broker, coordinator := initOffsetManagerWithBackoffFunc(t, 0, nil, config) defer broker.Close() defer 
coordinator.Close() pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta") // Wait long enough for the test not to fail.. timeout := 50 * config.Consumer.Offsets.AutoCommit.Interval called := make(chan none) ocResponse := new(OffsetCommitResponse) ocResponse.AddError("my_topic", 0, ErrNoError) handler := func(req *request) (res encoderWithHeader) { close(called) return ocResponse } coordinator.setHandler(handler) // Should force an offset commit, if auto-commit is enabled. expected := int64(1) pom.ResetOffset(expected, "modified_meta") _, _ = pom.NextOffset() select { case <-called: // OffsetManager called on the wire. if !config.Consumer.Offsets.AutoCommit.Enable { t.Errorf("Received request for: %s when AutoCommit is disabled", tt.name) } case <-time.After(timeout): // Timeout waiting for OffsetManager to call on the wire. if config.Consumer.Offsets.AutoCommit.Enable { t.Errorf("No request received for: %s after waiting for %v", tt.name, timeout) } } // !! om must be closed before the pom so pom.release() is called before pom.Close() safeClose(t, om) safeClose(t, pom) safeClose(t, testClient) }) } } func TestNewOffsetManagerOffsetsManualCommit(t *testing.T) { // Tests to validate configuration when `Consumer.Offsets.AutoCommit.Enable` is false config := NewTestConfig() config.Consumer.Offsets.AutoCommit.Enable = false om, testClient, broker, coordinator := initOffsetManagerWithBackoffFunc(t, 0, nil, config) defer broker.Close() defer coordinator.Close() pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta") // Wait long enough for the test not to fail.. timeout := 50 * config.Consumer.Offsets.AutoCommit.Interval ocResponse := new(OffsetCommitResponse) ocResponse.AddError("my_topic", 0, ErrNoError) called := make(chan none) handler := func(req *request) (res encoderWithHeader) { close(called) return ocResponse } coordinator.setHandler(handler) // Should not trigger an auto-commit expected := int64(1) pom.ResetOffset(expected, "modified_meta") _, _ = pom.NextOffset() select { case <-called: // OffsetManager called on the wire. t.Errorf("Received request when AutoCommit is disabled") case <-time.After(timeout): // Timeout waiting for OffsetManager to call on the wire. // OK } // Setup again to test manual commit called = make(chan none) om.Commit() select { case <-called: // OffsetManager called on the wire. // OK case <-time.After(timeout): // Timeout waiting for OffsetManager to call on the wire. t.Errorf("No request received for after waiting for %v", timeout) } // !! 
om must be closed before the pom so pom.release() is called before pom.Close() safeClose(t, om) safeClose(t, pom) safeClose(t, testClient) } // Test recovery from ErrNotCoordinatorForConsumer // on first fetchInitialOffset call func TestOffsetManagerFetchInitialFail(t *testing.T) { om, testClient, broker, coordinator := initOffsetManager(t, 0) defer broker.Close() defer coordinator.Close() // Error on first fetchInitialOffset call responseBlock := OffsetFetchResponseBlock{ Err: ErrNotCoordinatorForConsumer, Offset: 5, Metadata: "test_meta", } fetchResponse := new(OffsetFetchResponse) fetchResponse.AddBlock("my_topic", 0, &responseBlock) coordinator.Returns(fetchResponse) // Refresh coordinator newCoordinator := NewMockBroker(t, 3) defer newCoordinator.Close() coordinator.Returns(&ConsumerMetadataResponse{ CoordinatorID: newCoordinator.BrokerID(), CoordinatorHost: "127.0.0.1", CoordinatorPort: newCoordinator.Port(), }) // Second fetchInitialOffset call is fine fetchResponse2 := new(OffsetFetchResponse) responseBlock2 := responseBlock responseBlock2.Err = ErrNoError fetchResponse2.AddBlock("my_topic", 0, &responseBlock2) newCoordinator.Returns(fetchResponse2) pom, err := om.ManagePartition("my_topic", 0) if err != nil { t.Error(err) } safeClose(t, pom) safeClose(t, om) safeClose(t, testClient) } // Test fetchInitialOffset retry on ErrOffsetsLoadInProgress func TestOffsetManagerFetchInitialLoadInProgress(t *testing.T) { retryCount := int32(0) backoff := func(retries, maxRetries int) time.Duration { atomic.AddInt32(&retryCount, 1) return 0 } om, testClient, broker, coordinator := initOffsetManagerWithBackoffFunc(t, 0, backoff, NewTestConfig()) defer broker.Close() defer coordinator.Close() // Error on first fetchInitialOffset call responseBlock := OffsetFetchResponseBlock{ Err: ErrOffsetsLoadInProgress, Offset: 5, Metadata: "test_meta", } fetchResponse := new(OffsetFetchResponse) fetchResponse.AddBlock("my_topic", 0, &responseBlock) coordinator.Returns(fetchResponse) // Second fetchInitialOffset call is fine fetchResponse2 := new(OffsetFetchResponse) responseBlock2 := responseBlock responseBlock2.Err = ErrNoError fetchResponse2.AddBlock("my_topic", 0, &responseBlock2) coordinator.Returns(fetchResponse2) pom, err := om.ManagePartition("my_topic", 0) if err != nil { t.Error(err) } safeClose(t, pom) safeClose(t, om) safeClose(t, testClient) if atomic.LoadInt32(&retryCount) == 0 { t.Fatal("Expected at least one retry") } } func TestPartitionOffsetManagerInitialOffset(t *testing.T) { om, testClient, broker, coordinator := initOffsetManager(t, 0) defer broker.Close() defer coordinator.Close() testClient.Config().Consumer.Offsets.Initial = OffsetOldest // Kafka returns -1 if no offset has been stored for this partition yet. pom := initPartitionOffsetManager(t, om, coordinator, -1, "") offset, meta := pom.NextOffset() if offset != OffsetOldest { t.Errorf("Expected offset 5. Actual: %v", offset) } if meta != "" { t.Errorf("Expected metadata to be empty. Actual: %q", meta) } safeClose(t, pom) safeClose(t, om) safeClose(t, testClient) } func TestPartitionOffsetManagerNextOffset(t *testing.T) { om, testClient, broker, coordinator := initOffsetManager(t, 0) defer broker.Close() defer coordinator.Close() pom := initPartitionOffsetManager(t, om, coordinator, 5, "test_meta") offset, meta := pom.NextOffset() if offset != 5 { t.Errorf("Expected offset 5. Actual: %v", offset) } if meta != "test_meta" { t.Errorf("Expected metadata \"test_meta\". 
Actual: %q", meta) } safeClose(t, pom) safeClose(t, om) safeClose(t, testClient) } func TestPartitionOffsetManagerResetOffset(t *testing.T) { om, testClient, broker, coordinator := initOffsetManager(t, 0) defer broker.Close() defer coordinator.Close() pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta") ocResponse := new(OffsetCommitResponse) ocResponse.AddError("my_topic", 0, ErrNoError) coordinator.Returns(ocResponse) expected := int64(1) pom.ResetOffset(expected, "modified_meta") actual, meta := pom.NextOffset() if actual != expected { t.Errorf("Expected offset %v. Actual: %v", expected, actual) } if meta != "modified_meta" { t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta) } safeClose(t, pom) safeClose(t, om) safeClose(t, testClient) } func TestPartitionOffsetManagerResetOffsetWithRetention(t *testing.T) { om, testClient, broker, coordinator := initOffsetManager(t, time.Hour) defer broker.Close() defer coordinator.Close() pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta") ocResponse := new(OffsetCommitResponse) ocResponse.AddError("my_topic", 0, ErrNoError) handler := func(req *request) (res encoderWithHeader) { if req.body.version() != 2 { t.Errorf("Expected to be using version 2. Actual: %v", req.body.version()) } offsetCommitRequest := req.body.(*OffsetCommitRequest) if offsetCommitRequest.RetentionTime != (60 * 60 * 1000) { t.Errorf("Expected an hour retention time. Actual: %v", offsetCommitRequest.RetentionTime) } return ocResponse } coordinator.setHandler(handler) expected := int64(1) pom.ResetOffset(expected, "modified_meta") actual, meta := pom.NextOffset() if actual != expected { t.Errorf("Expected offset %v. Actual: %v", expected, actual) } if meta != "modified_meta" { t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta) } safeClose(t, pom) safeClose(t, om) safeClose(t, testClient) } func TestPartitionOffsetManagerMarkOffset(t *testing.T) { om, testClient, broker, coordinator := initOffsetManager(t, 0) defer broker.Close() defer coordinator.Close() pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta") ocResponse := new(OffsetCommitResponse) ocResponse.AddError("my_topic", 0, ErrNoError) coordinator.Returns(ocResponse) pom.MarkOffset(100, "modified_meta") offset, meta := pom.NextOffset() if offset != 100 { t.Errorf("Expected offset 100. Actual: %v", offset) } if meta != "modified_meta" { t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta) } safeClose(t, pom) safeClose(t, om) safeClose(t, testClient) } func TestPartitionOffsetManagerMarkOffsetWithRetention(t *testing.T) { om, testClient, broker, coordinator := initOffsetManager(t, time.Hour) defer broker.Close() defer coordinator.Close() pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta") ocResponse := new(OffsetCommitResponse) ocResponse.AddError("my_topic", 0, ErrNoError) handler := func(req *request) (res encoderWithHeader) { if req.body.version() != 2 { t.Errorf("Expected to be using version 2. Actual: %v", req.body.version()) } offsetCommitRequest := req.body.(*OffsetCommitRequest) if offsetCommitRequest.RetentionTime != (60 * 60 * 1000) { t.Errorf("Expected an hour retention time. Actual: %v", offsetCommitRequest.RetentionTime) } return ocResponse } coordinator.setHandler(handler) pom.MarkOffset(100, "modified_meta") offset, meta := pom.NextOffset() if offset != 100 { t.Errorf("Expected offset 100. 
Actual: %v", offset) } if meta != "modified_meta" { t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta) } safeClose(t, pom) safeClose(t, om) safeClose(t, testClient) } func TestPartitionOffsetManagerCommitErr(t *testing.T) { om, testClient, broker, coordinator := initOffsetManager(t, 0) defer broker.Close() defer coordinator.Close() pom := initPartitionOffsetManager(t, om, coordinator, 5, "meta") // Error on one partition ocResponse := new(OffsetCommitResponse) ocResponse.AddError("my_topic", 0, ErrOffsetOutOfRange) ocResponse.AddError("my_topic", 1, ErrNoError) coordinator.Returns(ocResponse) // For RefreshCoordinator() coordinator.Returns(&ConsumerMetadataResponse{ CoordinatorID: coordinator.BrokerID(), CoordinatorHost: "127.0.0.1", CoordinatorPort: coordinator.Port(), }) // Nothing in response.Errors at all ocResponse2 := new(OffsetCommitResponse) coordinator.Returns(ocResponse2) // No error, no need to refresh coordinator // Error on the wrong partition for this pom ocResponse3 := new(OffsetCommitResponse) ocResponse3.AddError("my_topic", 1, ErrNoError) coordinator.Returns(ocResponse3) // ErrUnknownTopicOrPartition/ErrNotLeaderForPartition/ErrLeaderNotAvailable block ocResponse4 := new(OffsetCommitResponse) ocResponse4.AddError("my_topic", 0, ErrUnknownTopicOrPartition) coordinator.Returns(ocResponse4) newCoordinator := NewMockBroker(t, 3) defer newCoordinator.Close() // For RefreshCoordinator() coordinator.Returns(&ConsumerMetadataResponse{ CoordinatorID: newCoordinator.BrokerID(), CoordinatorHost: "127.0.0.1", CoordinatorPort: newCoordinator.Port(), }) // Normal error response ocResponse5 := new(OffsetCommitResponse) ocResponse5.AddError("my_topic", 0, ErrNoError) newCoordinator.Returns(ocResponse5) pom.MarkOffset(100, "modified_meta") err := pom.Close() if err != nil { t.Error(err) } safeClose(t, om) safeClose(t, testClient) } // Test of recovery from abort func TestAbortPartitionOffsetManager(t *testing.T) { om, testClient, broker, coordinator := initOffsetManager(t, 0) defer broker.Close() pom := initPartitionOffsetManager(t, om, coordinator, 5, "meta") // this triggers an error in the CommitOffset request, // which leads to the abort call coordinator.Close() // Response to refresh coordinator request newCoordinator := NewMockBroker(t, 3) defer newCoordinator.Close() broker.Returns(&ConsumerMetadataResponse{ CoordinatorID: newCoordinator.BrokerID(), CoordinatorHost: "127.0.0.1", CoordinatorPort: newCoordinator.Port(), }) ocResponse := new(OffsetCommitResponse) ocResponse.AddError("my_topic", 0, ErrNoError) newCoordinator.Returns(ocResponse) pom.MarkOffset(100, "modified_meta") safeClose(t, pom) safeClose(t, om) safeClose(t, testClient) } // Validate that the constructRequest() method correctly maps Sarama's default for // Config.Consumer.Offsets.Retention to the equivalent Kafka value. func TestConstructRequestRetentionTime(t *testing.T) { expectedRetention := func(version KafkaVersion, retention time.Duration) int64 { switch { case version.IsAtLeast(V2_1_0_0): // version >= 2.1.0: Client specified retention time isn't supported in the // offset commit request anymore, thus the retention time field set in the // OffsetCommitRequest struct should be 0. return 0 case version.IsAtLeast(V0_9_0_0): // 0.9.0 <= version < 2.1.0: Retention time *is* supported in the offset commit // request. Sarama's default retention times (0) must be mapped to the Kafka // default (-1). Non-zero Sarama times are converted from time.Duration to // an int64 millisecond value. 
if retention > 0 { return int64(retention / time.Millisecond) } else { return -1 } default: // version < 0.9.0: Client specified retention time is not supported in the offset // commit request, thus the retention time field set in the OffsetCommitRequest // struct should be 0. return 0 } } for _, version := range SupportedVersions { for _, retention := range []time.Duration{0, time.Millisecond} { name := fmt.Sprintf("version %s retention: %s", version, retention) t.Run(name, func(t *testing.T) { // Perform necessary setup for calling the constructRequest() method. This // test-case only cares about the code path that sets the retention time // field in the returned request struct. conf := NewTestConfig() conf.Version = version conf.Consumer.Offsets.Retention = retention om := &offsetManager{ conf: conf, poms: map[string]map[int32]*partitionOffsetManager{ "topic": { 0: { dirty: true, }, }, }, } req := om.constructRequest() expectedRetention := expectedRetention(version, retention) if req.RetentionTime != expectedRetention { t.Errorf("expected retention time %d, got: %d", expectedRetention, req.RetentionTime) } }) } } } golang-github-ibm-sarama-1.43.2/offset_request.go000066400000000000000000000077701461256741300217500ustar00rootroot00000000000000package sarama type offsetRequestBlock struct { // currentLeaderEpoch contains the current leader epoch (used in version 4+). currentLeaderEpoch int32 // timestamp contains the current timestamp. timestamp int64 // maxNumOffsets contains the maximum number of offsets to report. maxNumOffsets int32 // Only used in version 0 } func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error { if version >= 4 { pe.putInt32(b.currentLeaderEpoch) } pe.putInt64(b.timestamp) if version == 0 { pe.putInt32(b.maxNumOffsets) } return nil } func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) { b.currentLeaderEpoch = -1 if version >= 4 { if b.currentLeaderEpoch, err = pd.getInt32(); err != nil { return err } } if b.timestamp, err = pd.getInt64(); err != nil { return err } if version == 0 { if b.maxNumOffsets, err = pd.getInt32(); err != nil { return err } } return nil } type OffsetRequest struct { Version int16 IsolationLevel IsolationLevel replicaID int32 isReplicaIDSet bool blocks map[string]map[int32]*offsetRequestBlock } func (r *OffsetRequest) encode(pe packetEncoder) error { if r.isReplicaIDSet { pe.putInt32(r.replicaID) } else { // default replica ID is always -1 for clients pe.putInt32(-1) } if r.Version >= 2 { pe.putBool(r.IsolationLevel == ReadCommitted) } err := pe.putArrayLength(len(r.blocks)) if err != nil { return err } for topic, partitions := range r.blocks { err = pe.putString(topic) if err != nil { return err } err = pe.putArrayLength(len(partitions)) if err != nil { return err } for partition, block := range partitions { pe.putInt32(partition) if err = block.encode(pe, r.Version); err != nil { return err } } } return nil } func (r *OffsetRequest) decode(pd packetDecoder, version int16) error { r.Version = version replicaID, err := pd.getInt32() if err != nil { return err } if replicaID >= 0 { r.SetReplicaID(replicaID) } if r.Version >= 2 { tmp, err := pd.getBool() if err != nil { return err } r.IsolationLevel = ReadUncommitted if tmp { r.IsolationLevel = ReadCommitted } } blockCount, err := pd.getArrayLength() if err != nil { return err } if blockCount == 0 { return nil } r.blocks = make(map[string]map[int32]*offsetRequestBlock) for i := 0; i < blockCount; i++ { topic, err := pd.getString() if err != nil 
{ return err } partitionCount, err := pd.getArrayLength() if err != nil { return err } r.blocks[topic] = make(map[int32]*offsetRequestBlock) for j := 0; j < partitionCount; j++ { partition, err := pd.getInt32() if err != nil { return err } block := &offsetRequestBlock{} if err := block.decode(pd, version); err != nil { return err } r.blocks[topic][partition] = block } } return nil } func (r *OffsetRequest) key() int16 { return 2 } func (r *OffsetRequest) version() int16 { return r.Version } func (r *OffsetRequest) headerVersion() int16 { return 1 } func (r *OffsetRequest) isValidVersion() bool { return r.Version >= 0 && r.Version <= 4 } func (r *OffsetRequest) requiredVersion() KafkaVersion { switch r.Version { case 4: return V2_1_0_0 case 3: return V2_0_0_0 case 2: return V0_11_0_0 case 1: return V0_10_1_0 case 0: return V0_8_2_0 default: return V2_0_0_0 } } func (r *OffsetRequest) SetReplicaID(id int32) { r.replicaID = id r.isReplicaIDSet = true } func (r *OffsetRequest) ReplicaID() int32 { if r.isReplicaIDSet { return r.replicaID } return -1 } func (r *OffsetRequest) AddBlock(topic string, partitionID int32, timestamp int64, maxOffsets int32) { if r.blocks == nil { r.blocks = make(map[string]map[int32]*offsetRequestBlock) } if r.blocks[topic] == nil { r.blocks[topic] = make(map[int32]*offsetRequestBlock) } tmp := new(offsetRequestBlock) tmp.currentLeaderEpoch = -1 tmp.timestamp = timestamp if r.Version == 0 { tmp.maxNumOffsets = maxOffsets } r.blocks[topic][partitionID] = tmp } golang-github-ibm-sarama-1.43.2/offset_request_test.go000066400000000000000000000053601461256741300230000ustar00rootroot00000000000000package sarama import "testing" var ( offsetRequestNoBlocksV1 = []byte{ 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, } offsetRequestNoBlocksV2 = []byte{ 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, } offsetRequestOneBlock = []byte{ 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x00, 0x03, 'f', 'o', 'o', 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, } offsetRequestOneBlockV1 = []byte{ 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x00, 0x03, 'b', 'a', 'r', 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, } offsetRequestOneBlockReadCommittedV2 = []byte{ 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x03, 'b', 'a', 'r', 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, } offsetRequestReplicaID = []byte{ 0x00, 0x00, 0x00, 0x2a, 0x00, 0x00, 0x00, 0x00, } offsetRequestV4 = []byte{ 0xff, 0xff, 0xff, 0xff, // replicaID 0x01, // IsolationLevel 0x00, 0x00, 0x00, 0x01, 0x00, 0x04, 0x64, 0x6e, 0x77, 0x65, // topic name 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x09, // partitionID 0xff, 0xff, 0xff, 0xff, // leader epoch 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // timestamp } ) func TestOffsetRequest(t *testing.T) { request := new(OffsetRequest) testRequest(t, "no blocks", request, offsetRequestNoBlocksV1) request.AddBlock("foo", 4, 1, 2) testRequest(t, "one block", request, offsetRequestOneBlock) } func TestOffsetRequestV1(t *testing.T) { request := new(OffsetRequest) request.Version = 1 testRequest(t, "no blocks", request, offsetRequestNoBlocksV1) request.AddBlock("bar", 4, 1, 2) // Last argument is ignored for V1 testRequest(t, "one block", request, offsetRequestOneBlockV1) } func TestOffsetRequestV2(t *testing.T) { request := new(OffsetRequest) request.Version = 2 testRequest(t, "no 
blocks", request, offsetRequestNoBlocksV2) request.IsolationLevel = ReadCommitted request.AddBlock("bar", 4, 1, 2) // Last argument is ignored for V1 testRequest(t, "one block", request, offsetRequestOneBlockReadCommittedV2) } func TestOffsetRequestReplicaID(t *testing.T) { request := new(OffsetRequest) replicaID := int32(42) request.SetReplicaID(replicaID) if found := request.ReplicaID(); found != replicaID { t.Errorf("replicaID: expected %v, found %v", replicaID, found) } testRequest(t, "with replica ID", request, offsetRequestReplicaID) } func TestOffsetRequestV4(t *testing.T) { request := new(OffsetRequest) request.Version = 4 request.IsolationLevel = ReadCommitted request.AddBlock("dnwe", 9, -1, -1) testRequest(t, "V4", request, offsetRequestV4) } golang-github-ibm-sarama-1.43.2/offset_response.go000066400000000000000000000104021461256741300221000ustar00rootroot00000000000000package sarama import "time" type OffsetResponseBlock struct { Err KError // Offsets contains the result offsets (for V0/V1 compatibility) Offsets []int64 // Version 0 // Timestamp contains the timestamp associated with the returned offset. Timestamp int64 // Version 1 // Offset contains the returned offset. Offset int64 // Version 1 // LeaderEpoch contains the current leader epoch of the partition. LeaderEpoch int32 } func (b *OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error) { tmp, err := pd.getInt16() if err != nil { return err } b.Err = KError(tmp) if version == 0 { b.Offsets, err = pd.getInt64Array() return err } if version >= 1 { b.Timestamp, err = pd.getInt64() if err != nil { return err } b.Offset, err = pd.getInt64() if err != nil { return err } // For backwards compatibility put the offset in the offsets array too b.Offsets = []int64{b.Offset} } if version >= 4 { if b.LeaderEpoch, err = pd.getInt32(); err != nil { return err } } return nil } func (b *OffsetResponseBlock) encode(pe packetEncoder, version int16) (err error) { pe.putInt16(int16(b.Err)) if version == 0 { return pe.putInt64Array(b.Offsets) } if version >= 1 { pe.putInt64(b.Timestamp) pe.putInt64(b.Offset) } if version >= 4 { pe.putInt32(b.LeaderEpoch) } return nil } type OffsetResponse struct { Version int16 ThrottleTimeMs int32 Blocks map[string]map[int32]*OffsetResponseBlock } func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) { if version >= 2 { r.ThrottleTimeMs, err = pd.getInt32() if err != nil { return err } } numTopics, err := pd.getArrayLength() if err != nil { return err } r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics) for i := 0; i < numTopics; i++ { name, err := pd.getString() if err != nil { return err } numBlocks, err := pd.getArrayLength() if err != nil { return err } r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks) for j := 0; j < numBlocks; j++ { id, err := pd.getInt32() if err != nil { return err } block := new(OffsetResponseBlock) err = block.decode(pd, version) if err != nil { return err } r.Blocks[name][id] = block } } return nil } func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock { if r.Blocks == nil { return nil } if r.Blocks[topic] == nil { return nil } return r.Blocks[topic][partition] } /* // [0 0 0 1 ntopics 0 8 109 121 95 116 111 112 105 99 topic 0 0 0 1 npartitions 0 0 0 0 id 0 0 0 0 0 1 0 0 0 0 0 1 1 1 0 0 0 1 0 8 109 121 95 116 111 112 105 99 0 0 0 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 1 1 1] */ func (r *OffsetResponse) encode(pe packetEncoder) (err error) { if r.Version >= 2 { 
pe.putInt32(r.ThrottleTimeMs) } if err = pe.putArrayLength(len(r.Blocks)); err != nil { return err } for topic, partitions := range r.Blocks { if err = pe.putString(topic); err != nil { return err } if err = pe.putArrayLength(len(partitions)); err != nil { return err } for partition, block := range partitions { pe.putInt32(partition) if err = block.encode(pe, r.version()); err != nil { return err } } } return nil } func (r *OffsetResponse) key() int16 { return 2 } func (r *OffsetResponse) version() int16 { return r.Version } func (r *OffsetResponse) headerVersion() int16 { return 0 } func (r *OffsetResponse) isValidVersion() bool { return r.Version >= 0 && r.Version <= 4 } func (r *OffsetResponse) requiredVersion() KafkaVersion { switch r.Version { case 4: return V2_1_0_0 case 3: return V2_0_0_0 case 2: return V0_11_0_0 case 1: return V0_10_1_0 case 0: return V0_8_2_0 default: return V2_0_0_0 } } func (r *OffsetResponse) throttleTime() time.Duration { return time.Duration(r.ThrottleTimeMs) * time.Millisecond } // testing API func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) { if r.Blocks == nil { r.Blocks = make(map[string]map[int32]*OffsetResponseBlock) } byTopic, ok := r.Blocks[topic] if !ok { byTopic = make(map[int32]*OffsetResponseBlock) r.Blocks[topic] = byTopic } byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}, Offset: offset} } golang-github-ibm-sarama-1.43.2/offset_response_test.go000066400000000000000000000073361461256741300231530ustar00rootroot00000000000000package sarama import ( "errors" "testing" ) var ( emptyOffsetResponse = []byte{ 0x00, 0x00, 0x00, 0x00, } normalOffsetResponse = []byte{ 0x00, 0x00, 0x00, 0x02, 0x00, 0x01, 'a', 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 'z', 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, } normalOffsetResponseV1 = []byte{ 0x00, 0x00, 0x00, 0x02, 0x00, 0x01, 'a', 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 'z', 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x01, 0x58, 0x1A, 0xE6, 0x48, 0x86, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, } offsetResponseV4 = []byte{ 0x00, 0x00, 0x00, 0x00, // throttle time 0x00, 0x00, 0x00, 0x01, // length of topics 0x00, 0x04, 0x64, 0x6e, 0x77, 0x65, // topic name 0x00, 0x00, 0x00, 0x01, // length of partitions 0x00, 0x00, 0x00, 0x09, // partitionID 0x00, 0x00, // err 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // timestamp 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // offset 0xff, 0xff, 0xff, 0xff, // leaderEpoch } ) func TestEmptyOffsetResponse(t *testing.T) { response := OffsetResponse{} testVersionDecodable(t, "empty", &response, emptyOffsetResponse, 0) if len(response.Blocks) != 0 { t.Error("Decoding produced", len(response.Blocks), "topics where there were none.") } response = OffsetResponse{} testVersionDecodable(t, "empty", &response, emptyOffsetResponse, 1) if len(response.Blocks) != 0 { t.Error("Decoding produced", len(response.Blocks), "topics where there were none.") } } func TestNormalOffsetResponse(t *testing.T) { response := OffsetResponse{} testVersionDecodable(t, "normal", &response, normalOffsetResponse, 0) if len(response.Blocks) != 2 { t.Fatal("Decoding produced", len(response.Blocks), "topics where there were two.") } if len(response.Blocks["a"]) != 0 { t.Fatal("Decoding produced", len(response.Blocks["a"]), "partitions for topic 'a' where there were none.") } if 
len(response.Blocks["z"]) != 1 { t.Fatal("Decoding produced", len(response.Blocks["z"]), "partitions for topic 'z' where there was one.") } if !errors.Is(response.Blocks["z"][2].Err, ErrNoError) { t.Fatal("Decoding produced invalid error for topic z partition 2.") } if len(response.Blocks["z"][2].Offsets) != 2 { t.Fatal("Decoding produced invalid number of offsets for topic z partition 2.") } if response.Blocks["z"][2].Offsets[0] != 5 || response.Blocks["z"][2].Offsets[1] != 6 { t.Fatal("Decoding produced invalid offsets for topic z partition 2.") } } func TestNormalOffsetResponseV1(t *testing.T) { response := OffsetResponse{} testVersionDecodable(t, "normal", &response, normalOffsetResponseV1, 1) if len(response.Blocks) != 2 { t.Fatal("Decoding produced", len(response.Blocks), "topics where there were two.") } if len(response.Blocks["a"]) != 0 { t.Fatal("Decoding produced", len(response.Blocks["a"]), "partitions for topic 'a' where there were none.") } if len(response.Blocks["z"]) != 1 { t.Fatal("Decoding produced", len(response.Blocks["z"]), "partitions for topic 'z' where there was one.") } if !errors.Is(response.Blocks["z"][2].Err, ErrNoError) { t.Fatal("Decoding produced invalid error for topic z partition 2.") } if response.Blocks["z"][2].Timestamp != 1477920049286 { t.Fatal("Decoding produced invalid timestamp for topic z partition 2.", response.Blocks["z"][2].Timestamp) } if response.Blocks["z"][2].Offset != 6 { t.Fatal("Decoding produced invalid offsets for topic z partition 2.") } } func TestOffsetResponseV4(t *testing.T) { response := OffsetResponse{} testVersionDecodable(t, "v4", &response, offsetResponseV4, 4) } golang-github-ibm-sarama-1.43.2/packet_decoder.go000066400000000000000000000054271461256741300216430ustar00rootroot00000000000000package sarama import "github.com/rcrowley/go-metrics" // PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules. // Types implementing Decoder only need to worry about calling methods like GetString, // not about how a string is represented in Kafka. type packetDecoder interface { // Primitives getInt8() (int8, error) getInt16() (int16, error) getInt32() (int32, error) getInt64() (int64, error) getVarint() (int64, error) getUVarint() (uint64, error) getFloat64() (float64, error) getArrayLength() (int, error) getCompactArrayLength() (int, error) getBool() (bool, error) getEmptyTaggedFieldArray() (int, error) // Collections getBytes() ([]byte, error) getVarintBytes() ([]byte, error) getCompactBytes() ([]byte, error) getRawBytes(length int) ([]byte, error) getString() (string, error) getNullableString() (*string, error) getCompactString() (string, error) getCompactNullableString() (*string, error) getCompactInt32Array() ([]int32, error) getInt32Array() ([]int32, error) getInt64Array() ([]int64, error) getStringArray() ([]string, error) // Subsets remaining() int getSubset(length int) (packetDecoder, error) peek(offset, length int) (packetDecoder, error) // similar to getSubset, but it doesn't advance the offset peekInt8(offset int) (int8, error) // similar to peek, but just one byte // Stacks, see PushDecoder push(in pushDecoder) error pop() error // To record metrics when provided metricRegistry() metrics.Registry } // PushDecoder is the interface for decoding fields like CRCs and lengths where the validity // of the field depends on what is after it in the packet. 
Start them with PacketDecoder.Push() where // the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they // depend upon have been decoded. type pushDecoder interface { // Saves the offset into the input buffer as the location to actually read the calculated value when able. saveOffset(in int) // Returns the length of data to reserve for the input of this encoder (e.g. 4 bytes for a CRC32). reserveLength() int // Indicates that all required data is now available to calculate and check the field. // SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes // of data from the saved offset, and verify it based on the data between the saved offset and curOffset. check(curOffset int, buf []byte) error } // dynamicPushDecoder extends the interface of pushDecoder for uses cases where the length of the // fields itself is unknown until its value was decoded (for instance varint encoded length // fields). // During push, dynamicPushDecoder.decode() method will be called instead of reserveLength() type dynamicPushDecoder interface { pushDecoder decoder } golang-github-ibm-sarama-1.43.2/packet_encoder.go000066400000000000000000000051541461256741300216520ustar00rootroot00000000000000package sarama import "github.com/rcrowley/go-metrics" // PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules. // Types implementing Encoder only need to worry about calling methods like PutString, // not about how a string is represented in Kafka. type packetEncoder interface { // Primitives putInt8(in int8) putInt16(in int16) putInt32(in int32) putInt64(in int64) putVarint(in int64) putUVarint(in uint64) putFloat64(in float64) putCompactArrayLength(in int) putArrayLength(in int) error putBool(in bool) // Collections putBytes(in []byte) error putVarintBytes(in []byte) error putCompactBytes(in []byte) error putRawBytes(in []byte) error putCompactString(in string) error putNullableCompactString(in *string) error putString(in string) error putNullableString(in *string) error putStringArray(in []string) error putCompactInt32Array(in []int32) error putNullableCompactInt32Array(in []int32) error putInt32Array(in []int32) error putInt64Array(in []int64) error putEmptyTaggedFieldArray() // Provide the current offset to record the batch size metric offset() int // Stacks, see PushEncoder push(in pushEncoder) pop() error // To record metrics when provided metricRegistry() metrics.Registry } // PushEncoder is the interface for encoding fields like CRCs and lengths where the value // of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where // the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they // depend upon have been written. type pushEncoder interface { // Saves the offset into the input buffer as the location to actually write the calculated value when able. saveOffset(in int) // Returns the length of data to reserve for the output of this encoder (eg 4 bytes for a CRC32). reserveLength() int // Indicates that all required data is now available to calculate and write the field. // SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes // of data to the saved offset, based on the data between the saved offset and curOffset. 
run(curOffset int, buf []byte) error } // dynamicPushEncoder extends the interface of pushEncoder for uses cases where the length of the // fields itself is unknown until its value was computed (for instance varint encoded length // fields). type dynamicPushEncoder interface { pushEncoder // Called during pop() to adjust the length of the field. // It should return the difference in bytes between the last computed length and current length. adjustLength(currOffset int) int } golang-github-ibm-sarama-1.43.2/partitioner.go000066400000000000000000000210111461256741300212320ustar00rootroot00000000000000package sarama import ( "hash" "hash/crc32" "hash/fnv" "math/rand" "time" ) // Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1], // decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided // as simple default implementations. type Partitioner interface { // Partition takes a message and partition count and chooses a partition Partition(message *ProducerMessage, numPartitions int32) (int32, error) // RequiresConsistency indicates to the user of the partitioner whether the // mapping of key->partition is consistent or not. Specifically, if a // partitioner requires consistency then it must be allowed to choose from all // partitions (even ones known to be unavailable), and its choice must be // respected by the caller. The obvious example is the HashPartitioner. RequiresConsistency() bool } // DynamicConsistencyPartitioner can optionally be implemented by Partitioners // in order to allow more flexibility than is originally allowed by the // RequiresConsistency method in the Partitioner interface. This allows // partitioners to require consistency sometimes, but not all times. It's useful // for, e.g., the HashPartitioner, which does not require consistency if the // message key is nil. type DynamicConsistencyPartitioner interface { Partitioner // MessageRequiresConsistency is similar to Partitioner.RequiresConsistency, // but takes in the message being partitioned so that the partitioner can // make a per-message determination. MessageRequiresConsistency(message *ProducerMessage) bool } // PartitionerConstructor is the type for a function capable of constructing new Partitioners. type PartitionerConstructor func(topic string) Partitioner type manualPartitioner struct{} // HashPartitionerOption lets you modify default values of the partitioner type HashPartitionerOption func(*hashPartitioner) // WithAbsFirst means that the partitioner handles absolute values // in the same way as the reference Java implementation func WithAbsFirst() HashPartitionerOption { return func(hp *hashPartitioner) { hp.referenceAbs = true } } // WithHashUnsigned means the partitioner treats the hashed value as unsigned when // partitioning. 
This is intended to be combined with the crc32 hash algorithm to // be compatible with librdkafka's implementation func WithHashUnsigned() HashPartitionerOption { return func(hp *hashPartitioner) { hp.hashUnsigned = true } } // WithCustomHashFunction lets you specify what hash function to use for the partitioning func WithCustomHashFunction(hasher func() hash.Hash32) HashPartitionerOption { return func(hp *hashPartitioner) { hp.hasher = hasher() } } // WithCustomFallbackPartitioner lets you specify what HashPartitioner should be used in case a Distribution Key is empty func WithCustomFallbackPartitioner(randomHP Partitioner) HashPartitionerOption { return func(hp *hashPartitioner) { hp.random = randomHP } } // NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided // ProducerMessage's Partition field as the partition to produce to. func NewManualPartitioner(topic string) Partitioner { return new(manualPartitioner) } func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { return message.Partition, nil } func (p *manualPartitioner) RequiresConsistency() bool { return true } type randomPartitioner struct { generator *rand.Rand } // NewRandomPartitioner returns a Partitioner which chooses a random partition each time. func NewRandomPartitioner(topic string) Partitioner { p := new(randomPartitioner) p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano())) return p } func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { return int32(p.generator.Intn(int(numPartitions))), nil } func (p *randomPartitioner) RequiresConsistency() bool { return false } type roundRobinPartitioner struct { partition int32 } // NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time. func NewRoundRobinPartitioner(topic string) Partitioner { return &roundRobinPartitioner{} } func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { if p.partition >= numPartitions { p.partition = 0 } ret := p.partition p.partition++ return ret, nil } func (p *roundRobinPartitioner) RequiresConsistency() bool { return false } type hashPartitioner struct { random Partitioner hasher hash.Hash32 referenceAbs bool hashUnsigned bool } // NewCustomHashPartitioner is a wrapper around NewHashPartitioner, allowing the use of custom hasher. // The argument is a function providing the instance, implementing the hash.Hash32 interface. This is to ensure that // each partition dispatcher gets its own hasher, to avoid concurrency issues by sharing an instance. func NewCustomHashPartitioner(hasher func() hash.Hash32) PartitionerConstructor { return func(topic string) Partitioner { p := new(hashPartitioner) p.random = NewRandomPartitioner(topic) p.hasher = hasher() p.referenceAbs = false p.hashUnsigned = false return p } } // NewCustomPartitioner creates a default Partitioner but lets you specify the behavior of each component via options func NewCustomPartitioner(options ...HashPartitionerOption) PartitionerConstructor { return func(topic string) Partitioner { p := new(hashPartitioner) p.random = NewRandomPartitioner(topic) p.hasher = fnv.New32a() p.referenceAbs = false p.hashUnsigned = false for _, option := range options { option(p) } return p } } // NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a // random partition is chosen. 
Otherwise the FNV-1a hash of the encoded bytes of the message key is used, // modulo the number of partitions. This ensures that messages with the same key always end up on the // same partition. func NewHashPartitioner(topic string) Partitioner { p := new(hashPartitioner) p.random = NewRandomPartitioner(topic) p.hasher = fnv.New32a() p.referenceAbs = false p.hashUnsigned = false return p } // NewReferenceHashPartitioner is like NewHashPartitioner except that it handles absolute values // in the same way as the reference Java implementation. NewHashPartitioner was supposed to do // that but it had a mistake and now there are people depending on both behaviors. This will // all go away on the next major version bump. func NewReferenceHashPartitioner(topic string) Partitioner { p := new(hashPartitioner) p.random = NewRandomPartitioner(topic) p.hasher = fnv.New32a() p.referenceAbs = true p.hashUnsigned = false return p } // NewConsistentCRCHashPartitioner is like NewHashPartitioner except that it uses the *unsigned* crc32 hash // of the encoded bytes of the message key modulo the number of partitions. This is compatible with // librdkafka's `consistent_random` partitioner. func NewConsistentCRCHashPartitioner(topic string) Partitioner { p := new(hashPartitioner) p.random = NewRandomPartitioner(topic) p.hasher = crc32.NewIEEE() p.referenceAbs = false p.hashUnsigned = true return p } func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { if message.Key == nil { return p.random.Partition(message, numPartitions) } bytes, err := message.Key.Encode() if err != nil { return -1, err } p.hasher.Reset() _, err = p.hasher.Write(bytes) if err != nil { return -1, err } var partition int32 // Turns out we were doing our absolute value in a subtly different way from the upstream // implementation, but now we need to maintain backwards compat for people who started using // the old version; if referenceAbs is set we are compatible with the reference java client // but not past Sarama versions if p.referenceAbs { partition = (int32(p.hasher.Sum32()) & 0x7fffffff) % numPartitions } else if p.hashUnsigned { // librdkafka treats the hashed value as unsigned.
If `hashUnsigned` is set we are compatible // with librdkafka's `consistent` partitioning but not past Sarama versions partition = int32(p.hasher.Sum32() % uint32(numPartitions)) } else { partition = int32(p.hasher.Sum32()) % numPartitions if partition < 0 { partition = -partition } } return partition, nil } func (p *hashPartitioner) RequiresConsistency() bool { return true } func (p *hashPartitioner) MessageRequiresConsistency(message *ProducerMessage) bool { return message.Key != nil } golang-github-ibm-sarama-1.43.2/partitioner_test.go000066400000000000000000000251411461256741300223010ustar00rootroot00000000000000package sarama import ( "crypto/rand" "hash/crc32" "hash/fnv" "log" "testing" ) func assertPartitioningConsistent(t *testing.T, partitioner Partitioner, message *ProducerMessage, numPartitions int32) { choice, err := partitioner.Partition(message, numPartitions) if err != nil { t.Error(partitioner, err) } if choice < 0 || choice >= numPartitions { t.Error(partitioner, "returned partition", choice, "outside of range for", message) } for i := 1; i < 50; i++ { newChoice, err := partitioner.Partition(message, numPartitions) if err != nil { t.Error(partitioner, err) } if newChoice != choice { t.Error(partitioner, "returned partition", newChoice, "inconsistent with", choice, ".") } } } type partitionerTestCase struct { key string expectedPartition int32 } func partitionAndAssert(t *testing.T, partitioner Partitioner, numPartitions int32, testCase partitionerTestCase) { t.Run("partitionAndAssert "+testCase.key, func(t *testing.T) { msg := &ProducerMessage{ Key: StringEncoder(testCase.key), } partition, err := partitioner.Partition(msg, numPartitions) if err != nil { t.Error(partitioner, err) } if partition != testCase.expectedPartition { t.Error(partitioner, "partitioning", testCase.key, "returned partition", partition, "but expected", testCase.expectedPartition, ".") } }) } func TestRandomPartitioner(t *testing.T) { partitioner := NewRandomPartitioner("mytopic") choice, err := partitioner.Partition(nil, 1) if err != nil { t.Error(partitioner, err) } if choice != 0 { t.Error("Returned non-zero partition when only one available.") } for i := 1; i < 50; i++ { choice, err := partitioner.Partition(nil, 50) if err != nil { t.Error(partitioner, err) } if choice < 0 || choice >= 50 { t.Error("Returned partition", choice, "outside of range.") } } } func TestRoundRobinPartitioner(t *testing.T) { partitioner := NewRoundRobinPartitioner("mytopic") choice, err := partitioner.Partition(nil, 1) if err != nil { t.Error(partitioner, err) } if choice != 0 { t.Error("Returned non-zero partition when only one available.") } var i int32 for i = 1; i < 50; i++ { choice, err := partitioner.Partition(nil, 7) if err != nil { t.Error(partitioner, err) } if choice != i%7 { t.Error("Returned partition", choice, "expecting", i%7) } } } func TestNewHashPartitionerWithHasher(t *testing.T) { // use the current default hasher fnv.New32a() partitioner := NewCustomHashPartitioner(fnv.New32a)("mytopic") choice, err := partitioner.Partition(&ProducerMessage{}, 1) if err != nil { t.Error(partitioner, err) } if choice != 0 { t.Error("Returned non-zero partition when only one available.") } for i := 1; i < 50; i++ { choice, err := partitioner.Partition(&ProducerMessage{}, 50) if err != nil { t.Error(partitioner, err) } if choice < 0 || choice >= 50 { t.Error("Returned partition", choice, "outside of range for nil key.") } } buf := make([]byte, 256) for i := 1; i < 50; i++ { if _, err := rand.Read(buf); err != nil { 
t.Error(err) } assertPartitioningConsistent(t, partitioner, &ProducerMessage{Key: ByteEncoder(buf)}, 50) } } func TestHashPartitionerWithHasherMinInt32(t *testing.T) { // use the current default hasher fnv.New32a() partitioner := NewCustomHashPartitioner(fnv.New32a)("mytopic") msg := ProducerMessage{} // "1468509572224" generates 2147483648 (uint32) result from Sum32 function // which is -2147483648 or int32's min value msg.Key = StringEncoder("1468509572224") choice, err := partitioner.Partition(&msg, 50) if err != nil { t.Error(partitioner, err) } if choice < 0 || choice >= 50 { t.Error("Returned partition", choice, "outside of range for nil key.") } } func TestHashPartitioner(t *testing.T) { partitioner := NewHashPartitioner("mytopic") choice, err := partitioner.Partition(&ProducerMessage{}, 1) if err != nil { t.Error(partitioner, err) } if choice != 0 { t.Error("Returned non-zero partition when only one available.") } for i := 1; i < 50; i++ { choice, err := partitioner.Partition(&ProducerMessage{}, 50) if err != nil { t.Error(partitioner, err) } if choice < 0 || choice >= 50 { t.Error("Returned partition", choice, "outside of range for nil key.") } } buf := make([]byte, 256) for i := 1; i < 50; i++ { if _, err := rand.Read(buf); err != nil { t.Error(err) } assertPartitioningConsistent(t, partitioner, &ProducerMessage{Key: ByteEncoder(buf)}, 50) } } func TestHashPartitionerConsistency(t *testing.T) { partitioner := NewHashPartitioner("mytopic") ep, ok := partitioner.(DynamicConsistencyPartitioner) if !ok { t.Error("Hash partitioner does not implement DynamicConsistencyPartitioner") } consistency := ep.MessageRequiresConsistency(&ProducerMessage{Key: StringEncoder("hi")}) if !consistency { t.Error("Messages with keys should require consistency") } consistency = ep.MessageRequiresConsistency(&ProducerMessage{}) if consistency { t.Error("Messages without keys should require consistency") } } func TestHashPartitionerMinInt32(t *testing.T) { partitioner := NewHashPartitioner("mytopic") msg := ProducerMessage{} // "1468509572224" generates 2147483648 (uint32) result from Sum32 function // which is -2147483648 or int32's min value msg.Key = StringEncoder("1468509572224") choice, err := partitioner.Partition(&msg, 50) if err != nil { t.Error(partitioner, err) } if choice < 0 || choice >= 50 { t.Error("Returned partition", choice, "outside of range for nil key.") } } func TestConsistentCRCHashPartitioner(t *testing.T) { numPartitions := int32(100) partitioner := NewConsistentCRCHashPartitioner("mytopic") testCases := []partitionerTestCase{ { key: "abc123def456", expectedPartition: 57, }, { // `SheetJS` has a crc32 hash value of 2647669026 (which is -1647298270 as a signed int32) // Modding the signed value will give a partition of 70. 
Modding the unsigned value will give 26 key: "SheetJS", expectedPartition: 26, }, { key: "9e8c7f4cf45857cfff7645d6", expectedPartition: 24, }, { key: "3900446192ff85a5f67da10c", expectedPartition: 75, }, { key: "0f4407b7a67d6d27de372198", expectedPartition: 50, }, } for _, tc := range testCases { partitionAndAssert(t, partitioner, numPartitions, tc) } } func TestCustomPartitionerWithConsistentHashing(t *testing.T) { // Setting both `hashUnsigned` and the hash function to `crc32.NewIEEE` is equivalent to using `NewConsistentCRCHashPartitioner` partitioner := NewCustomPartitioner( WithHashUnsigned(), WithCustomHashFunction(crc32.NewIEEE), )("mytopic") // See above re: why `SheetJS` msg := ProducerMessage{ Key: StringEncoder("SheetJS"), } choice, err := partitioner.Partition(&msg, 100) if err != nil { t.Error(partitioner, err) } expectedPartition := int32(26) if choice != expectedPartition { t.Error(partitioner, "returned partition", choice, "but expected", expectedPartition, ".") } } func TestManualPartitioner(t *testing.T) { partitioner := NewManualPartitioner("mytopic") choice, err := partitioner.Partition(&ProducerMessage{}, 1) if err != nil { t.Error(partitioner, err) } if choice != 0 { t.Error("Returned non-zero partition when only one available.") } for i := int32(1); i < 50; i++ { choice, err := partitioner.Partition(&ProducerMessage{Partition: i}, 50) if err != nil { t.Error(partitioner, err) } if choice != i { t.Error("Returned partition not the same as the input partition") } } } func TestWithCustomFallbackPartitioner(t *testing.T) { topic := "mytopic" partitioner := NewCustomPartitioner( // override default random partitioner with round robin WithCustomFallbackPartitioner(NewRoundRobinPartitioner(topic)), )(topic) // Should use round robin implementation if there is no key var i int32 for i = 0; i < 50; i++ { choice, err := partitioner.Partition(&ProducerMessage{Key: nil}, 7) if err != nil { t.Error(partitioner, err) } if choice != i%7 { t.Error("Returned partition", choice, "expecting", i%7) } } // should use hash partitioner if key is specified buf := make([]byte, 256) for i := 0; i < 50; i++ { if _, err := rand.Read(buf); err != nil { t.Error(err) } assertPartitioningConsistent(t, partitioner, &ProducerMessage{Key: ByteEncoder(buf)}, 50) } } // By default, Sarama uses the message's key to consistently assign a partition to // a message using hashing. If no key is set, a random partition will be chosen. // This example shows how you can partition messages randomly, even when a key is set, // by overriding Config.Producer.Partitioner. func ExamplePartitioner_random() { config := NewTestConfig() config.Producer.Partitioner = NewRandomPartitioner producer, err := NewSyncProducer([]string{"localhost:9092"}, config) if err != nil { log.Println(err) return } defer func() { if err := producer.Close(); err != nil { log.Println("Failed to close producer:", err) } }() msg := &ProducerMessage{Topic: "test", Key: StringEncoder("key is set"), Value: StringEncoder("test")} partition, offset, err := producer.SendMessage(msg) if err != nil { log.Println("Failed to produce message to kafka cluster.") return } log.Printf("Produced message to partition %d with offset %d", partition, offset) } // This example shows how to assign partitions to your messages manually. func ExamplePartitioner_manual() { config := NewTestConfig() // First, we tell the producer that we are going to partition ourselves. 
config.Producer.Partitioner = NewManualPartitioner producer, err := NewSyncProducer([]string{"localhost:9092"}, config) if err != nil { log.Println(err) return } defer func() { if err := producer.Close(); err != nil { log.Println("Failed to close producer:", err) } }() // Now, we set the Partition field of the ProducerMessage struct. msg := &ProducerMessage{Topic: "test", Partition: 6, Value: StringEncoder("test")} partition, offset, err := producer.SendMessage(msg) if err != nil { log.Println("Failed to produce message to kafka cluster.") return } if partition != 6 { log.Println("Message should have been produced to partition 6!") return } log.Printf("Produced message to partition %d with offset %d", partition, offset) } // This example shows how to set a different partitioner depending on the topic. func ExamplePartitioner_per_topic() { config := NewTestConfig() config.Producer.Partitioner = func(topic string) Partitioner { switch topic { case "access_log", "error_log": return NewRandomPartitioner(topic) default: return NewHashPartitioner(topic) } } // ... } golang-github-ibm-sarama-1.43.2/prep_encoder.go000066400000000000000000000076301461256741300213520ustar00rootroot00000000000000package sarama import ( "encoding/binary" "errors" "fmt" "math" "github.com/rcrowley/go-metrics" ) type prepEncoder struct { stack []pushEncoder length int } // primitives func (pe *prepEncoder) putInt8(in int8) { pe.length++ } func (pe *prepEncoder) putInt16(in int16) { pe.length += 2 } func (pe *prepEncoder) putInt32(in int32) { pe.length += 4 } func (pe *prepEncoder) putInt64(in int64) { pe.length += 8 } func (pe *prepEncoder) putVarint(in int64) { var buf [binary.MaxVarintLen64]byte pe.length += binary.PutVarint(buf[:], in) } func (pe *prepEncoder) putUVarint(in uint64) { var buf [binary.MaxVarintLen64]byte pe.length += binary.PutUvarint(buf[:], in) } func (pe *prepEncoder) putFloat64(in float64) { pe.length += 8 } func (pe *prepEncoder) putArrayLength(in int) error { if in > math.MaxInt32 { return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)} } pe.length += 4 return nil } func (pe *prepEncoder) putCompactArrayLength(in int) { pe.putUVarint(uint64(in + 1)) } func (pe *prepEncoder) putBool(in bool) { pe.length++ } // arrays func (pe *prepEncoder) putBytes(in []byte) error { pe.length += 4 if in == nil { return nil } return pe.putRawBytes(in) } func (pe *prepEncoder) putVarintBytes(in []byte) error { if in == nil { pe.putVarint(-1) return nil } pe.putVarint(int64(len(in))) return pe.putRawBytes(in) } func (pe *prepEncoder) putCompactBytes(in []byte) error { pe.putUVarint(uint64(len(in) + 1)) return pe.putRawBytes(in) } func (pe *prepEncoder) putCompactString(in string) error { pe.putCompactArrayLength(len(in)) return pe.putRawBytes([]byte(in)) } func (pe *prepEncoder) putNullableCompactString(in *string) error { if in == nil { pe.putUVarint(0) return nil } else { return pe.putCompactString(*in) } } func (pe *prepEncoder) putRawBytes(in []byte) error { if len(in) > math.MaxInt32 { return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))} } pe.length += len(in) return nil } func (pe *prepEncoder) putNullableString(in *string) error { if in == nil { pe.length += 2 return nil } return pe.putString(*in) } func (pe *prepEncoder) putString(in string) error { pe.length += 2 if len(in) > math.MaxInt16 { return PacketEncodingError{fmt.Sprintf("string too long (%d)", len(in))} } pe.length += len(in) return nil } func (pe *prepEncoder) putStringArray(in []string) error { err := 
pe.putArrayLength(len(in)) if err != nil { return err } for _, str := range in { if err := pe.putString(str); err != nil { return err } } return nil } func (pe *prepEncoder) putCompactInt32Array(in []int32) error { if in == nil { return errors.New("expected int32 array to be non null") } pe.putUVarint(uint64(len(in)) + 1) pe.length += 4 * len(in) return nil } func (pe *prepEncoder) putNullableCompactInt32Array(in []int32) error { if in == nil { pe.putUVarint(0) return nil } pe.putUVarint(uint64(len(in)) + 1) pe.length += 4 * len(in) return nil } func (pe *prepEncoder) putInt32Array(in []int32) error { err := pe.putArrayLength(len(in)) if err != nil { return err } pe.length += 4 * len(in) return nil } func (pe *prepEncoder) putInt64Array(in []int64) error { err := pe.putArrayLength(len(in)) if err != nil { return err } pe.length += 8 * len(in) return nil } func (pe *prepEncoder) putEmptyTaggedFieldArray() { pe.putUVarint(0) } func (pe *prepEncoder) offset() int { return pe.length } // stackable func (pe *prepEncoder) push(in pushEncoder) { in.saveOffset(pe.length) pe.length += in.reserveLength() pe.stack = append(pe.stack, in) } func (pe *prepEncoder) pop() error { in := pe.stack[len(pe.stack)-1] pe.stack = pe.stack[:len(pe.stack)-1] if dpe, ok := in.(dynamicPushEncoder); ok { pe.length += dpe.adjustLength(pe.length) } return nil } // we do not record metrics during the prep encoder pass func (pe *prepEncoder) metricRegistry() metrics.Registry { return nil } golang-github-ibm-sarama-1.43.2/produce_request.go000066400000000000000000000166561461256741300221260ustar00rootroot00000000000000package sarama import "github.com/rcrowley/go-metrics" // RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements // it must see before responding. Any of the constants defined here are valid. On broker versions // prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many // acknowledgements) but in 0.8.2.0 and later this will raise an exception (it has been replaced // by setting the `min.isr` value in the brokers configuration). type RequiredAcks int16 const ( // NoResponse doesn't send any response, the TCP ACK is all you get. NoResponse RequiredAcks = 0 // WaitForLocal waits for only the local commit to succeed before responding. WaitForLocal RequiredAcks = 1 // WaitForAll waits for all in-sync replicas to commit before responding. // The minimum number of in-sync replicas is configured on the broker via // the `min.insync.replicas` configuration key. WaitForAll RequiredAcks = -1 ) type ProduceRequest struct { TransactionalID *string RequiredAcks RequiredAcks Timeout int32 Version int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10, v3 requires Kafka 0.11 records map[string]map[int32]Records } func updateMsgSetMetrics(msgSet *MessageSet, compressionRatioMetric metrics.Histogram, topicCompressionRatioMetric metrics.Histogram, ) int64 { var topicRecordCount int64 for _, messageBlock := range msgSet.Messages { // Is this a fake "message" wrapping real messages? 
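// NOTE: illustrative sketch added for context, not part of the upstream sarama source.
// RequiredAcks above is surfaced to applications through Config.Producer.RequiredAcks.
// A minimal sketch with a placeholder broker address and an imported "log"; WaitForAll
// only gives the expected durability when the topic's min.insync.replicas is also set
// appropriately on the broker side:
//
//	config := NewConfig()
//	config.Producer.RequiredAcks = WaitForAll // wait for the full in-sync replica set
//	config.Producer.Return.Successes = true   // required by the sync producer
//	producer, err := NewSyncProducer([]string{"localhost:9092"}, config)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer producer.Close()
//	partition, offset, err := producer.SendMessage(&ProducerMessage{
//		Topic: "my_topic",
//		Value: StringEncoder("acked write"),
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Println("stored at", partition, offset)
// (end of illustrative sketch)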
if messageBlock.Msg.Set != nil { topicRecordCount += int64(len(messageBlock.Msg.Set.Messages)) } else { // A single uncompressed message topicRecordCount++ } // Better be safe than sorry when computing the compression ratio if messageBlock.Msg.compressedSize != 0 { compressionRatio := float64(len(messageBlock.Msg.Value)) / float64(messageBlock.Msg.compressedSize) // Histogram do not support decimal values, let's multiple it by 100 for better precision intCompressionRatio := int64(100 * compressionRatio) compressionRatioMetric.Update(intCompressionRatio) topicCompressionRatioMetric.Update(intCompressionRatio) } } return topicRecordCount } func updateBatchMetrics(recordBatch *RecordBatch, compressionRatioMetric metrics.Histogram, topicCompressionRatioMetric metrics.Histogram, ) int64 { if recordBatch.compressedRecords != nil { compressionRatio := int64(float64(recordBatch.recordsLen) / float64(len(recordBatch.compressedRecords)) * 100) compressionRatioMetric.Update(compressionRatio) topicCompressionRatioMetric.Update(compressionRatio) } return int64(len(recordBatch.Records)) } func (r *ProduceRequest) encode(pe packetEncoder) error { if r.Version >= 3 { if err := pe.putNullableString(r.TransactionalID); err != nil { return err } } pe.putInt16(int16(r.RequiredAcks)) pe.putInt32(r.Timeout) metricRegistry := pe.metricRegistry() var batchSizeMetric metrics.Histogram var compressionRatioMetric metrics.Histogram if metricRegistry != nil { batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry) compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry) } totalRecordCount := int64(0) err := pe.putArrayLength(len(r.records)) if err != nil { return err } for topic, partitions := range r.records { err = pe.putString(topic) if err != nil { return err } err = pe.putArrayLength(len(partitions)) if err != nil { return err } topicRecordCount := int64(0) var topicCompressionRatioMetric metrics.Histogram if metricRegistry != nil { topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry) } for id, records := range partitions { startOffset := pe.offset() pe.putInt32(id) pe.push(&lengthField{}) err = records.encode(pe) if err != nil { return err } err = pe.pop() if err != nil { return err } if metricRegistry != nil { if r.Version >= 3 { topicRecordCount += updateBatchMetrics(records.RecordBatch, compressionRatioMetric, topicCompressionRatioMetric) } else { topicRecordCount += updateMsgSetMetrics(records.MsgSet, compressionRatioMetric, topicCompressionRatioMetric) } batchSize := int64(pe.offset() - startOffset) batchSizeMetric.Update(batchSize) getOrRegisterTopicHistogram("batch-size", topic, metricRegistry).Update(batchSize) } } if topicRecordCount > 0 { getOrRegisterTopicMeter("record-send-rate", topic, metricRegistry).Mark(topicRecordCount) getOrRegisterTopicHistogram("records-per-request", topic, metricRegistry).Update(topicRecordCount) totalRecordCount += topicRecordCount } } if totalRecordCount > 0 { metrics.GetOrRegisterMeter("record-send-rate", metricRegistry).Mark(totalRecordCount) getOrRegisterHistogram("records-per-request", metricRegistry).Update(totalRecordCount) } return nil } func (r *ProduceRequest) decode(pd packetDecoder, version int16) error { r.Version = version if version >= 3 { id, err := pd.getNullableString() if err != nil { return err } r.TransactionalID = id } requiredAcks, err := pd.getInt16() if err != nil { return err } r.RequiredAcks = RequiredAcks(requiredAcks) if r.Timeout, err = pd.getInt32(); 
err != nil { return err } topicCount, err := pd.getArrayLength() if err != nil { return err } if topicCount == 0 { return nil } r.records = make(map[string]map[int32]Records) for i := 0; i < topicCount; i++ { topic, err := pd.getString() if err != nil { return err } partitionCount, err := pd.getArrayLength() if err != nil { return err } r.records[topic] = make(map[int32]Records) for j := 0; j < partitionCount; j++ { partition, err := pd.getInt32() if err != nil { return err } size, err := pd.getInt32() if err != nil { return err } recordsDecoder, err := pd.getSubset(int(size)) if err != nil { return err } var records Records if err := records.decode(recordsDecoder); err != nil { return err } r.records[topic][partition] = records } } return nil } func (r *ProduceRequest) key() int16 { return 0 } func (r *ProduceRequest) version() int16 { return r.Version } func (r *ProduceRequest) headerVersion() int16 { return 1 } func (r *ProduceRequest) isValidVersion() bool { return r.Version >= 0 && r.Version <= 7 } func (r *ProduceRequest) requiredVersion() KafkaVersion { switch r.Version { case 7: return V2_1_0_0 case 6: return V2_0_0_0 case 4, 5: return V1_0_0_0 case 3: return V0_11_0_0 case 2: return V0_10_0_0 case 1: return V0_9_0_0 case 0: return V0_8_2_0 default: return V2_1_0_0 } } func (r *ProduceRequest) ensureRecords(topic string, partition int32) { if r.records == nil { r.records = make(map[string]map[int32]Records) } if r.records[topic] == nil { r.records[topic] = make(map[int32]Records) } } func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) { r.ensureRecords(topic, partition) set := r.records[topic][partition].MsgSet if set == nil { set = new(MessageSet) r.records[topic][partition] = newLegacyRecords(set) } set.addMessage(msg) } func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) { r.ensureRecords(topic, partition) r.records[topic][partition] = newLegacyRecords(set) } func (r *ProduceRequest) AddBatch(topic string, partition int32, batch *RecordBatch) { r.ensureRecords(topic, partition) r.records[topic][partition] = newDefaultRecords(batch) } golang-github-ibm-sarama-1.43.2/produce_request_test.go000066400000000000000000000053731461256741300231570ustar00rootroot00000000000000package sarama import ( "testing" "time" ) var ( produceRequestEmpty = []byte{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, } produceRequestHeader = []byte{ 0x01, 0x23, 0x00, 0x00, 0x04, 0x44, 0x00, 0x00, 0x00, 0x00, } produceRequestOneMessage = []byte{ 0x01, 0x23, 0x00, 0x00, 0x04, 0x44, 0x00, 0x00, 0x00, 0x01, 0x00, 0x05, 't', 'o', 'p', 'i', 'c', 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xAD, 0x00, 0x00, 0x00, 0x1C, // messageSet 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, // message 0x23, 0x96, 0x4a, 0xf7, // CRC 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x02, 0x00, 0xEE, } produceRequestOneRecord = []byte{ 0xFF, 0xFF, // Transaction ID 0x01, 0x23, // Required Acks 0x00, 0x00, 0x04, 0x44, // Timeout 0x00, 0x00, 0x00, 0x01, // Number of Topics 0x00, 0x05, 't', 'o', 'p', 'i', 'c', // Topic 0x00, 0x00, 0x00, 0x01, // Number of Partitions 0x00, 0x00, 0x00, 0xAD, // Partition 0x00, 0x00, 0x00, 0x52, // Records length // recordBatch 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46, 0x00, 0x00, 0x00, 0x00, 0x02, 0xCA, 0x33, 0xBC, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x01, 0x58, 0x8D, 0xCD, 0x59, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // record 0x28, 0x00, 0x0A, 0x00, 0x08, 0x01, 0x02, 0x03, 0x04, 0x06, 0x05, 0x06, 0x07, 0x02, 0x06, 0x08, 0x09, 0x0A, 0x04, 0x0B, 0x0C, } ) func TestProduceRequest(t *testing.T) { request := new(ProduceRequest) testRequest(t, "empty", request, produceRequestEmpty) request.RequiredAcks = 0x123 request.Timeout = 0x444 testRequest(t, "header", request, produceRequestHeader) request.AddMessage("topic", 0xAD, &Message{Codec: CompressionNone, Key: nil, Value: []byte{0x00, 0xEE}}) testRequest(t, "one message", request, produceRequestOneMessage) request.Version = 3 batch := &RecordBatch{ LastOffsetDelta: 1, Version: 2, FirstTimestamp: time.Unix(1479847795, 0), MaxTimestamp: time.Unix(0, 0), Records: []*Record{{ TimestampDelta: 5 * time.Millisecond, Key: []byte{0x01, 0x02, 0x03, 0x04}, Value: []byte{0x05, 0x06, 0x07}, Headers: []*RecordHeader{{ Key: []byte{0x08, 0x09, 0x0A}, Value: []byte{0x0B, 0x0C}, }}, }}, } request.AddBatch("topic", 0xAD, batch) packet := testRequestEncode(t, "one record", request, produceRequestOneRecord) // compressRecords field is not populated on decoding because consumers // are only interested in decoded records. batch.compressedRecords = nil testRequestDecode(t, "one record", request, packet) } golang-github-ibm-sarama-1.43.2/produce_response.go000066400000000000000000000114011461256741300222530ustar00rootroot00000000000000package sarama import ( "fmt" "time" ) // Protocol, http://kafka.apache.org/protocol.html // v1 // v2 = v3 = v4 // v5 = v6 = v7 // Produce Response (Version: 7) => [responses] throttle_time_ms // responses => topic [partition_responses] // topic => STRING // partition_responses => partition error_code base_offset log_append_time log_start_offset // partition => INT32 // error_code => INT16 // base_offset => INT64 // log_append_time => INT64 // log_start_offset => INT64 // throttle_time_ms => INT32 // partition_responses in protocol type ProduceResponseBlock struct { Err KError // v0, error_code Offset int64 // v0, base_offset Timestamp time.Time // v2, log_append_time, and the broker is configured with `LogAppendTime` StartOffset int64 // v5, log_start_offset } func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) { tmp, err := pd.getInt16() if err != nil { return err } b.Err = KError(tmp) b.Offset, err = pd.getInt64() if err != nil { return err } if version >= 2 { if millis, err := pd.getInt64(); err != nil { return err } else if millis != -1 { b.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond)) } } if version >= 5 { b.StartOffset, err = pd.getInt64() if err != nil { return err } } return nil } func (b *ProduceResponseBlock) encode(pe packetEncoder, version int16) (err error) { pe.putInt16(int16(b.Err)) pe.putInt64(b.Offset) if version >= 2 { timestamp := int64(-1) if !b.Timestamp.Before(time.Unix(0, 0)) { timestamp = b.Timestamp.UnixNano() / int64(time.Millisecond) } else if !b.Timestamp.IsZero() { return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", b.Timestamp)} } pe.putInt64(timestamp) } if version >= 5 { pe.putInt64(b.StartOffset) } return nil } type ProduceResponse struct { Blocks map[string]map[int32]*ProduceResponseBlock // v0, responses Version int16 ThrottleTime time.Duration // v1, throttle_time_ms } func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) { r.Version = version numTopics, err := pd.getArrayLength() if err != nil { return err } r.Blocks = 
make(map[string]map[int32]*ProduceResponseBlock, numTopics) for i := 0; i < numTopics; i++ { name, err := pd.getString() if err != nil { return err } numBlocks, err := pd.getArrayLength() if err != nil { return err } r.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks) for j := 0; j < numBlocks; j++ { id, err := pd.getInt32() if err != nil { return err } block := new(ProduceResponseBlock) err = block.decode(pd, version) if err != nil { return err } r.Blocks[name][id] = block } } if r.Version >= 1 { millis, err := pd.getInt32() if err != nil { return err } r.ThrottleTime = time.Duration(millis) * time.Millisecond } return nil } func (r *ProduceResponse) encode(pe packetEncoder) error { err := pe.putArrayLength(len(r.Blocks)) if err != nil { return err } for topic, partitions := range r.Blocks { err = pe.putString(topic) if err != nil { return err } err = pe.putArrayLength(len(partitions)) if err != nil { return err } for id, prb := range partitions { pe.putInt32(id) err = prb.encode(pe, r.Version) if err != nil { return err } } } if r.Version >= 1 { pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) } return nil } func (r *ProduceResponse) key() int16 { return 0 } func (r *ProduceResponse) version() int16 { return r.Version } func (r *ProduceResponse) headerVersion() int16 { return 0 } func (r *ProduceResponse) isValidVersion() bool { return r.Version >= 0 && r.Version <= 7 } func (r *ProduceResponse) requiredVersion() KafkaVersion { switch r.Version { case 7: return V2_1_0_0 case 6: return V2_0_0_0 case 4, 5: return V1_0_0_0 case 3: return V0_11_0_0 case 2: return V0_10_0_0 case 1: return V0_9_0_0 case 0: return V0_8_2_0 default: return V2_1_0_0 } } func (r *ProduceResponse) throttleTime() time.Duration { return r.ThrottleTime } func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock { if r.Blocks == nil { return nil } if r.Blocks[topic] == nil { return nil } return r.Blocks[topic][partition] } // Testing API func (r *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) { if r.Blocks == nil { r.Blocks = make(map[string]map[int32]*ProduceResponseBlock) } byTopic, ok := r.Blocks[topic] if !ok { byTopic = make(map[int32]*ProduceResponseBlock) r.Blocks[topic] = byTopic } block := &ProduceResponseBlock{ Err: err, } if r.Version >= 2 { block.Timestamp = time.Now() } byTopic[partition] = block } golang-github-ibm-sarama-1.43.2/produce_response_test.go000066400000000000000000000111241461256741300233140ustar00rootroot00000000000000package sarama import ( "errors" "fmt" "testing" "time" ) var ( produceResponseNoBlocksV0 = []byte{ 0x00, 0x00, 0x00, 0x00, } produceResponseManyBlocksVersions = map[int][]byte{ 0: { 0x00, 0x00, 0x00, 0x01, 0x00, 0x03, 'f', 'o', 'o', 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, // Partition 1 0x00, 0x02, // ErrInvalidMessage 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, // Offset 255 }, 1: { 0x00, 0x00, 0x00, 0x01, 0x00, 0x03, 'f', 'o', 'o', 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, // Partition 1 0x00, 0x02, // ErrInvalidMessage 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, // Offset 255 0x00, 0x00, 0x00, 0x64, // 100 ms throttle time }, 2: { 0x00, 0x00, 0x00, 0x01, 0x00, 0x03, 'f', 'o', 'o', 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, // Partition 1 0x00, 0x02, // ErrInvalidMessage 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, // Offset 255 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xE8, // Timestamp January 1st 0001 at 00:00:01,000 UTC (LogAppendTime was used) 0x00, 0x00, 0x00, 
0x64, // 100 ms throttle time }, 7: { // version 7 adds StartOffset 0x00, 0x00, 0x00, 0x01, 0x00, 0x03, 'f', 'o', 'o', 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, // Partition 1 0x00, 0x02, // ErrInvalidMessage 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, // Offset 255 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xE8, // Timestamp January 1st 0001 at 00:00:01,000 UTC (LogAppendTime was used) 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x32, // StartOffset 50 0x00, 0x00, 0x00, 0x64, // 100 ms throttle time }, } ) func TestProduceResponseDecode(t *testing.T) { response := ProduceResponse{} testVersionDecodable(t, "no blocks", &response, produceResponseNoBlocksV0, 0) if len(response.Blocks) != 0 { t.Error("Decoding produced", len(response.Blocks), "topics where there were none") } for v, produceResponseManyBlocks := range produceResponseManyBlocksVersions { t.Logf("Decoding produceResponseManyBlocks version %d", v) testVersionDecodable(t, "many blocks", &response, produceResponseManyBlocks, int16(v)) if len(response.Blocks) != 1 { t.Error("Decoding produced", len(response.Blocks), "topics where there was 1") } if len(response.Blocks["foo"]) != 1 { t.Error("Decoding produced", len(response.Blocks["foo"]), "partitions for 'foo' where there was one") } block := response.GetBlock("foo", 1) if block == nil { t.Error("Decoding did not produce a block for foo/1") } else { if !errors.Is(block.Err, ErrInvalidMessage) { t.Error("Decoding failed for foo/1/Err, got:", int16(block.Err)) } if block.Offset != 255 { t.Error("Decoding failed for foo/1/Offset, got:", block.Offset) } if v >= 2 { if !block.Timestamp.Equal(time.Unix(1, 0)) { t.Error("Decoding failed for foo/1/Timestamp, got:", block.Timestamp) } } if v >= 7 { if block.StartOffset != 50 { t.Error("Decoding failed for foo/1/StartOffset, got:", block.StartOffset) } } } if v >= 1 { if expected := 100 * time.Millisecond; response.ThrottleTime != expected { t.Error("Failed decoding produced throttle time, expected:", expected, ", got:", response.ThrottleTime) } } } } func TestProduceResponseEncode(t *testing.T) { response := ProduceResponse{} response.Blocks = make(map[string]map[int32]*ProduceResponseBlock) testEncodable(t, "empty", &response, produceResponseNoBlocksV0) response.Blocks["foo"] = make(map[int32]*ProduceResponseBlock) response.Blocks["foo"][1] = &ProduceResponseBlock{ Err: ErrInvalidMessage, Offset: 255, Timestamp: time.Unix(1, 0), StartOffset: 50, } response.ThrottleTime = 100 * time.Millisecond for v, produceResponseManyBlocks := range produceResponseManyBlocksVersions { response.Version = int16(v) testEncodable(t, fmt.Sprintf("many blocks version %d", v), &response, produceResponseManyBlocks) } } func TestProduceResponseEncodeInvalidTimestamp(t *testing.T) { response := ProduceResponse{} response.Version = 2 response.Blocks = make(map[string]map[int32]*ProduceResponseBlock) response.Blocks["t"] = make(map[int32]*ProduceResponseBlock) response.Blocks["t"][0] = &ProduceResponseBlock{ Err: ErrNoError, Offset: 0, // Use a timestamp before Unix time Timestamp: time.Unix(0, 0).Add(-1 * time.Millisecond), } response.ThrottleTime = 100 * time.Millisecond _, err := encode(&response, nil) if err == nil { t.Error("Expecting error, got nil") } target := PacketEncodingError{} if !errors.As(err, &target) { t.Error("Expecting PacketEncodingError, got:", err) } } golang-github-ibm-sarama-1.43.2/produce_set.go000066400000000000000000000213551461256741300212210ustar00rootroot00000000000000package sarama import ( "encoding/binary" "errors" "time" ) 
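// NOTE: illustrative sketch added for context, not part of the upstream sarama source.
// produceSet below is the async producer's internal accumulation buffer; the limits it
// checks in wouldOverflow and readyToFlush further down are all driven by
// Config.Producer. A sketch of those knobs with placeholder values and broker address
// (a real program must also drain producer.Errors()):
//
//	config := NewConfig()
//	config.Producer.Flush.Frequency = 100 * time.Millisecond // time-based flush trigger
//	config.Producer.Flush.Messages = 1000                    // message-count flush trigger
//	config.Producer.Flush.Bytes = 1 << 20                    // byte-size flush trigger (1 MiB)
//	config.Producer.Flush.MaxMessages = 5000                 // hard cap on messages per request
//	config.Producer.MaxMessageBytes = 1000000                // per-partition batch ceiling (keep <= broker message.max.bytes)
//
//	producer, err := NewAsyncProducer([]string{"localhost:9092"}, config)
//	if err != nil {
//		panic(err)
//	}
//	defer producer.AsyncClose()
//	producer.Input() <- &ProducerMessage{Topic: "my_topic", Value: StringEncoder("buffered write")}
// (end of illustrative sketch)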
type partitionSet struct { msgs []*ProducerMessage recordsToSend Records bufferBytes int } type produceSet struct { parent *asyncProducer msgs map[string]map[int32]*partitionSet producerID int64 producerEpoch int16 bufferBytes int bufferCount int } func newProduceSet(parent *asyncProducer) *produceSet { pid, epoch := parent.txnmgr.getProducerID() return &produceSet{ msgs: make(map[string]map[int32]*partitionSet), parent: parent, producerID: pid, producerEpoch: epoch, } } func (ps *produceSet) add(msg *ProducerMessage) error { var err error var key, val []byte if msg.Key != nil { if key, err = msg.Key.Encode(); err != nil { return err } } if msg.Value != nil { if val, err = msg.Value.Encode(); err != nil { return err } } timestamp := msg.Timestamp if timestamp.IsZero() { timestamp = time.Now() } timestamp = timestamp.Truncate(time.Millisecond) partitions := ps.msgs[msg.Topic] if partitions == nil { partitions = make(map[int32]*partitionSet) ps.msgs[msg.Topic] = partitions } var size int set := partitions[msg.Partition] if set == nil { if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { batch := &RecordBatch{ FirstTimestamp: timestamp, Version: 2, Codec: ps.parent.conf.Producer.Compression, CompressionLevel: ps.parent.conf.Producer.CompressionLevel, ProducerID: ps.producerID, ProducerEpoch: ps.producerEpoch, } if ps.parent.conf.Producer.Idempotent { batch.FirstSequence = msg.sequenceNumber } set = &partitionSet{recordsToSend: newDefaultRecords(batch)} size = recordBatchOverhead } else { set = &partitionSet{recordsToSend: newLegacyRecords(new(MessageSet))} } partitions[msg.Partition] = set } if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { if ps.parent.conf.Producer.Idempotent && msg.sequenceNumber < set.recordsToSend.RecordBatch.FirstSequence { return errors.New("assertion failed: message out of sequence added to a batch") } } // Past this point we can't return an error, because we've already added the message to the set. 
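// NOTE: illustrative sketch added for context, not part of the upstream sarama source.
// The ProducerID, ProducerEpoch and FirstSequence fields populated above only come into
// play when idempotence is enabled. A sketch of the configuration sarama validates for
// that mode (values are illustrative):
//
//	config := NewConfig()
//	config.Version = V0_11_0_0                // idempotence needs the v2 record format
//	config.Producer.Idempotent = true
//	config.Producer.RequiredAcks = WaitForAll // required when Idempotent is true
//	config.Producer.Retry.Max = 5             // must be at least 1
//	config.Net.MaxOpenRequests = 1            // required so sequence numbers stay ordered
// (end of illustrative sketch)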
set.msgs = append(set.msgs, msg) if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { // We are being conservative here to avoid having to prep encode the record size += maximumRecordOverhead rec := &Record{ Key: key, Value: val, TimestampDelta: timestamp.Sub(set.recordsToSend.RecordBatch.FirstTimestamp), } size += len(key) + len(val) if len(msg.Headers) > 0 { rec.Headers = make([]*RecordHeader, len(msg.Headers)) for i := range msg.Headers { rec.Headers[i] = &msg.Headers[i] size += len(rec.Headers[i].Key) + len(rec.Headers[i].Value) + 2*binary.MaxVarintLen32 } } set.recordsToSend.RecordBatch.addRecord(rec) } else { msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val} if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { msgToSend.Timestamp = timestamp msgToSend.Version = 1 } set.recordsToSend.MsgSet.addMessage(msgToSend) size = producerMessageOverhead + len(key) + len(val) } set.bufferBytes += size ps.bufferBytes += size ps.bufferCount++ return nil } func (ps *produceSet) buildRequest() *ProduceRequest { req := &ProduceRequest{ RequiredAcks: ps.parent.conf.Producer.RequiredAcks, Timeout: int32(ps.parent.conf.Producer.Timeout / time.Millisecond), } if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { req.Version = 2 } if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { req.Version = 3 if ps.parent.IsTransactional() { req.TransactionalID = &ps.parent.conf.Producer.Transaction.ID } } if ps.parent.conf.Version.IsAtLeast(V1_0_0_0) { req.Version = 5 } if ps.parent.conf.Version.IsAtLeast(V2_0_0_0) { req.Version = 6 } if ps.parent.conf.Version.IsAtLeast(V2_1_0_0) { req.Version = 7 } for topic, partitionSets := range ps.msgs { for partition, set := range partitionSets { if req.Version >= 3 { // If the API version we're hitting is 3 or greater, we need to calculate // offsets for each record in the batch relative to FirstOffset. // Additionally, we must set LastOffsetDelta to the value of the last offset // in the batch. Since the OffsetDelta of the first record is 0, we know that the // final record of any batch will have an offset of (# of records in batch) - 1. // (See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-Messagesets // under the RecordBatch section for details.) rb := set.recordsToSend.RecordBatch if len(rb.Records) > 0 { rb.LastOffsetDelta = int32(len(rb.Records) - 1) for i, record := range rb.Records { record.OffsetDelta = int64(i) } } // Set the batch as transactional when a transactionalID is set rb.IsTransactional = ps.parent.IsTransactional() req.AddBatch(topic, partition, rb) continue } if ps.parent.conf.Producer.Compression == CompressionNone { req.AddSet(topic, partition, set.recordsToSend.MsgSet) } else { // When compression is enabled, the entire set for each partition is compressed // and sent as the payload of a single fake "message" with the appropriate codec // set and no key. When the server sees a message with a compression codec, it // decompresses the payload and treats the result as its message set. if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { // If our version is 0.10 or later, assign relative offsets // to the inner messages. This lets the broker avoid // recompressing the message set. // (See https://cwiki.apache.org/confluence/display/KAFKA/KIP-31+-+Move+to+relative+offsets+in+compressed+message+sets // for details on relative offsets.) 
for i, msg := range set.recordsToSend.MsgSet.Messages { msg.Offset = int64(i) } } payload, err := encode(set.recordsToSend.MsgSet, ps.parent.metricsRegistry) if err != nil { Logger.Println(err) // if this happens, it's basically our fault. panic(err) } compMsg := &Message{ Codec: ps.parent.conf.Producer.Compression, CompressionLevel: ps.parent.conf.Producer.CompressionLevel, Key: nil, Value: payload, Set: set.recordsToSend.MsgSet, // Provide the underlying message set for accurate metrics } if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { compMsg.Version = 1 compMsg.Timestamp = set.recordsToSend.MsgSet.Messages[0].Msg.Timestamp } req.AddMessage(topic, partition, compMsg) } } } return req } func (ps *produceSet) eachPartition(cb func(topic string, partition int32, pSet *partitionSet)) { for topic, partitionSet := range ps.msgs { for partition, set := range partitionSet { cb(topic, partition, set) } } } func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage { if ps.msgs[topic] == nil { return nil } set := ps.msgs[topic][partition] if set == nil { return nil } ps.bufferBytes -= set.bufferBytes ps.bufferCount -= len(set.msgs) delete(ps.msgs[topic], partition) return set.msgs } func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool { version := 1 if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { version = 2 } switch { // Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety. case ps.bufferBytes+msg.ByteSize(version) >= int(MaxRequestSize-(10*1024)): return true // Would we overflow the size-limit of a message-batch for this partition? case ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil && ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.ByteSize(version) >= ps.parent.conf.Producer.MaxMessageBytes: return true // Would we overflow simply in number of messages? 
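// NOTE: illustrative sketch added for context, not part of the upstream sarama source.
// The compression handling in buildRequest above is controlled by two producer settings.
// A sketch with example values (the gzip level requires importing compress/gzip); with
// Version >= V0_11_0_0 the whole RecordBatch is compressed rather than wrapped in the
// legacy single "fake message":
//
//	config := NewConfig()
//	config.Version = V0_11_0_0
//	config.Producer.Compression = CompressionGZIP
//	config.Producer.CompressionLevel = gzip.BestSpeed
// (end of illustrative sketch)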
case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages: return true default: return false } } func (ps *produceSet) readyToFlush() bool { switch { // If we don't have any messages, nothing else matters case ps.empty(): return false // If all three config values are 0, we always flush as-fast-as-possible case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0: return true // If we've passed the message trigger-point case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages: return true // If we've passed the byte trigger-point case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes: return true default: return false } } func (ps *produceSet) empty() bool { return ps.bufferCount == 0 } golang-github-ibm-sarama-1.43.2/produce_set_test.go000066400000000000000000000246741461256741300222670ustar00rootroot00000000000000package sarama import ( "fmt" "testing" "time" ) func makeProduceSet() (*asyncProducer, *produceSet) { conf := NewTestConfig() txnmgr, _ := newTransactionManager(conf, nil) parent := &asyncProducer{ conf: conf, txnmgr: txnmgr, } return parent, newProduceSet(parent) } func safeAddMessage(t *testing.T, ps *produceSet, msg *ProducerMessage) { if err := ps.add(msg); err != nil { t.Error(err) } } func TestProduceSetInitial(t *testing.T) { _, ps := makeProduceSet() if !ps.empty() { t.Error("New produceSet should be empty") } if ps.readyToFlush() { t.Error("Empty produceSet must never be ready to flush") } } func TestProduceSetAddingMessages(t *testing.T) { _, ps := makeProduceSet() msg := &ProducerMessage{Key: StringEncoder(TestMessage), Value: StringEncoder(TestMessage)} safeAddMessage(t, ps, msg) if ps.empty() { t.Error("set shouldn't be empty when a message is added") } if !ps.readyToFlush() { t.Error("by default set should be ready to flush when any message is in place") } } func TestProduceSetAddingMessagesOverflowMessagesLimit(t *testing.T) { parent, ps := makeProduceSet() parent.conf.Producer.Flush.MaxMessages = 1000 msg := &ProducerMessage{Key: StringEncoder(TestMessage), Value: StringEncoder(TestMessage)} for i := 0; i < 1000; i++ { if ps.wouldOverflow(msg) { t.Error("set shouldn't fill up after only", i+1, "messages") } safeAddMessage(t, ps, msg) } if !ps.wouldOverflow(msg) { t.Error("set should be full after 1000 messages") } } func TestProduceSetAddingMessagesOverflowBytesLimit(t *testing.T) { parent, ps := makeProduceSet() parent.conf.Producer.MaxMessageBytes = 1000 msg := &ProducerMessage{Key: StringEncoder(TestMessage), Value: StringEncoder(TestMessage)} for ps.bufferBytes+msg.ByteSize(2) < parent.conf.Producer.MaxMessageBytes { if ps.wouldOverflow(msg) { t.Error("set shouldn't fill up before 1000 bytes") } safeAddMessage(t, ps, msg) } if !ps.wouldOverflow(msg) { t.Error("set should be full after 1000 bytes") } } func TestProduceSetPartitionTracking(t *testing.T) { _, ps := makeProduceSet() m1 := &ProducerMessage{Topic: "t1", Partition: 0} m2 := &ProducerMessage{Topic: "t1", Partition: 1} m3 := &ProducerMessage{Topic: "t2", Partition: 0} safeAddMessage(t, ps, m1) safeAddMessage(t, ps, m2) safeAddMessage(t, ps, m3) seenT1P0 := false seenT1P1 := false seenT2P0 := false ps.eachPartition(func(topic string, partition int32, pSet *partitionSet) { if len(pSet.msgs) != 1 { t.Error("Wrong message count") } if topic == "t1" && partition == 0 { 
seenT1P0 = true } else if topic == "t1" && partition == 1 { seenT1P1 = true } else if topic == "t2" && partition == 0 { seenT2P0 = true } }) if !seenT1P0 { t.Error("Didn't see t1p0") } if !seenT1P1 { t.Error("Didn't see t1p1") } if !seenT2P0 { t.Error("Didn't see t2p0") } if len(ps.dropPartition("t1", 1)) != 1 { t.Error("Got wrong messages back from dropping partition") } if ps.bufferCount != 2 { t.Error("Incorrect buffer count after dropping partition") } } func TestProduceSetRequestBuilding(t *testing.T) { parent, ps := makeProduceSet() parent.conf.Producer.RequiredAcks = WaitForAll parent.conf.Producer.Timeout = 10 * time.Second msg := &ProducerMessage{ Topic: "t1", Partition: 0, Key: StringEncoder(TestMessage), Value: StringEncoder(TestMessage), } for i := 0; i < 10; i++ { safeAddMessage(t, ps, msg) } msg.Partition = 1 for i := 0; i < 10; i++ { safeAddMessage(t, ps, msg) } msg.Topic = "t2" for i := 0; i < 10; i++ { safeAddMessage(t, ps, msg) } req := ps.buildRequest() if req.RequiredAcks != WaitForAll { t.Error("RequiredAcks not set properly") } if req.Timeout != 10000 { t.Error("Timeout not set properly") } if len(req.records) != 2 { t.Error("Wrong number of topics in request") } } func TestProduceSetCompressedRequestBuilding(t *testing.T) { parent, ps := makeProduceSet() parent.conf.Producer.RequiredAcks = WaitForAll parent.conf.Producer.Timeout = 10 * time.Second parent.conf.Producer.Compression = CompressionGZIP parent.conf.Version = V0_10_0_0 msg := &ProducerMessage{ Topic: "t1", Partition: 0, Key: StringEncoder(TestMessage), Value: StringEncoder(TestMessage), Timestamp: time.Now(), } for i := 0; i < 10; i++ { safeAddMessage(t, ps, msg) } req := ps.buildRequest() if req.Version != 2 { t.Error("Wrong request version") } for _, msgBlock := range req.records["t1"][0].MsgSet.Messages { msg := msgBlock.Msg err := msg.decodeSet() if err != nil { t.Error("Failed to decode set from payload") } for i, compMsgBlock := range msg.Set.Messages { compMsg := compMsgBlock.Msg if compMsg.Version != 1 { t.Error("Wrong compressed message version") } if compMsgBlock.Offset != int64(i) { t.Errorf("Wrong relative inner offset, expected %d, got %d", i, compMsgBlock.Offset) } } if msg.Version != 1 { t.Error("Wrong compressed parent message version") } } } func TestProduceSetV3RequestBuilding(t *testing.T) { parent, ps := makeProduceSet() parent.conf.Producer.RequiredAcks = WaitForAll parent.conf.Producer.Timeout = 10 * time.Second parent.conf.Version = V0_11_0_0 now := time.Now() msg := &ProducerMessage{ Topic: "t1", Partition: 0, Key: StringEncoder(TestMessage), Value: StringEncoder(TestMessage), Headers: []RecordHeader{ { Key: []byte("header-1"), Value: []byte("value-1"), }, { Key: []byte("header-2"), Value: []byte("value-2"), }, { Key: []byte("header-3"), Value: []byte("value-3"), }, }, Timestamp: now, } for i := 0; i < 10; i++ { safeAddMessage(t, ps, msg) msg.Timestamp = msg.Timestamp.Add(time.Second) } req := ps.buildRequest() if req.Version != 3 { t.Error("Wrong request version") } batch := req.records["t1"][0].RecordBatch if !batch.FirstTimestamp.Equal(now.Truncate(time.Millisecond)) { t.Errorf("Wrong first timestamp: %v", batch.FirstTimestamp) } for i := 0; i < 10; i++ { rec := batch.Records[i] if rec.TimestampDelta != time.Duration(i)*time.Second { t.Errorf("Wrong timestamp delta: %v", rec.TimestampDelta) } if rec.OffsetDelta != int64(i) { t.Errorf("Wrong relative inner offset, expected %d, got %d", i, rec.OffsetDelta) } for j, h := range batch.Records[i].Headers { exp := 
fmt.Sprintf("header-%d", j+1) if string(h.Key) != exp { t.Errorf("Wrong header key, expected %v, got %v", exp, h.Key) } exp = fmt.Sprintf("value-%d", j+1) if string(h.Value) != exp { t.Errorf("Wrong header value, expected %v, got %v", exp, h.Value) } } } } func TestProduceSetIdempotentRequestBuilding(t *testing.T) { const pID = 1000 const pEpoch = 1234 config := NewTestConfig() config.Producer.RequiredAcks = WaitForAll config.Producer.Idempotent = true config.Version = V0_11_0_0 parent := &asyncProducer{ conf: config, txnmgr: &transactionManager{ producerID: pID, producerEpoch: pEpoch, }, } ps := newProduceSet(parent) now := time.Now() msg := &ProducerMessage{ Topic: "t1", Partition: 0, Key: StringEncoder(TestMessage), Value: StringEncoder(TestMessage), Headers: []RecordHeader{ { Key: []byte("header-1"), Value: []byte("value-1"), }, { Key: []byte("header-2"), Value: []byte("value-2"), }, { Key: []byte("header-3"), Value: []byte("value-3"), }, }, Timestamp: now, sequenceNumber: 123, } for i := 0; i < 10; i++ { safeAddMessage(t, ps, msg) msg.Timestamp = msg.Timestamp.Add(time.Second) } req := ps.buildRequest() if req.Version != 3 { t.Error("Wrong request version") } batch := req.records["t1"][0].RecordBatch if !batch.FirstTimestamp.Equal(now.Truncate(time.Millisecond)) { t.Errorf("Wrong first timestamp: %v", batch.FirstTimestamp) } if batch.ProducerID != pID { t.Errorf("Wrong producerID: %v", batch.ProducerID) } if batch.ProducerEpoch != pEpoch { t.Errorf("Wrong producerEpoch: %v", batch.ProducerEpoch) } if batch.FirstSequence != 123 { t.Errorf("Wrong first sequence: %v", batch.FirstSequence) } for i := 0; i < 10; i++ { rec := batch.Records[i] if rec.TimestampDelta != time.Duration(i)*time.Second { t.Errorf("Wrong timestamp delta: %v", rec.TimestampDelta) } if rec.OffsetDelta != int64(i) { t.Errorf("Wrong relative inner offset, expected %d, got %d", i, rec.OffsetDelta) } for j, h := range batch.Records[i].Headers { exp := fmt.Sprintf("header-%d", j+1) if string(h.Key) != exp { t.Errorf("Wrong header key, expected %v, got %v", exp, h.Key) } exp = fmt.Sprintf("value-%d", j+1) if string(h.Value) != exp { t.Errorf("Wrong header value, expected %v, got %v", exp, h.Value) } } } } func TestProduceSetConsistentTimestamps(t *testing.T) { parent, ps1 := makeProduceSet() ps2 := newProduceSet(parent) parent.conf.Producer.RequiredAcks = WaitForAll parent.conf.Producer.Timeout = 10 * time.Second parent.conf.Version = V0_11_0_0 msg1 := &ProducerMessage{ Topic: "t1", Partition: 0, Key: StringEncoder(TestMessage), Value: StringEncoder(TestMessage), Timestamp: time.Unix(1555718400, 500000000), sequenceNumber: 123, } msg2 := &ProducerMessage{ Topic: "t1", Partition: 0, Key: StringEncoder(TestMessage), Value: StringEncoder(TestMessage), Timestamp: time.Unix(1555718400, 500900000), sequenceNumber: 123, } msg3 := &ProducerMessage{ Topic: "t1", Partition: 0, Key: StringEncoder(TestMessage), Value: StringEncoder(TestMessage), Timestamp: time.Unix(1555718400, 600000000), sequenceNumber: 123, } safeAddMessage(t, ps1, msg1) safeAddMessage(t, ps1, msg3) req1 := ps1.buildRequest() if req1.Version != 3 { t.Error("Wrong request version") } batch1 := req1.records["t1"][0].RecordBatch ft1 := batch1.FirstTimestamp.Unix()*1000 + int64(batch1.FirstTimestamp.Nanosecond()/1000000) time1 := ft1 + int64(batch1.Records[1].TimestampDelta/time.Millisecond) safeAddMessage(t, ps2, msg2) safeAddMessage(t, ps2, msg3) req2 := ps2.buildRequest() if req2.Version != 3 { t.Error("Wrong request version") } batch2 := 
req2.records["t1"][0].RecordBatch ft2 := batch2.FirstTimestamp.Unix()*1000 + int64(batch2.FirstTimestamp.Nanosecond()/1000000) time2 := ft2 + int64(batch2.Records[1].TimestampDelta/time.Millisecond) if time1 != time2 { t.Errorf("Message timestamps do not match: %v, %v", time1, time2) } } golang-github-ibm-sarama-1.43.2/quota_types.go000066400000000000000000000010731461256741300212550ustar00rootroot00000000000000package sarama type ( QuotaEntityType string QuotaMatchType int ) // ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/quota/ClientQuotaEntity.java const ( QuotaEntityUser QuotaEntityType = "user" QuotaEntityClientID QuotaEntityType = "client-id" QuotaEntityIP QuotaEntityType = "ip" ) // ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/requests/DescribeClientQuotasRequest.java const ( QuotaMatchExact QuotaMatchType = iota QuotaMatchDefault QuotaMatchAny ) golang-github-ibm-sarama-1.43.2/real_decoder.go000066400000000000000000000215651461256741300213200ustar00rootroot00000000000000package sarama import ( "encoding/binary" "math" "github.com/rcrowley/go-metrics" ) var ( errInvalidArrayLength = PacketDecodingError{"invalid array length"} errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"} errInvalidStringLength = PacketDecodingError{"invalid string length"} errVarintOverflow = PacketDecodingError{"varint overflow"} errUVarintOverflow = PacketDecodingError{"uvarint overflow"} errInvalidBool = PacketDecodingError{"invalid bool"} ) type realDecoder struct { raw []byte off int stack []pushDecoder registry metrics.Registry } // primitives func (rd *realDecoder) getInt8() (int8, error) { if rd.remaining() < 1 { rd.off = len(rd.raw) return -1, ErrInsufficientData } tmp := int8(rd.raw[rd.off]) rd.off++ return tmp, nil } func (rd *realDecoder) getInt16() (int16, error) { if rd.remaining() < 2 { rd.off = len(rd.raw) return -1, ErrInsufficientData } tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:])) rd.off += 2 return tmp, nil } func (rd *realDecoder) getInt32() (int32, error) { if rd.remaining() < 4 { rd.off = len(rd.raw) return -1, ErrInsufficientData } tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) rd.off += 4 return tmp, nil } func (rd *realDecoder) getInt64() (int64, error) { if rd.remaining() < 8 { rd.off = len(rd.raw) return -1, ErrInsufficientData } tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:])) rd.off += 8 return tmp, nil } func (rd *realDecoder) getVarint() (int64, error) { tmp, n := binary.Varint(rd.raw[rd.off:]) if n == 0 { rd.off = len(rd.raw) return -1, ErrInsufficientData } if n < 0 { rd.off -= n return -1, errVarintOverflow } rd.off += n return tmp, nil } func (rd *realDecoder) getUVarint() (uint64, error) { tmp, n := binary.Uvarint(rd.raw[rd.off:]) if n == 0 { rd.off = len(rd.raw) return 0, ErrInsufficientData } if n < 0 { rd.off -= n return 0, errUVarintOverflow } rd.off += n return tmp, nil } func (rd *realDecoder) getFloat64() (float64, error) { if rd.remaining() < 8 { rd.off = len(rd.raw) return -1, ErrInsufficientData } tmp := math.Float64frombits(binary.BigEndian.Uint64(rd.raw[rd.off:])) rd.off += 8 return tmp, nil } func (rd *realDecoder) getArrayLength() (int, error) { if rd.remaining() < 4 { rd.off = len(rd.raw) return -1, ErrInsufficientData } tmp := int(int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))) rd.off += 4 if tmp > rd.remaining() { rd.off = len(rd.raw) return -1, ErrInsufficientData } else if tmp > 2*math.MaxUint16 { return -1, 
errInvalidArrayLength } return tmp, nil } func (rd *realDecoder) getCompactArrayLength() (int, error) { n, err := rd.getUVarint() if err != nil { return 0, err } if n == 0 { return 0, nil } return int(n) - 1, nil } func (rd *realDecoder) getBool() (bool, error) { b, err := rd.getInt8() if err != nil || b == 0 { return false, err } if b != 1 { return false, errInvalidBool } return true, nil } func (rd *realDecoder) getEmptyTaggedFieldArray() (int, error) { tagCount, err := rd.getUVarint() if err != nil { return 0, err } // skip over any tagged fields without deserializing them // as we don't currently support doing anything with them for i := uint64(0); i < tagCount; i++ { // fetch and ignore tag identifier _, err := rd.getUVarint() if err != nil { return 0, err } length, err := rd.getUVarint() if err != nil { return 0, err } if _, err := rd.getRawBytes(int(length)); err != nil { return 0, err } } return 0, nil } // collections func (rd *realDecoder) getBytes() ([]byte, error) { tmp, err := rd.getInt32() if err != nil { return nil, err } if tmp == -1 { return nil, nil } return rd.getRawBytes(int(tmp)) } func (rd *realDecoder) getVarintBytes() ([]byte, error) { tmp, err := rd.getVarint() if err != nil { return nil, err } if tmp == -1 { return nil, nil } return rd.getRawBytes(int(tmp)) } func (rd *realDecoder) getCompactBytes() ([]byte, error) { n, err := rd.getUVarint() if err != nil { return nil, err } length := int(n - 1) return rd.getRawBytes(length) } func (rd *realDecoder) getStringLength() (int, error) { length, err := rd.getInt16() if err != nil { return 0, err } n := int(length) switch { case n < -1: return 0, errInvalidStringLength case n > rd.remaining(): rd.off = len(rd.raw) return 0, ErrInsufficientData } return n, nil } func (rd *realDecoder) getString() (string, error) { n, err := rd.getStringLength() if err != nil || n == -1 { return "", err } tmpStr := string(rd.raw[rd.off : rd.off+n]) rd.off += n return tmpStr, nil } func (rd *realDecoder) getNullableString() (*string, error) { n, err := rd.getStringLength() if err != nil || n == -1 { return nil, err } tmpStr := string(rd.raw[rd.off : rd.off+n]) rd.off += n return &tmpStr, err } func (rd *realDecoder) getCompactString() (string, error) { n, err := rd.getUVarint() if err != nil { return "", err } length := int(n - 1) if length < 0 { return "", errInvalidByteSliceLength } tmpStr := string(rd.raw[rd.off : rd.off+length]) rd.off += length return tmpStr, nil } func (rd *realDecoder) getCompactNullableString() (*string, error) { n, err := rd.getUVarint() if err != nil { return nil, err } length := int(n - 1) if length < 0 { return nil, err } tmpStr := string(rd.raw[rd.off : rd.off+length]) rd.off += length return &tmpStr, err } func (rd *realDecoder) getCompactInt32Array() ([]int32, error) { n, err := rd.getUVarint() if err != nil { return nil, err } if n == 0 { return nil, nil } arrayLength := int(n) - 1 ret := make([]int32, arrayLength) for i := range ret { ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) rd.off += 4 } return ret, nil } func (rd *realDecoder) getInt32Array() ([]int32, error) { if rd.remaining() < 4 { rd.off = len(rd.raw) return nil, ErrInsufficientData } n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) rd.off += 4 if rd.remaining() < 4*n { rd.off = len(rd.raw) return nil, ErrInsufficientData } if n == 0 { return nil, nil } if n < 0 { return nil, errInvalidArrayLength } ret := make([]int32, n) for i := range ret { ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) rd.off += 4 } return ret, nil } 
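// As a worked illustration of the convention used by the compact getters above
// (getCompactArrayLength, getCompactBytes, getCompactString, getCompactNullableString):
// under Kafka's flexible-versions encoding, a length N is transmitted as the unsigned
// varint N+1, with 0 reserved for null. For example:
//
//	0x00             -> null (for the nullable variants)
//	0x01             -> empty value (length 0)
//	0x04 'f' 'o' 'o' -> "foo" (length 3 encoded as 4)
//
// This is why the decoders above subtract one from the decoded uvarint, and why the
// matching putCompact* encoders in real_encoder.go add one before writing.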
func (rd *realDecoder) getInt64Array() ([]int64, error) { if rd.remaining() < 4 { rd.off = len(rd.raw) return nil, ErrInsufficientData } n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) rd.off += 4 if rd.remaining() < 8*n { rd.off = len(rd.raw) return nil, ErrInsufficientData } if n == 0 { return nil, nil } if n < 0 { return nil, errInvalidArrayLength } ret := make([]int64, n) for i := range ret { ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:])) rd.off += 8 } return ret, nil } func (rd *realDecoder) getStringArray() ([]string, error) { if rd.remaining() < 4 { rd.off = len(rd.raw) return nil, ErrInsufficientData } n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) rd.off += 4 if n == 0 { return nil, nil } if n < 0 { return nil, errInvalidArrayLength } ret := make([]string, n) for i := range ret { str, err := rd.getString() if err != nil { return nil, err } ret[i] = str } return ret, nil } // subsets func (rd *realDecoder) remaining() int { return len(rd.raw) - rd.off } func (rd *realDecoder) getSubset(length int) (packetDecoder, error) { buf, err := rd.getRawBytes(length) if err != nil { return nil, err } return &realDecoder{raw: buf}, nil } func (rd *realDecoder) getRawBytes(length int) ([]byte, error) { if length < 0 { return nil, errInvalidByteSliceLength } else if length > rd.remaining() { rd.off = len(rd.raw) return nil, ErrInsufficientData } start := rd.off rd.off += length return rd.raw[start:rd.off], nil } func (rd *realDecoder) peek(offset, length int) (packetDecoder, error) { if rd.remaining() < offset+length { return nil, ErrInsufficientData } off := rd.off + offset return &realDecoder{raw: rd.raw[off : off+length]}, nil } func (rd *realDecoder) peekInt8(offset int) (int8, error) { const byteLen = 1 if rd.remaining() < offset+byteLen { return -1, ErrInsufficientData } return int8(rd.raw[rd.off+offset]), nil } // stacks func (rd *realDecoder) push(in pushDecoder) error { in.saveOffset(rd.off) var reserve int if dpd, ok := in.(dynamicPushDecoder); ok { if err := dpd.decode(rd); err != nil { return err } } else { reserve = in.reserveLength() if rd.remaining() < reserve { rd.off = len(rd.raw) return ErrInsufficientData } } rd.stack = append(rd.stack, in) rd.off += reserve return nil } func (rd *realDecoder) pop() error { // this is go's ugly pop pattern (the inverse of append) in := rd.stack[len(rd.stack)-1] rd.stack = rd.stack[:len(rd.stack)-1] return in.check(rd.off, rd.raw) } func (rd *realDecoder) metricRegistry() metrics.Registry { return rd.registry } golang-github-ibm-sarama-1.43.2/real_encoder.go000066400000000000000000000102421461256741300213200ustar00rootroot00000000000000package sarama import ( "encoding/binary" "errors" "math" "github.com/rcrowley/go-metrics" ) type realEncoder struct { raw []byte off int stack []pushEncoder registry metrics.Registry } // primitives func (re *realEncoder) putInt8(in int8) { re.raw[re.off] = byte(in) re.off++ } func (re *realEncoder) putInt16(in int16) { binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in)) re.off += 2 } func (re *realEncoder) putInt32(in int32) { binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in)) re.off += 4 } func (re *realEncoder) putInt64(in int64) { binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in)) re.off += 8 } func (re *realEncoder) putVarint(in int64) { re.off += binary.PutVarint(re.raw[re.off:], in) } func (re *realEncoder) putUVarint(in uint64) { re.off += binary.PutUvarint(re.raw[re.off:], in) } func (re *realEncoder) putFloat64(in float64) { binary.BigEndian.PutUint64(re.raw[re.off:], 
math.Float64bits(in)) re.off += 8 } func (re *realEncoder) putArrayLength(in int) error { re.putInt32(int32(in)) return nil } func (re *realEncoder) putCompactArrayLength(in int) { // 0 represents a null array, so +1 has to be added re.putUVarint(uint64(in + 1)) } func (re *realEncoder) putBool(in bool) { if in { re.putInt8(1) return } re.putInt8(0) } // collection func (re *realEncoder) putRawBytes(in []byte) error { copy(re.raw[re.off:], in) re.off += len(in) return nil } func (re *realEncoder) putBytes(in []byte) error { if in == nil { re.putInt32(-1) return nil } re.putInt32(int32(len(in))) return re.putRawBytes(in) } func (re *realEncoder) putVarintBytes(in []byte) error { if in == nil { re.putVarint(-1) return nil } re.putVarint(int64(len(in))) return re.putRawBytes(in) } func (re *realEncoder) putCompactBytes(in []byte) error { re.putUVarint(uint64(len(in) + 1)) return re.putRawBytes(in) } func (re *realEncoder) putCompactString(in string) error { re.putCompactArrayLength(len(in)) return re.putRawBytes([]byte(in)) } func (re *realEncoder) putNullableCompactString(in *string) error { if in == nil { re.putInt8(0) return nil } return re.putCompactString(*in) } func (re *realEncoder) putString(in string) error { re.putInt16(int16(len(in))) copy(re.raw[re.off:], in) re.off += len(in) return nil } func (re *realEncoder) putNullableString(in *string) error { if in == nil { re.putInt16(-1) return nil } return re.putString(*in) } func (re *realEncoder) putStringArray(in []string) error { err := re.putArrayLength(len(in)) if err != nil { return err } for _, val := range in { if err := re.putString(val); err != nil { return err } } return nil } func (re *realEncoder) putCompactInt32Array(in []int32) error { if in == nil { return errors.New("expected int32 array to be non null") } // 0 represents a null array, so +1 has to be added re.putUVarint(uint64(len(in)) + 1) for _, val := range in { re.putInt32(val) } return nil } func (re *realEncoder) putNullableCompactInt32Array(in []int32) error { if in == nil { re.putUVarint(0) return nil } // 0 represents a null array, so +1 has to be added re.putUVarint(uint64(len(in)) + 1) for _, val := range in { re.putInt32(val) } return nil } func (re *realEncoder) putInt32Array(in []int32) error { err := re.putArrayLength(len(in)) if err != nil { return err } for _, val := range in { re.putInt32(val) } return nil } func (re *realEncoder) putInt64Array(in []int64) error { err := re.putArrayLength(len(in)) if err != nil { return err } for _, val := range in { re.putInt64(val) } return nil } func (re *realEncoder) putEmptyTaggedFieldArray() { re.putUVarint(0) } func (re *realEncoder) offset() int { return re.off } // stacks func (re *realEncoder) push(in pushEncoder) { in.saveOffset(re.off) re.off += in.reserveLength() re.stack = append(re.stack, in) } func (re *realEncoder) pop() error { // this is go's ugly pop pattern (the inverse of append) in := re.stack[len(re.stack)-1] re.stack = re.stack[:len(re.stack)-1] return in.run(re.off, re.raw) } // we do record metrics during the real encoder pass func (re *realEncoder) metricRegistry() metrics.Registry { return re.registry } golang-github-ibm-sarama-1.43.2/record.go000066400000000000000000000042701461256741300201600ustar00rootroot00000000000000package sarama import ( "encoding/binary" "time" ) const ( isTransactionalMask = 0x10 controlMask = 0x20 maximumRecordOverhead = 5*binary.MaxVarintLen32 + binary.MaxVarintLen64 + 1 ) // RecordHeader stores key and value for a record header type RecordHeader struct { Key 
[]byte Value []byte } func (h *RecordHeader) encode(pe packetEncoder) error { if err := pe.putVarintBytes(h.Key); err != nil { return err } return pe.putVarintBytes(h.Value) } func (h *RecordHeader) decode(pd packetDecoder) (err error) { if h.Key, err = pd.getVarintBytes(); err != nil { return err } if h.Value, err = pd.getVarintBytes(); err != nil { return err } return nil } // Record is kafka record type type Record struct { Headers []*RecordHeader Attributes int8 TimestampDelta time.Duration OffsetDelta int64 Key []byte Value []byte length varintLengthField } func (r *Record) encode(pe packetEncoder) error { pe.push(&r.length) pe.putInt8(r.Attributes) pe.putVarint(int64(r.TimestampDelta / time.Millisecond)) pe.putVarint(r.OffsetDelta) if err := pe.putVarintBytes(r.Key); err != nil { return err } if err := pe.putVarintBytes(r.Value); err != nil { return err } pe.putVarint(int64(len(r.Headers))) for _, h := range r.Headers { if err := h.encode(pe); err != nil { return err } } return pe.pop() } func (r *Record) decode(pd packetDecoder) (err error) { if err = pd.push(&r.length); err != nil { return err } if r.Attributes, err = pd.getInt8(); err != nil { return err } timestamp, err := pd.getVarint() if err != nil { return err } r.TimestampDelta = time.Duration(timestamp) * time.Millisecond if r.OffsetDelta, err = pd.getVarint(); err != nil { return err } if r.Key, err = pd.getVarintBytes(); err != nil { return err } if r.Value, err = pd.getVarintBytes(); err != nil { return err } numHeaders, err := pd.getVarint() if err != nil { return err } if numHeaders >= 0 { r.Headers = make([]*RecordHeader, numHeaders) } for i := int64(0); i < numHeaders; i++ { hdr := new(RecordHeader) if err := hdr.decode(pd); err != nil { return err } r.Headers[i] = hdr } return pd.pop() } golang-github-ibm-sarama-1.43.2/record_batch.go000066400000000000000000000112571461256741300213240ustar00rootroot00000000000000package sarama import ( "errors" "fmt" "time" ) const recordBatchOverhead = 49 type recordsArray []*Record func (e recordsArray) encode(pe packetEncoder) error { for _, r := range e { if err := r.encode(pe); err != nil { return err } } return nil } func (e recordsArray) decode(pd packetDecoder) error { records := make([]Record, len(e)) for i := range e { if err := records[i].decode(pd); err != nil { return err } e[i] = &records[i] } return nil } type RecordBatch struct { FirstOffset int64 PartitionLeaderEpoch int32 Version int8 Codec CompressionCodec CompressionLevel int Control bool LogAppendTime bool LastOffsetDelta int32 FirstTimestamp time.Time MaxTimestamp time.Time ProducerID int64 ProducerEpoch int16 FirstSequence int32 Records []*Record PartialTrailingRecord bool IsTransactional bool compressedRecords []byte recordsLen int // uncompressed records size } func (b *RecordBatch) LastOffset() int64 { return b.FirstOffset + int64(b.LastOffsetDelta) } func (b *RecordBatch) encode(pe packetEncoder) error { if b.Version != 2 { return PacketEncodingError{fmt.Sprintf("unsupported record batch version (%d)", b.Version)} } pe.putInt64(b.FirstOffset) pe.push(&lengthField{}) pe.putInt32(b.PartitionLeaderEpoch) pe.putInt8(b.Version) pe.push(newCRC32Field(crcCastagnoli)) pe.putInt16(b.computeAttributes()) pe.putInt32(b.LastOffsetDelta) if err := (Timestamp{&b.FirstTimestamp}).encode(pe); err != nil { return err } if err := (Timestamp{&b.MaxTimestamp}).encode(pe); err != nil { return err } pe.putInt64(b.ProducerID) pe.putInt16(b.ProducerEpoch) pe.putInt32(b.FirstSequence) if err := pe.putArrayLength(len(b.Records)); 
err != nil { return err } if b.compressedRecords == nil { if err := b.encodeRecords(pe); err != nil { return err } } if err := pe.putRawBytes(b.compressedRecords); err != nil { return err } if err := pe.pop(); err != nil { return err } return pe.pop() } func (b *RecordBatch) decode(pd packetDecoder) (err error) { if b.FirstOffset, err = pd.getInt64(); err != nil { return err } batchLen, err := pd.getInt32() if err != nil { return err } if b.PartitionLeaderEpoch, err = pd.getInt32(); err != nil { return err } if b.Version, err = pd.getInt8(); err != nil { return err } crc32Decoder := acquireCrc32Field(crcCastagnoli) defer releaseCrc32Field(crc32Decoder) if err = pd.push(crc32Decoder); err != nil { return err } attributes, err := pd.getInt16() if err != nil { return err } b.Codec = CompressionCodec(int8(attributes) & compressionCodecMask) b.Control = attributes&controlMask == controlMask b.LogAppendTime = attributes&timestampTypeMask == timestampTypeMask b.IsTransactional = attributes&isTransactionalMask == isTransactionalMask if b.LastOffsetDelta, err = pd.getInt32(); err != nil { return err } if err = (Timestamp{&b.FirstTimestamp}).decode(pd); err != nil { return err } if err = (Timestamp{&b.MaxTimestamp}).decode(pd); err != nil { return err } if b.ProducerID, err = pd.getInt64(); err != nil { return err } if b.ProducerEpoch, err = pd.getInt16(); err != nil { return err } if b.FirstSequence, err = pd.getInt32(); err != nil { return err } numRecs, err := pd.getArrayLength() if err != nil { return err } if numRecs >= 0 { b.Records = make([]*Record, numRecs) } bufSize := int(batchLen) - recordBatchOverhead recBuffer, err := pd.getRawBytes(bufSize) if err != nil { if errors.Is(err, ErrInsufficientData) { b.PartialTrailingRecord = true b.Records = nil return nil } return err } if err = pd.pop(); err != nil { return err } recBuffer, err = decompress(b.Codec, recBuffer) if err != nil { return err } b.recordsLen = len(recBuffer) err = decode(recBuffer, recordsArray(b.Records), nil) if errors.Is(err, ErrInsufficientData) { b.PartialTrailingRecord = true b.Records = nil return nil } return err } func (b *RecordBatch) encodeRecords(pe packetEncoder) error { var raw []byte var err error if raw, err = encode(recordsArray(b.Records), pe.metricRegistry()); err != nil { return err } b.recordsLen = len(raw) b.compressedRecords, err = compress(b.Codec, b.CompressionLevel, raw) return err } func (b *RecordBatch) computeAttributes() int16 { attr := int16(b.Codec) & int16(compressionCodecMask) if b.Control { attr |= controlMask } if b.LogAppendTime { attr |= timestampTypeMask } if b.IsTransactional { attr |= isTransactionalMask } return attr } func (b *RecordBatch) addRecord(r *Record) { b.Records = append(b.Records, r) } golang-github-ibm-sarama-1.43.2/record_test.go000066400000000000000000000163351461256741300212240ustar00rootroot00000000000000package sarama import ( "reflect" "testing" "time" "github.com/davecgh/go-spew/spew" ) func recordBatchTestCases() []struct { name string batch RecordBatch encoded []byte oldGoEncoded []byte } { return []struct { name string batch RecordBatch encoded []byte oldGoEncoded []byte // used in case of gzipped content for go versions prior to 1.8 }{ { name: "empty record", batch: RecordBatch{ Version: 2, FirstTimestamp: time.Unix(0, 0), MaxTimestamp: time.Unix(0, 0), Records: []*Record{}, }, encoded: []byte{ 0, 0, 0, 0, 0, 0, 0, 0, // First Offset 0, 0, 0, 49, // Length 0, 0, 0, 0, // Partition Leader Epoch 2, // Version 89, 95, 183, 221, // CRC 0, 0, // Attributes 0, 0, 0, 0, //
Last Offset Delta 0, 0, 0, 0, 0, 0, 0, 0, // First Timestamp 0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp 0, 0, 0, 0, 0, 0, 0, 0, // Producer ID 0, 0, // Producer Epoch 0, 0, 0, 0, // First Sequence 0, 0, 0, 0, // Number of Records }, }, { name: "control batch", batch: RecordBatch{ Version: 2, Control: true, FirstTimestamp: time.Unix(0, 0), MaxTimestamp: time.Unix(0, 0), Records: []*Record{}, }, encoded: []byte{ 0, 0, 0, 0, 0, 0, 0, 0, // First Offset 0, 0, 0, 49, // Length 0, 0, 0, 0, // Partition Leader Epoch 2, // Version 81, 46, 67, 217, // CRC 0, 32, // Attributes 0, 0, 0, 0, // Last Offset Delta 0, 0, 0, 0, 0, 0, 0, 0, // First Timestamp 0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp 0, 0, 0, 0, 0, 0, 0, 0, // Producer ID 0, 0, // Producer Epoch 0, 0, 0, 0, // First Sequence 0, 0, 0, 0, // Number of Records }, }, { name: "uncompressed record", batch: RecordBatch{ Version: 2, FirstTimestamp: time.Unix(1479847795, 0), MaxTimestamp: time.Unix(0, 0), LastOffsetDelta: 0, Records: []*Record{{ TimestampDelta: 5 * time.Millisecond, Key: []byte{1, 2, 3, 4}, Value: []byte{5, 6, 7}, Headers: []*RecordHeader{{ Key: []byte{8, 9, 10}, Value: []byte{11, 12}, }}, }}, recordsLen: 21, }, encoded: []byte{ 0, 0, 0, 0, 0, 0, 0, 0, // First Offset 0, 0, 0, 70, // Length 0, 0, 0, 0, // Partition Leader Epoch 2, // Version 84, 121, 97, 253, // CRC 0, 0, // Attributes 0, 0, 0, 0, // Last Offset Delta 0, 0, 1, 88, 141, 205, 89, 56, // First Timestamp 0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp 0, 0, 0, 0, 0, 0, 0, 0, // Producer ID 0, 0, // Producer Epoch 0, 0, 0, 0, // First Sequence 0, 0, 0, 1, // Number of Records 40, // Record Length 0, // Attributes 10, // Timestamp Delta 0, // Offset Delta 8, // Key Length 1, 2, 3, 4, 6, // Value Length 5, 6, 7, 2, // Number of Headers 6, // Header Key Length 8, 9, 10, // Header Key 4, // Header Value Length 11, 12, // Header Value }, }, { name: "gzipped record", batch: RecordBatch{ Version: 2, Codec: CompressionGZIP, CompressionLevel: CompressionLevelDefault, FirstTimestamp: time.Unix(1479847795, 0), MaxTimestamp: time.Unix(0, 0), LastOffsetDelta: 0, Records: []*Record{{ TimestampDelta: 5 * time.Millisecond, Key: []byte{1, 2, 3, 4}, Value: []byte{5, 6, 7}, Headers: []*RecordHeader{{ Key: []byte{8, 9, 10}, Value: []byte{11, 12}, }}, }}, recordsLen: 21, }, encoded: []byte{ 0, 0, 0, 0, 0, 0, 0, 0, // First Offset 0, 0, 0, 95, // Length 0, 0, 0, 0, // Partition Leader Epoch 2, // Version 231, 74, 206, 165, // CRC 0, 1, // Attributes 0, 0, 0, 0, // Last Offset Delta 0, 0, 1, 88, 141, 205, 89, 56, // First Timestamp 0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp 0, 0, 0, 0, 0, 0, 0, 0, // Producer ID 0, 0, // Producer Epoch 0, 0, 0, 0, // First Sequence 0, 0, 0, 1, // Number of Records 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 0, 21, 0, 234, 255, 40, 0, 10, 0, 8, 1, 2, 3, 4, 6, 5, 6, 7, 2, 6, 8, 9, 10, 4, 11, 12, 3, 0, 173, 201, 88, 103, 21, 0, 0, 0, }, }, { name: "snappy compressed record", batch: RecordBatch{ Version: 2, Codec: CompressionSnappy, FirstTimestamp: time.Unix(1479847795, 0), MaxTimestamp: time.Unix(0, 0), LastOffsetDelta: 0, Records: []*Record{{ TimestampDelta: 5 * time.Millisecond, Key: []byte{1, 2, 3, 4}, Value: []byte{5, 6, 7}, Headers: []*RecordHeader{{ Key: []byte{8, 9, 10}, Value: []byte{11, 12}, }}, }}, recordsLen: 21, }, encoded: []byte{ 0, 0, 0, 0, 0, 0, 0, 0, // First Offset 0, 0, 0, 72, // Length 0, 0, 0, 0, // Partition Leader Epoch 2, // Version 21, 0, 159, 97, // CRC 0, 2, // Attributes 0, 0, 0, 0, // Last Offset Delta 0, 0, 1, 88, 141, 205, 89, 56, // First Timestamp 
0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp 0, 0, 0, 0, 0, 0, 0, 0, // Producer ID 0, 0, // Producer Epoch 0, 0, 0, 0, // First Sequence 0, 0, 0, 1, // Number of Records 21, 80, 40, 0, 10, 0, 8, 1, 2, 3, 4, 6, 5, 6, 7, 2, 6, 8, 9, 10, 4, 11, 12, }, }, { name: "lz4 compressed record", batch: RecordBatch{ Version: 2, Codec: CompressionLZ4, FirstTimestamp: time.Unix(1479847795, 0), MaxTimestamp: time.Unix(0, 0), LastOffsetDelta: 0, Records: []*Record{{ TimestampDelta: 5 * time.Millisecond, Key: []byte{1, 2, 3, 4}, Value: []byte{5, 6, 7}, Headers: []*RecordHeader{{ Key: []byte{8, 9, 10}, Value: []byte{11, 12}, }}, }}, recordsLen: 21, }, encoded: []byte{ 0, 0, 0, 0, 0, 0, 0, 0, // First Offset 0, 0, 0, 89, // Length 0, 0, 0, 0, // Partition Leader Epoch 2, // Version 169, 74, 119, 197, // CRC 0, 3, // Attributes 0, 0, 0, 0, // Last Offset Delta 0, 0, 1, 88, 141, 205, 89, 56, // First Timestamp 0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp 0, 0, 0, 0, 0, 0, 0, 0, // Producer ID 0, 0, // Producer Epoch 0, 0, 0, 0, // First Sequence 0, 0, 0, 1, // Number of Records 4, 34, 77, 24, 100, 112, 185, 21, 0, 0, 128, 40, 0, 10, 0, 8, 1, 2, 3, 4, 6, 5, 6, 7, 2, 6, 8, 9, 10, 4, 11, 12, 0, 0, 0, 0, 12, 59, 239, 146, }, }, } } func TestRecordBatchEncoding(t *testing.T) { for _, tc := range recordBatchTestCases() { tc := tc testEncodable(t, tc.name, &tc.batch, tc.encoded) } } func TestRecordBatchDecoding(t *testing.T) { for _, tc := range recordBatchTestCases() { batch := RecordBatch{} testDecodable(t, tc.name, &batch, tc.encoded) for _, r := range batch.Records { r.length = varintLengthField{} } // The compression level is not restored on decoding. It is not needed // anyway. We only set it here to ensure that comparison succeeds. batch.CompressionLevel = tc.batch.CompressionLevel if !reflect.DeepEqual(batch, tc.batch) { t.Errorf(spew.Sprintf("invalid decode of %s\ngot %+v\nwanted %+v", tc.name, batch, tc.batch)) } } } golang-github-ibm-sarama-1.43.2/records.go000066400000000000000000000117461461256741300203510ustar00rootroot00000000000000package sarama import "fmt" const ( unknownRecords = iota legacyRecords defaultRecords magicOffset = 16 ) // Records implements a union type containing either a RecordBatch or a legacy MessageSet. type Records struct { recordsType int MsgSet *MessageSet RecordBatch *RecordBatch } func newLegacyRecords(msgSet *MessageSet) Records { return Records{recordsType: legacyRecords, MsgSet: msgSet} } func newDefaultRecords(batch *RecordBatch) Records { return Records{recordsType: defaultRecords, RecordBatch: batch} } // setTypeFromFields sets type of Records depending on which of MsgSet or RecordBatch is not nil. // The first return value indicates whether both fields are nil (and the type is not set). // If both fields are not nil, it returns an error. 
func (r *Records) setTypeFromFields() (bool, error) { if r.MsgSet == nil && r.RecordBatch == nil { return true, nil } if r.MsgSet != nil && r.RecordBatch != nil { return false, fmt.Errorf("both MsgSet and RecordBatch are set, but record type is unknown") } r.recordsType = defaultRecords if r.MsgSet != nil { r.recordsType = legacyRecords } return false, nil } func (r *Records) encode(pe packetEncoder) error { if r.recordsType == unknownRecords { if empty, err := r.setTypeFromFields(); err != nil || empty { return err } } switch r.recordsType { case legacyRecords: if r.MsgSet == nil { return nil } return r.MsgSet.encode(pe) case defaultRecords: if r.RecordBatch == nil { return nil } return r.RecordBatch.encode(pe) } return fmt.Errorf("unknown records type: %v", r.recordsType) } func (r *Records) setTypeFromMagic(pd packetDecoder) error { magic, err := magicValue(pd) if err != nil { return err } r.recordsType = defaultRecords if magic < 2 { r.recordsType = legacyRecords } return nil } func (r *Records) decode(pd packetDecoder) error { if r.recordsType == unknownRecords { if err := r.setTypeFromMagic(pd); err != nil { return err } } switch r.recordsType { case legacyRecords: r.MsgSet = &MessageSet{} return r.MsgSet.decode(pd) case defaultRecords: r.RecordBatch = &RecordBatch{} return r.RecordBatch.decode(pd) } return fmt.Errorf("unknown records type: %v", r.recordsType) } func (r *Records) numRecords() (int, error) { if r.recordsType == unknownRecords { if empty, err := r.setTypeFromFields(); err != nil || empty { return 0, err } } switch r.recordsType { case legacyRecords: if r.MsgSet == nil { return 0, nil } return len(r.MsgSet.Messages), nil case defaultRecords: if r.RecordBatch == nil { return 0, nil } return len(r.RecordBatch.Records), nil } return 0, fmt.Errorf("unknown records type: %v", r.recordsType) } func (r *Records) isPartial() (bool, error) { if r.recordsType == unknownRecords { if empty, err := r.setTypeFromFields(); err != nil || empty { return false, err } } switch r.recordsType { case unknownRecords: return false, nil case legacyRecords: if r.MsgSet == nil { return false, nil } return r.MsgSet.PartialTrailingMessage, nil case defaultRecords: if r.RecordBatch == nil { return false, nil } return r.RecordBatch.PartialTrailingRecord, nil } return false, fmt.Errorf("unknown records type: %v", r.recordsType) } func (r *Records) isControl() (bool, error) { if r.recordsType == unknownRecords { if empty, err := r.setTypeFromFields(); err != nil || empty { return false, err } } switch r.recordsType { case legacyRecords: return false, nil case defaultRecords: if r.RecordBatch == nil { return false, nil } return r.RecordBatch.Control, nil } return false, fmt.Errorf("unknown records type: %v", r.recordsType) } func (r *Records) isOverflow() (bool, error) { if r.recordsType == unknownRecords { if empty, err := r.setTypeFromFields(); err != nil || empty { return false, err } } switch r.recordsType { case unknownRecords: return false, nil case legacyRecords: if r.MsgSet == nil { return false, nil } return r.MsgSet.OverflowMessage, nil case defaultRecords: return false, nil } return false, fmt.Errorf("unknown records type: %v", r.recordsType) } func (r *Records) recordsOffset() (*int64, error) { switch r.recordsType { case unknownRecords: return nil, nil case legacyRecords: return nil, nil case defaultRecords: if r.RecordBatch == nil { return nil, nil } return &r.RecordBatch.FirstOffset, nil } return nil, fmt.Errorf("unknown records type: %v", r.recordsType) } func magicValue(pd 
packetDecoder) (int8, error) { return pd.peekInt8(magicOffset) } func (r *Records) getControlRecord() (ControlRecord, error) { if r.RecordBatch == nil || len(r.RecordBatch.Records) == 0 { return ControlRecord{}, fmt.Errorf("cannot get control record, record batch is empty") } firstRecord := r.RecordBatch.Records[0] controlRecord := ControlRecord{} err := controlRecord.decode(&realDecoder{raw: firstRecord.Key}, &realDecoder{raw: firstRecord.Value}) if err != nil { return ControlRecord{}, err } return controlRecord, nil } golang-github-ibm-sarama-1.43.2/records_test.go000066400000000000000000000055001461256741300213770ustar00rootroot00000000000000package sarama import ( "bytes" "reflect" "testing" ) func TestLegacyRecords(t *testing.T) { set := &MessageSet{ Messages: []*MessageBlock{ { Msg: &Message{ Version: 1, }, }, }, } r := newLegacyRecords(set) exp, err := encode(set, nil) if err != nil { t.Fatal(err) } buf, err := encode(&r, nil) if err != nil { t.Fatal(err) } if !bytes.Equal(buf, exp) { t.Errorf("Wrong encoding for legacy records, wanted %v, got %v", exp, buf) } set = &MessageSet{} r = Records{} err = decode(exp, set, nil) if err != nil { t.Fatal(err) } err = decode(buf, &r, nil) if err != nil { t.Fatal(err) } if r.recordsType != legacyRecords { t.Fatalf("Wrong records type %v, expected %v", r.recordsType, legacyRecords) } if !reflect.DeepEqual(set, r.MsgSet) { t.Errorf("Wrong decoding for legacy records, wanted %#+v, got %#+v", set, r.MsgSet) } n, err := r.numRecords() if err != nil { t.Fatal(err) } if n != 1 { t.Errorf("Wrong number of records, wanted 1, got %d", n) } p, err := r.isPartial() if err != nil { t.Fatal(err) } if p { t.Errorf("MessageSet shouldn't have a partial trailing message") } c, err := r.isControl() if err != nil { t.Fatal(err) } if c { t.Errorf("MessageSet can't be a control batch") } f, err := r.recordsOffset() if err != nil { t.Fatal(err) } if f != nil { t.Errorf("RecordBatch record offset is invalid") } } func TestDefaultRecords(t *testing.T) { batch := &RecordBatch{ IsTransactional: true, Version: 2, FirstOffset: 1, Records: []*Record{ { Value: []byte{1}, }, }, } r := newDefaultRecords(batch) exp, err := encode(batch, nil) if err != nil { t.Fatal(err) } buf, err := encode(&r, nil) if err != nil { t.Fatal(err) } if !bytes.Equal(buf, exp) { t.Errorf("Wrong encoding for default records, wanted %v, got %v", exp, buf) } batch = &RecordBatch{} r = Records{} err = decode(exp, batch, nil) if err != nil { t.Fatal(err) } err = decode(buf, &r, nil) if err != nil { t.Fatal(err) } if r.recordsType != defaultRecords { t.Fatalf("Wrong records type %v, expected %v", r.recordsType, defaultRecords) } if !reflect.DeepEqual(batch, r.RecordBatch) { t.Errorf("Wrong decoding for default records, wanted %#+v, got %#+v", batch, r.RecordBatch) } n, err := r.numRecords() if err != nil { t.Fatal(err) } if n != 1 { t.Errorf("Wrong number of records, wanted 1, got %d", n) } p, err := r.isPartial() if err != nil { t.Fatal(err) } if p { t.Errorf("RecordBatch shouldn't have a partial trailing record") } c, err := r.isControl() if err != nil { t.Fatal(err) } if c { t.Errorf("RecordBatch shouldn't be a control batch") } f, err := r.recordsOffset() if err != nil { t.Fatal(err) } if f == nil || *f != 1 { t.Errorf("RecordBatch record offset is invalid") } } golang-github-ibm-sarama-1.43.2/request.go000066400000000000000000000127331461256741300203750ustar00rootroot00000000000000package sarama import ( "encoding/binary" "fmt" "io" ) type protocolBody interface { encoder versionedDecoder key() int16 
version() int16 headerVersion() int16 isValidVersion() bool requiredVersion() KafkaVersion } type request struct { correlationID int32 clientID string body protocolBody } func (r *request) encode(pe packetEncoder) error { pe.push(&lengthField{}) pe.putInt16(r.body.key()) pe.putInt16(r.body.version()) pe.putInt32(r.correlationID) if r.body.headerVersion() >= 1 { err := pe.putString(r.clientID) if err != nil { return err } } if r.body.headerVersion() >= 2 { // we don't use tag headers at the moment so we just put an array length of 0 pe.putUVarint(0) } err := r.body.encode(pe) if err != nil { return err } return pe.pop() } func (r *request) decode(pd packetDecoder) (err error) { key, err := pd.getInt16() if err != nil { return err } version, err := pd.getInt16() if err != nil { return err } r.correlationID, err = pd.getInt32() if err != nil { return err } r.clientID, err = pd.getString() if err != nil { return err } r.body = allocateBody(key, version) if r.body == nil { return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)} } if r.body.headerVersion() >= 2 { // tagged field _, err = pd.getUVarint() if err != nil { return err } } return r.body.decode(pd, version) } func decodeRequest(r io.Reader) (*request, int, error) { var ( bytesRead int lengthBytes = make([]byte, 4) ) if _, err := io.ReadFull(r, lengthBytes); err != nil { return nil, bytesRead, err } bytesRead += len(lengthBytes) length := int32(binary.BigEndian.Uint32(lengthBytes)) if length <= 4 || length > MaxRequestSize { return nil, bytesRead, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)} } encodedReq := make([]byte, length) if _, err := io.ReadFull(r, encodedReq); err != nil { return nil, bytesRead, err } bytesRead += len(encodedReq) req := &request{} if err := decode(encodedReq, req, nil); err != nil { return nil, bytesRead, err } return req, bytesRead, nil } func allocateBody(key, version int16) protocolBody { switch key { case 0: return &ProduceRequest{Version: version} case 1: return &FetchRequest{Version: version} case 2: return &OffsetRequest{Version: version} case 3: return &MetadataRequest{Version: version} // 4: LeaderAndIsrRequest // 5: StopReplicaRequest // 6: UpdateMetadataRequest // 7: ControlledShutdownRequest case 8: return &OffsetCommitRequest{Version: version} case 9: return &OffsetFetchRequest{Version: version} case 10: return &FindCoordinatorRequest{Version: version} case 11: return &JoinGroupRequest{Version: version} case 12: return &HeartbeatRequest{Version: version} case 13: return &LeaveGroupRequest{Version: version} case 14: return &SyncGroupRequest{Version: version} case 15: return &DescribeGroupsRequest{Version: version} case 16: return &ListGroupsRequest{Version: version} case 17: return &SaslHandshakeRequest{Version: version} case 18: return &ApiVersionsRequest{Version: version} case 19: return &CreateTopicsRequest{Version: version} case 20: return &DeleteTopicsRequest{Version: version} case 21: return &DeleteRecordsRequest{Version: version} case 22: return &InitProducerIDRequest{Version: version} // 23: OffsetForLeaderEpochRequest case 24: return &AddPartitionsToTxnRequest{Version: version} case 25: return &AddOffsetsToTxnRequest{Version: version} case 26: return &EndTxnRequest{Version: version} // 27: WriteTxnMarkersRequest case 28: return &TxnOffsetCommitRequest{Version: version} case 29: return &DescribeAclsRequest{Version: int(version)} case 30: return &CreateAclsRequest{Version: version} case 31: return &DeleteAclsRequest{Version: 
int(version)} case 32: return &DescribeConfigsRequest{Version: version} case 33: return &AlterConfigsRequest{Version: version} // 34: AlterReplicaLogDirsRequest case 35: return &DescribeLogDirsRequest{Version: version} case 36: return &SaslAuthenticateRequest{Version: version} case 37: return &CreatePartitionsRequest{Version: version} // 38: CreateDelegationTokenRequest // 39: RenewDelegationTokenRequest // 40: ExpireDelegationTokenRequest // 41: DescribeDelegationTokenRequest case 42: return &DeleteGroupsRequest{Version: version} // 43: ElectLeadersRequest case 44: return &IncrementalAlterConfigsRequest{Version: version} case 45: return &AlterPartitionReassignmentsRequest{Version: version} case 46: return &ListPartitionReassignmentsRequest{Version: version} case 47: return &DeleteOffsetsRequest{Version: version} case 48: return &DescribeClientQuotasRequest{Version: version} case 49: return &AlterClientQuotasRequest{Version: version} case 50: return &DescribeUserScramCredentialsRequest{Version: version} case 51: return &AlterUserScramCredentialsRequest{Version: version} // 52: VoteRequest // 53: BeginQuorumEpochRequest // 54: EndQuorumEpochRequest // 55: DescribeQuorumRequest // 56: AlterPartitionRequest // 57: UpdateFeaturesRequest // 58: EnvelopeRequest // 59: FetchSnapshotRequest // 60: DescribeClusterRequest // 61: DescribeProducersRequest // 62: BrokerRegistrationRequest // 63: BrokerHeartbeatRequest // 64: UnregisterBrokerRequest // 65: DescribeTransactionsRequest // 66: ListTransactionsRequest // 67: AllocateProducerIdsRequest // 68: ConsumerGroupHeartbeatRequest } return nil } golang-github-ibm-sarama-1.43.2/request_test.go000066400000000000000000000266721461256741300214430ustar00rootroot00000000000000package sarama import ( "bytes" "fmt" "reflect" "testing" "github.com/davecgh/go-spew/spew" assert "github.com/stretchr/testify/require" ) var names = map[int16]string{ 0: "ProduceRequest", 1: "FetchRequest", 2: "ListOffsetsRequest", 3: "MetadataRequest", 4: "LeaderAndIsrRequest", 5: "StopReplicaRequest", 6: "UpdateMetadataRequest", 7: "ControlledShutdownRequest", 8: "OffsetCommitRequest", 9: "OffsetFetchRequest", 10: "FindCoordinatorRequest", 11: "JoinGroupRequest", 12: "HeartbeatRequest", 13: "LeaveGroupRequest", 14: "SyncGroupRequest", 15: "DescribeGroupsRequest", 16: "ListGroupsRequest", 17: "SaslHandshakeRequest", 18: "ApiVersionsRequest", 19: "CreateTopicsRequest", 20: "DeleteTopicsRequest", 21: "DeleteRecordsRequest", 22: "InitProducerIdRequest", 23: "OffsetForLeaderEpochRequest", 24: "AddPartitionsToTxnRequest", 25: "AddOffsetsToTxnRequest", 26: "EndTxnRequest", 27: "WriteTxnMarkersRequest", 28: "TxnOffsetCommitRequest", 29: "DescribeAclsRequest", 30: "CreateAclsRequest", 31: "DeleteAclsRequest", 32: "DescribeConfigsRequest", 33: "AlterConfigsRequest", 34: "AlterReplicaLogDirsRequest", 35: "DescribeLogDirsRequest", 36: "SaslAuthenticateRequest", 37: "CreatePartitionsRequest", 38: "CreateDelegationTokenRequest", 39: "RenewDelegationTokenRequest", 40: "ExpireDelegationTokenRequest", 41: "DescribeDelegationTokenRequest", 42: "DeleteGroupsRequest", 43: "ElectLeadersRequest", 44: "IncrementalAlterConfigsRequest", 45: "AlterPartitionReassignmentsRequest", 46: "ListPartitionReassignmentsRequest", 47: "OffsetDeleteRequest", 48: "DescribeClientQuotasRequest", 49: "AlterClientQuotasRequest", 50: "DescribeUserScramCredentialsRequest", 51: "AlterUserScramCredentialsRequest", 52: "VoteRequest", 53: "BeginQuorumEpochRequest", 54: "EndQuorumEpochRequest", 55: "DescribeQuorumRequest", 56: 
"AlterPartitionRequest", 57: "UpdateFeaturesRequest", 58: "EnvelopeRequest", 59: "FetchSnapshotRequest", 60: "DescribeClusterRequest", 61: "DescribeProducersRequest", 62: "BrokerRegistrationRequest", 63: "BrokerHeartbeatRequest", 64: "UnregisterBrokerRequest", 65: "DescribeTransactionsRequest", 66: "ListTransactionsRequest", 67: "AllocateProducerIdsRequest", 68: "ConsumerGroupHeartbeatRequest", } // allocateResponseBody is a test-only clone of allocateBody. There's no // central registry of types, so we can't do this using reflection for Response // types and assuming that the struct is identically named, just with Response // instead of Request. func allocateResponseBody(req protocolBody) protocolBody { key := req.key() version := req.version() switch key { case 0: return &ProduceResponse{Version: version} case 1: return &FetchResponse{Version: version} case 2: return &OffsetResponse{Version: version} case 3: return &MetadataResponse{Version: version} case 8: return &OffsetCommitResponse{Version: version} case 9: return &OffsetFetchResponse{Version: version} case 10: return &FindCoordinatorResponse{Version: version} case 11: return &JoinGroupResponse{Version: version} case 12: return &HeartbeatResponse{Version: version} case 13: return &LeaveGroupResponse{Version: version} case 14: return &SyncGroupResponse{Version: version} case 15: return &DescribeGroupsResponse{Version: version} case 16: return &ListGroupsResponse{Version: version} case 17: return &SaslHandshakeResponse{Version: version} case 18: return &ApiVersionsResponse{Version: version} case 19: return &CreateTopicsResponse{Version: version} case 20: return &DeleteTopicsResponse{Version: version} case 21: return &DeleteRecordsResponse{Version: version} case 22: return &InitProducerIDResponse{Version: version} case 24: return &AddPartitionsToTxnResponse{Version: version} case 25: return &AddOffsetsToTxnResponse{Version: version} case 26: return &EndTxnResponse{Version: version} case 28: return &TxnOffsetCommitResponse{Version: version} case 29: return &DescribeAclsResponse{Version: version} case 30: return &CreateAclsResponse{Version: version} case 31: return &DeleteAclsResponse{Version: version} case 32: return &DescribeConfigsResponse{Version: version} case 33: return &AlterConfigsResponse{Version: version} case 35: return &DescribeLogDirsResponse{Version: version} case 36: return &SaslAuthenticateResponse{Version: version} case 37: return &CreatePartitionsResponse{Version: version} case 42: return &DeleteGroupsResponse{Version: version} case 44: return &IncrementalAlterConfigsResponse{Version: version} case 45: return &AlterPartitionReassignmentsResponse{Version: version} case 46: return &ListPartitionReassignmentsResponse{Version: version} case 47: return &DeleteOffsetsResponse{Version: version} case 48: return &DescribeClientQuotasResponse{Version: version} case 49: return &AlterClientQuotasResponse{Version: version} case 50: return &DescribeUserScramCredentialsResponse{Version: version} case 51: return &AlterUserScramCredentialsResponse{Version: version} } return nil } func TestAllocateBodyProtocolVersions(t *testing.T) { type test struct { version KafkaVersion apiVersions map[int16]int16 } tests := []test{ { V1_1_0_0, map[int16]int16{ 0: 5, 1: 7, 2: 2, 3: 5, 4: 1, 5: 0, 6: 4, 7: 1, 8: 3, 9: 3, 10: 1, 11: 2, 12: 1, 13: 1, 14: 1, 15: 1, 16: 1, 17: 1, 18: 1, 19: 2, 20: 1, 21: 0, 22: 0, 23: 0, 24: 0, 25: 0, 26: 0, 27: 0, 28: 0, 29: 0, 30: 0, 31: 0, 32: 1, 33: 0, 34: 0, 35: 0, 36: 0, 37: 0, 38: 0, 39: 0, 40: 0, 41: 0, 42: 0, }, 
}, { V2_0_0_0, map[int16]int16{ 0: 6, 1: 8, 2: 3, 3: 6, 4: 1, 5: 0, 6: 4, 7: 1, 8: 4, 9: 4, 10: 2, 11: 3, 12: 2, 13: 2, 14: 2, 15: 2, 16: 2, 17: 1, 18: 2, 19: 3, 20: 2, 21: 1, 22: 1, 23: 1, 24: 1, 25: 1, 26: 1, 27: 0, 28: 1, 29: 1, 30: 1, 31: 1, 32: 2, 33: 1, 34: 1, 35: 1, 36: 0, 37: 1, 38: 1, 39: 1, 40: 1, 41: 1, 42: 1, }, }, { V2_1_0_0, map[int16]int16{ 0: 7, 1: 10, 2: 4, 3: 7, 4: 1, 5: 0, 6: 4, 7: 1, 8: 6, 9: 5, 10: 2, 11: 3, 12: 2, 13: 2, 14: 2, 15: 2, 16: 2, 17: 1, 18: 2, 19: 3, 20: 3, 21: 1, 22: 1, 23: 2, 24: 1, 25: 1, 26: 1, 27: 0, 28: 2, 29: 1, 30: 1, 31: 1, 32: 2, 33: 1, 34: 1, 35: 1, 36: 0, 37: 1, 38: 1, 39: 1, 40: 1, 41: 1, 42: 1, }, }, } for _, tt := range tests { for key, version := range tt.apiVersions { t.Run(fmt.Sprintf("%s-%s", tt.version.String(), names[key]), func(t *testing.T) { req := allocateBody(key, version) if req == nil { t.Skipf("apikey %d is not implemented", key) } resp := allocateResponseBody(req) assert.NotNil(t, resp, fmt.Sprintf("%s has no matching response type in allocateResponseBody", reflect.TypeOf(req))) assert.Equal(t, req.isValidVersion(), resp.isValidVersion(), fmt.Sprintf("%s isValidVersion should match %s", reflect.TypeOf(req), reflect.TypeOf(resp))) assert.Equal(t, req.requiredVersion(), resp.requiredVersion(), fmt.Sprintf("%s requiredVersion should match %s", reflect.TypeOf(req), reflect.TypeOf(resp))) for _, body := range []protocolBody{req, resp} { assert.Equal(t, key, body.key()) assert.Equal(t, version, body.version()) assert.True(t, body.isValidVersion(), fmt.Sprintf("%s v%d is not supported, but expected for KafkaVersion %s", reflect.TypeOf(body), version, tt.version)) assert.True(t, tt.version.IsAtLeast(body.requiredVersion()), fmt.Sprintf("KafkaVersion %s should be enough for %s v%d", tt.version, reflect.TypeOf(body), version)) } }) } } } // not specific to request tests, just helper functions for testing structures that // implement the encoder or decoder interfaces that needed somewhere to live func testEncodable(t *testing.T, name string, in encoder, expect []byte) { t.Helper() packet, err := encode(in, nil) if err != nil { t.Error(err) } else if !bytes.Equal(packet, expect) { t.Error("Encoding", name, "failed\ngot ", packet, "\nwant", expect) } } func testDecodable(t *testing.T, name string, out decoder, in []byte) { t.Helper() err := decode(in, out, nil) if err != nil { t.Error("Decoding", name, "failed:", err) } } func testVersionDecodable(t *testing.T, name string, out versionedDecoder, in []byte, version int16) { t.Helper() err := versionedDecode(in, out, version, nil) if err != nil { t.Error("Decoding", name, "version", version, "failed:", err) } } func testRequest(t *testing.T, name string, rb protocolBody, expected []byte) { t.Helper() if !rb.requiredVersion().IsAtLeast(MinVersion) { t.Errorf("Request %s has invalid required version", name) } packet := testRequestEncode(t, name, rb, expected) testRequestDecode(t, name, rb, packet) } func testRequestWithoutByteComparison(t *testing.T, name string, rb protocolBody) { if !rb.requiredVersion().IsAtLeast(MinVersion) { t.Errorf("Request %s has invalid required version", name) } packet := testRequestEncode(t, name, rb, nil) testRequestDecode(t, name, rb, packet) } func testRequestEncode(t *testing.T, name string, rb protocolBody, expected []byte) []byte { req := &request{correlationID: 123, clientID: "foo", body: rb} packet, err := encode(req, nil) headerSize := 0 switch rb.headerVersion() { case 1: headerSize = 14 + len("foo") case 2: headerSize = 14 + len("foo") + 1 default: 
t.Error("Encoding", name, "failed\nheaderVersion", rb.headerVersion(), "not implemented") } if err != nil { t.Error(err) } else if expected != nil && !bytes.Equal(packet[headerSize:], expected) { t.Error("Encoding", name, "failed\ngot ", packet[headerSize:], "\nwant", expected) } return packet } func testRequestDecode(t *testing.T, name string, rb protocolBody, packet []byte) { t.Helper() decoded, n, err := decodeRequest(bytes.NewReader(packet)) if err != nil { t.Error("Failed to decode request", err) } else if decoded.correlationID != 123 || decoded.clientID != "foo" { t.Errorf("Decoded header %q is not valid: %+v", name, decoded) } else if !reflect.DeepEqual(rb, decoded.body) { t.Error(spew.Sprintf("Decoded request %q does not match the encoded one\nencoded: %+v\ndecoded: %+v", name, rb, decoded.body)) } else if n != len(packet) { t.Errorf("Decoded request %q bytes: %d does not match the encoded one: %d\n", name, n, len(packet)) } else if rb.version() != decoded.body.version() { t.Errorf("Decoded request %q version: %d does not match the encoded one: %d\n", name, decoded.body.version(), rb.version()) } } func testResponse(t *testing.T, name string, res protocolBody, expected []byte) { encoded, err := encode(res, nil) if err != nil { t.Error(err) } else if expected != nil && !bytes.Equal(encoded, expected) { t.Error("Encoding", name, "failed\ngot ", encoded, "\nwant", expected) } decoded := reflect.New(reflect.TypeOf(res).Elem()).Interface().(versionedDecoder) if err := versionedDecode(encoded, decoded, res.version(), nil); err != nil { t.Error("Decoding", name, "failed:", err) } if !reflect.DeepEqual(decoded, res) { t.Errorf("Decoded response does not match the encoded one\nencoded: %#v\ndecoded: %#v", res, decoded) } } func nullString(s string) *string { return &s } golang-github-ibm-sarama-1.43.2/response_header.go000066400000000000000000000010511461256741300220420ustar00rootroot00000000000000package sarama import "fmt" type responseHeader struct { length int32 correlationID int32 } func (r *responseHeader) decode(pd packetDecoder, version int16) (err error) { r.length, err = pd.getInt32() if err != nil { return err } if r.length <= 4 || r.length > MaxResponseSize { return PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", r.length)} } r.correlationID, err = pd.getInt32() if version >= 1 { if _, err := pd.getEmptyTaggedFieldArray(); err != nil { return err } } return err } golang-github-ibm-sarama-1.43.2/response_header_test.go000066400000000000000000000017051461256741300231070ustar00rootroot00000000000000package sarama import "testing" var ( responseHeaderBytesV0 = []byte{ 0x00, 0x00, 0x0f, 0x00, 0x0a, 0xbb, 0xcc, 0xff, } responseHeaderBytesV1 = []byte{ 0x00, 0x00, 0x0f, 0x00, 0x0a, 0xbb, 0xcc, 0xff, 0x00, } ) func TestResponseHeaderV0(t *testing.T) { header := responseHeader{} testVersionDecodable(t, "response header", &header, responseHeaderBytesV0, 0) if header.length != 0xf00 { t.Error("Decoding header length failed, got", header.length) } if header.correlationID != 0x0abbccff { t.Error("Decoding header correlation id failed, got", header.correlationID) } } func TestResponseHeaderV1(t *testing.T) { header := responseHeader{} testVersionDecodable(t, "response header", &header, responseHeaderBytesV1, 1) if header.length != 0xf00 { t.Error("Decoding header length failed, got", header.length) } if header.correlationID != 0x0abbccff { t.Error("Decoding header correlation id failed, got", header.correlationID) } } 
golang-github-ibm-sarama-1.43.2/sarama.go000066400000000000000000000261261461256741300201520ustar00rootroot00000000000000/* Package sarama is a pure Go client library for dealing with Apache Kafka (versions 0.8 and later). It includes a high-level API for easily producing and consuming messages, and a low-level API for controlling bytes on the wire when the high-level API is insufficient. Usage examples for the high-level APIs are provided inline with their full documentation. To produce messages, use either the AsyncProducer or the SyncProducer. The AsyncProducer accepts messages on a channel and produces them asynchronously in the background as efficiently as possible; it is preferred in most cases. The SyncProducer provides a method which will block until Kafka acknowledges the message as produced. This can be useful but comes with two caveats: it will generally be less efficient, and the actual durability guarantees depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost. To consume messages, use Consumer or Consumer-Group API. For lower-level needs, the Broker and Request/Response objects permit precise control over each connection and message sent on the wire; the Client provides higher-level metadata management that is shared between the producers and the consumer. The Request/Response objects and properties are mostly undocumented, as they line up exactly with the protocol fields documented by Kafka at https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol Metrics are exposed through https://github.com/rcrowley/go-metrics library in a local registry. Broker related metrics: +---------------------------------------------------------+------------+---------------------------------------------------------------+ | Name | Type | Description | +---------------------------------------------------------+------------+---------------------------------------------------------------+ | incoming-byte-rate | meter | Bytes/second read off all brokers | | incoming-byte-rate-for-broker- | meter | Bytes/second read off a given broker | | outgoing-byte-rate | meter | Bytes/second written off all brokers | | outgoing-byte-rate-for-broker- | meter | Bytes/second written off a given broker | | request-rate | meter | Requests/second sent to all brokers | | request-rate-for-broker- | meter | Requests/second sent to a given broker | | request-size | histogram | Distribution of the request size in bytes for all brokers | | request-size-for-broker- | histogram | Distribution of the request size in bytes for a given broker | | request-latency-in-ms | histogram | Distribution of the request latency in ms for all brokers | | request-latency-in-ms-for-broker- | histogram | Distribution of the request latency in ms for a given broker | | response-rate | meter | Responses/second received from all brokers | | response-rate-for-broker- | meter | Responses/second received from a given broker | | response-size | histogram | Distribution of the response size in bytes for all brokers | | response-size-for-broker- | histogram | Distribution of the response size in bytes for a given broker | | requests-in-flight | counter | The current number of in-flight requests awaiting a response | | | | for all brokers | | requests-in-flight-for-broker- | counter | The current number of in-flight requests awaiting a response | | | | for a given broker | | protocol-requests-rate- | meter | Number of api 
requests sent to the brokers for all brokers | | | | https://kafka.apache.org/protocol.html#protocol_api_keys | | | protocol-requests-rate--for-broker- | meter | Number of packets sent to the brokers by api-key for a given | | | | broker | +---------------------------------------------------------+------------+---------------------------------------------------------------+ Note that we do not gather specific metrics for seed brokers but they are part of the "all brokers" metrics. Producer related metrics: +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ | Name | Type | Description | +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ | batch-size | histogram | Distribution of the number of bytes sent per partition per request for all topics | | batch-size-for-topic- | histogram | Distribution of the number of bytes sent per partition per request for a given topic | | record-send-rate | meter | Records/second sent to all topics | | record-send-rate-for-topic- | meter | Records/second sent to a given topic | | records-per-request | histogram | Distribution of the number of records sent per request for all topics | | records-per-request-for-topic- | histogram | Distribution of the number of records sent per request for a given topic | | compression-ratio | histogram | Distribution of the compression ratio times 100 of record batches for all topics | | compression-ratio-for-topic- | histogram | Distribution of the compression ratio times 100 of record batches for a given topic | +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ Consumer related metrics: +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ | Name | Type | Description | +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ | consumer-batch-size | histogram | Distribution of the number of messages in a batch | | consumer-fetch-rate | meter | Fetch requests/second sent to all brokers | | consumer-fetch-rate-for-broker- | meter | Fetch requests/second sent to a given broker | | consumer-fetch-rate-for-topic- | meter | Fetch requests/second sent for a given topic | | consumer-fetch-response-size | histogram | Distribution of the fetch response size in bytes | | consumer-group-join-total- | counter | Total count of consumer group join attempts | | consumer-group-join-failed- | counter | Total count of consumer group join failures | | consumer-group-sync-total- | counter | Total count of consumer group sync attempts | | consumer-group-sync-failed- | counter | Total count of consumer group sync failures | +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ */ package sarama import ( "io" "log" ) var ( // Logger is the instance of a StdLogger interface that Sarama writes connection // management events to. By default it is set to discard all log messages via io.Discard, // but you can set it to redirect wherever you want. 
Logger StdLogger = log.New(io.Discard, "[Sarama] ", log.LstdFlags) // PanicHandler is called for recovering from panics spawned internally to the library (and thus // not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered. PanicHandler func(interface{}) // MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying // to send a request larger than this will result in an PacketEncodingError. The default of 100 MiB is aligned // with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt // to process. MaxRequestSize int32 = 100 * 1024 * 1024 // MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If // a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to // protect the client from running out of memory. Please note that brokers do not have any natural limit on // the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers // (see https://issues.apache.org/jira/browse/KAFKA-2063). MaxResponseSize int32 = 100 * 1024 * 1024 ) // StdLogger is used to log error messages. type StdLogger interface { Print(v ...interface{}) Printf(format string, v ...interface{}) Println(v ...interface{}) } type debugLogger struct{} func (d *debugLogger) Print(v ...interface{}) { Logger.Print(v...) } func (d *debugLogger) Printf(format string, v ...interface{}) { Logger.Printf(format, v...) } func (d *debugLogger) Println(v ...interface{}) { Logger.Println(v...) } // DebugLogger is the instance of a StdLogger that Sarama writes more verbose // debug information to. By default it is set to redirect all debug to the // default Logger above, but you can optionally set it to another StdLogger // instance to (e.g.,) discard debug information var DebugLogger StdLogger = &debugLogger{} golang-github-ibm-sarama-1.43.2/sarama_test.go000066400000000000000000000004471461256741300212070ustar00rootroot00000000000000//go:build !functional package sarama import ( "flag" "log" "os" "testing" ) func TestMain(m *testing.M) { flag.Parse() if f := flag.Lookup("test.v"); f != nil && f.Value.String() == "true" { Logger = log.New(os.Stderr, "[DEBUG] ", log.Lmicroseconds|log.Ltime) } os.Exit(m.Run()) } golang-github-ibm-sarama-1.43.2/sasl_authenticate_request.go000066400000000000000000000017511461256741300241530ustar00rootroot00000000000000package sarama type SaslAuthenticateRequest struct { // Version defines the protocol version to use for encode and decode Version int16 SaslAuthBytes []byte } // APIKeySASLAuth is the API key for the SaslAuthenticate Kafka API const APIKeySASLAuth = 36 func (r *SaslAuthenticateRequest) encode(pe packetEncoder) error { return pe.putBytes(r.SaslAuthBytes) } func (r *SaslAuthenticateRequest) decode(pd packetDecoder, version int16) (err error) { r.Version = version r.SaslAuthBytes, err = pd.getBytes() return err } func (r *SaslAuthenticateRequest) key() int16 { return APIKeySASLAuth } func (r *SaslAuthenticateRequest) version() int16 { return r.Version } func (r *SaslAuthenticateRequest) headerVersion() int16 { return 1 } func (r *SaslAuthenticateRequest) isValidVersion() bool { return r.Version >= 0 && r.Version <= 1 } func (r *SaslAuthenticateRequest) requiredVersion() KafkaVersion { switch r.Version { case 1: return V2_2_0_0 default: return V1_0_0_0 } } 
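The package documentation in sarama.go above describes the meters and histograms that Sarama registers in a go-metrics registry. The following is a minimal sketch — not part of the library — of reading those metrics back after producing a message; it assumes a reachable broker at localhost:9092, and the topic name is a placeholder.

```go
package main

import (
	"fmt"
	"log"

	"github.com/IBM/sarama"
	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Producer.Return.Successes = true // required by the SyncProducer

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatalln(err)
	}
	defer producer.Close()

	// Send one message so the request/response meters have something to report.
	if _, _, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "my_topic",
		Value: sarama.StringEncoder("testing 123"),
	}); err != nil {
		log.Println("send failed:", err)
	}

	// Each Config carries its own go-metrics registry; iterate it and print the
	// meters (e.g. "request-rate", "response-rate") listed in the tables above.
	cfg.MetricRegistry.Each(func(name string, m interface{}) {
		if meter, ok := m.(metrics.Meter); ok {
			fmt.Printf("%-40s count=%d rate1m=%.2f\n", name, meter.Count(), meter.Rate1())
		}
	})
}
```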
golang-github-ibm-sarama-1.43.2/sasl_authenticate_request_test.go000066400000000000000000000007731461256741300252150ustar00rootroot00000000000000package sarama import "testing" var saslAuthenticateRequest = []byte{ 0, 0, 0, 3, 'f', 'o', 'o', } func TestSaslAuthenticateRequest(t *testing.T) { request := new(SaslAuthenticateRequest) request.SaslAuthBytes = []byte(`foo`) testRequest(t, "basic", request, saslAuthenticateRequest) } func TestSaslAuthenticateRequestV1(t *testing.T) { request := new(SaslAuthenticateRequest) request.Version = 1 request.SaslAuthBytes = []byte(`foo`) testRequest(t, "basic", request, saslAuthenticateRequest) } golang-github-ibm-sarama-1.43.2/sasl_authenticate_response.go000066400000000000000000000026721461256741300243240ustar00rootroot00000000000000package sarama type SaslAuthenticateResponse struct { // Version defines the protocol version to use for encode and decode Version int16 Err KError ErrorMessage *string SaslAuthBytes []byte SessionLifetimeMs int64 } func (r *SaslAuthenticateResponse) encode(pe packetEncoder) error { pe.putInt16(int16(r.Err)) if err := pe.putNullableString(r.ErrorMessage); err != nil { return err } if err := pe.putBytes(r.SaslAuthBytes); err != nil { return err } if r.Version > 0 { pe.putInt64(r.SessionLifetimeMs) } return nil } func (r *SaslAuthenticateResponse) decode(pd packetDecoder, version int16) error { r.Version = version kerr, err := pd.getInt16() if err != nil { return err } r.Err = KError(kerr) if r.ErrorMessage, err = pd.getNullableString(); err != nil { return err } if r.SaslAuthBytes, err = pd.getBytes(); err != nil { return err } if version > 0 { r.SessionLifetimeMs, err = pd.getInt64() } return err } func (r *SaslAuthenticateResponse) key() int16 { return APIKeySASLAuth } func (r *SaslAuthenticateResponse) version() int16 { return r.Version } func (r *SaslAuthenticateResponse) headerVersion() int16 { return 0 } func (r *SaslAuthenticateResponse) isValidVersion() bool { return r.Version >= 0 && r.Version <= 1 } func (r *SaslAuthenticateResponse) requiredVersion() KafkaVersion { switch r.Version { case 1: return V2_2_0_0 default: return V1_0_0_0 } } golang-github-ibm-sarama-1.43.2/sasl_authenticate_response_test.go000066400000000000000000000016621461256741300253610ustar00rootroot00000000000000package sarama import "testing" var ( saslAuthenticateResponseErr = []byte{ 0, 58, 0, 3, 'e', 'r', 'r', 0, 0, 0, 3, 'm', 's', 'g', } saslAuthenticateResponseErrV1 = []byte{ 0, 58, 0, 3, 'e', 'r', 'r', 0, 0, 0, 3, 'm', 's', 'g', 0, 0, 0, 0, 0, 0, 0, 1, } ) func TestSaslAuthenticateResponse(t *testing.T) { response := new(SaslAuthenticateResponse) response.Err = ErrSASLAuthenticationFailed msg := "err" response.ErrorMessage = &msg response.SaslAuthBytes = []byte(`msg`) testResponse(t, "authenticate response", response, saslAuthenticateResponseErr) } func TestSaslAuthenticateResponseV1(t *testing.T) { response := new(SaslAuthenticateResponse) response.Err = ErrSASLAuthenticationFailed msg := "err" response.Version = 1 response.ErrorMessage = &msg response.SaslAuthBytes = []byte(`msg`) response.SessionLifetimeMs = 1 testResponse(t, "authenticate response", response, saslAuthenticateResponseErrV1) } golang-github-ibm-sarama-1.43.2/sasl_handshake_request.go000066400000000000000000000015161461256741300234220ustar00rootroot00000000000000package sarama type SaslHandshakeRequest struct { Mechanism string Version int16 } func (r *SaslHandshakeRequest) encode(pe packetEncoder) error { if err := pe.putString(r.Mechanism); err != nil { return err } 
return nil } func (r *SaslHandshakeRequest) decode(pd packetDecoder, version int16) (err error) { if r.Mechanism, err = pd.getString(); err != nil { return err } return nil } func (r *SaslHandshakeRequest) key() int16 { return 17 } func (r *SaslHandshakeRequest) version() int16 { return r.Version } func (r *SaslHandshakeRequest) headerVersion() int16 { return 1 } func (r *SaslHandshakeRequest) isValidVersion() bool { return r.Version >= 0 && r.Version <= 1 } func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion { switch r.Version { case 1: return V1_0_0_0 default: return V0_10_0_0 } } golang-github-ibm-sarama-1.43.2/sasl_handshake_request_test.go000066400000000000000000000004121461256741300244530ustar00rootroot00000000000000package sarama import "testing" var baseSaslRequest = []byte{ 0, 3, 'f', 'o', 'o', // Mechanism } func TestSaslHandshakeRequest(t *testing.T) { request := new(SaslHandshakeRequest) request.Mechanism = "foo" testRequest(t, "basic", request, baseSaslRequest) } golang-github-ibm-sarama-1.43.2/sasl_handshake_response.go000066400000000000000000000017301461256741300235660ustar00rootroot00000000000000package sarama type SaslHandshakeResponse struct { Version int16 Err KError EnabledMechanisms []string } func (r *SaslHandshakeResponse) encode(pe packetEncoder) error { pe.putInt16(int16(r.Err)) return pe.putStringArray(r.EnabledMechanisms) } func (r *SaslHandshakeResponse) decode(pd packetDecoder, version int16) error { kerr, err := pd.getInt16() if err != nil { return err } r.Err = KError(kerr) if r.EnabledMechanisms, err = pd.getStringArray(); err != nil { return err } return nil } func (r *SaslHandshakeResponse) key() int16 { return 17 } func (r *SaslHandshakeResponse) version() int16 { return r.Version } func (r *SaslHandshakeResponse) headerVersion() int16 { return 0 } func (r *SaslHandshakeResponse) isValidVersion() bool { return r.Version >= 0 && r.Version <= 1 } func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion { switch r.Version { case 1: return V1_0_0_0 default: return V0_10_0_0 } } golang-github-ibm-sarama-1.43.2/sasl_handshake_response_test.go000066400000000000000000000011011461256741300246150ustar00rootroot00000000000000package sarama import ( "errors" "testing" ) var saslHandshakeResponse = []byte{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x03, 'f', 'o', 'o', } func TestSaslHandshakeResponse(t *testing.T) { response := new(SaslHandshakeResponse) testVersionDecodable(t, "no error", response, saslHandshakeResponse, 0) if !errors.Is(response.Err, ErrNoError) { t.Error("Decoding error failed: no error expected but found", response.Err) } if response.EnabledMechanisms[0] != "foo" { t.Error("Decoding error failed: expected 'foo' but found", response.EnabledMechanisms) } } golang-github-ibm-sarama-1.43.2/scram_formatter.go000066400000000000000000000030351461256741300220700ustar00rootroot00000000000000package sarama import ( "crypto/hmac" "crypto/sha256" "crypto/sha512" "hash" ) // ScramFormatter implementation // @see: https://github.com/apache/kafka/blob/99b9b3e84f4e98c3f07714e1de6a139a004cbc5b/clients/src/main/java/org/apache/kafka/common/security/scram/internals/ScramFormatter.java#L93 type scramFormatter struct { mechanism ScramMechanismType } func (s scramFormatter) mac(key []byte) (hash.Hash, error) { var m hash.Hash switch s.mechanism { case SCRAM_MECHANISM_SHA_256: m = hmac.New(sha256.New, key) case SCRAM_MECHANISM_SHA_512: m = hmac.New(sha512.New, key) default: return nil, ErrUnknownScramMechanism } return m, nil } func (s scramFormatter) 
hmac(key []byte, extra []byte) ([]byte, error) { mac, err := s.mac(key) if err != nil { return nil, err } if _, err := mac.Write(extra); err != nil { return nil, err } return mac.Sum(nil), nil } func (s scramFormatter) xor(result []byte, second []byte) { for i := 0; i < len(result); i++ { result[i] = result[i] ^ second[i] } } func (s scramFormatter) saltedPassword(password []byte, salt []byte, iterations int) ([]byte, error) { mac, err := s.mac(password) if err != nil { return nil, err } if _, err := mac.Write(salt); err != nil { return nil, err } if _, err := mac.Write([]byte{0, 0, 0, 1}); err != nil { return nil, err } u1 := mac.Sum(nil) prev := u1 result := u1 for i := 2; i <= iterations; i++ { ui, err := s.hmac(password, prev) if err != nil { return nil, err } s.xor(result, ui) prev = ui } return result, nil } golang-github-ibm-sarama-1.43.2/scram_formatter_test.go000066400000000000000000000046721461256741300231370ustar00rootroot00000000000000package sarama import ( "bytes" "testing" ) /* Following code can be used to validate saltedPassword implementation:
import org.apache.kafka.common.security.scram.internals.ScramFormatter;
import org.apache.kafka.common.security.scram.internals.ScramMechanism;
import java.nio.charset.StandardCharsets;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;

public class App {

    public static String bytesToHex(byte[] in) {
        final StringBuilder builder = new StringBuilder();
        for(byte b : in) {
            builder.append(String.format("0x%02x, ", b));
        }
        return builder.toString();
    }

    public static void main(String[] args) throws NoSuchAlgorithmException, InvalidKeyException {
        int digestIterations = 4096;
        String password = "hello";
        byte[] salt = "world".getBytes(StandardCharsets.UTF_8);
        byte[] saltedPassword = new ScramFormatter(ScramMechanism.SCRAM_SHA_256)
                .saltedPassword(password, salt, digestIterations);
        System.out.println(bytesToHex(saltedPassword));
    }
}
*/ func TestScramSaltedPasswordSha512(t *testing.T) { password := []byte("hello") salt := []byte("world") formatter := scramFormatter{mechanism: SCRAM_MECHANISM_SHA_512} result, _ := formatter.saltedPassword(password, salt, 4096) // calculated using ScramFormatter (see comment above) expected := []byte{ 0x35, 0x0c, 0x77, 0x84, 0x8a, 0x63, 0x06, 0x92, 0x00, 0x6e, 0xc6, 0x6a, 0x0c, 0x39, 0xeb, 0xb0, 0x00, 0xd3, 0xf8, 0x8a, 0x94, 0xae, 0x7f, 0x8c, 0xcd, 0x1d, 0x92, 0x52, 0x6c, 0x5b, 0x16, 0x15, 0x86, 0x3b, 0xde, 0xa1, 0x6c, 0x12, 0x9a, 0x7b, 0x09, 0xed, 0x0e, 0x38, 0xf2, 0x07, 0x4d, 0x2f, 0xe2, 0x9f, 0x0f, 0x41, 0xe1, 0xfb, 0x00, 0xc1, 0xd3, 0xbd, 0xd3, 0xfd, 0x51, 0x0b, 0xa9, 0x8f, } if !bytes.Equal(result, expected) { t.Errorf("saltedPassword SHA-512 failed, expected: %v, result: %v", expected, result) } } func TestScramSaltedPasswordSha256(t *testing.T) { password := []byte("hello") salt := []byte("world") formatter := scramFormatter{mechanism: SCRAM_MECHANISM_SHA_256} result, _ := formatter.saltedPassword(password, salt, 4096) // calculated using ScramFormatter (see comment above) expected := []byte{ 0xc1, 0x55, 0x53, 0x03, 0xda, 0x30, 0x9f, 0x6b, 0x7d, 0x1e, 0x8f, 0xe4, 0x56, 0x36, 0xbf, 0xdd, 0xdc, 0x4b, 0xf5, 0x64, 0x05, 0xe7, 0xe9, 0x4e, 0x9d, 0x15, 0xf0, 0xe7, 0xb9, 0xcb, 0xd3, 0x80, } if !bytes.Equal(result, expected) { t.Errorf("saltedPassword SHA-256 failed, expected: %v, result: %v", expected, result) } } golang-github-ibm-sarama-1.43.2/sticky_assignor_user_data.go000066400000000000000000000063501461256741300241450ustar00rootroot00000000000000package sarama type topicPartitionAssignment struct { Topic string Partition int32 } type StickyAssignorUserData interface { partitions() []topicPartitionAssignment hasGeneration() bool generation() int } // StickyAssignorUserDataV0 holds topic partition information for an assignment type StickyAssignorUserDataV0 struct { Topics map[string][]int32 topicPartitions []topicPartitionAssignment } func (m *StickyAssignorUserDataV0) encode(pe packetEncoder) error { if err := pe.putArrayLength(len(m.Topics)); err != nil { return err } for topic, partitions := range m.Topics { if err := pe.putString(topic); err != nil { return err } if err := pe.putInt32Array(partitions); err != nil { return err } } return nil } func (m *StickyAssignorUserDataV0) decode(pd packetDecoder) (err error) { var topicLen int if topicLen, err = pd.getArrayLength(); err != nil { return } m.Topics = make(map[string][]int32, topicLen) for i := 0; i < topicLen; i++ { var topic string if topic, err = pd.getString(); err != nil { return } if m.Topics[topic], err = pd.getInt32Array(); err != nil { return } } m.topicPartitions = populateTopicPartitions(m.Topics) return nil } func (m *StickyAssignorUserDataV0) partitions() []topicPartitionAssignment { return m.topicPartitions } func (m *StickyAssignorUserDataV0) hasGeneration() bool { return false } func (m *StickyAssignorUserDataV0) generation() int { return defaultGeneration } // StickyAssignorUserDataV1 holds topic partition information for an assignment type StickyAssignorUserDataV1 struct { Topics map[string][]int32 Generation int32 topicPartitions []topicPartitionAssignment } func (m *StickyAssignorUserDataV1) encode(pe packetEncoder) error { if err := pe.putArrayLength(len(m.Topics)); err != nil { return err } for topic, partitions := range m.Topics { if err := pe.putString(topic); err != nil { return err } if err := pe.putInt32Array(partitions); err != nil { return err } } pe.putInt32(m.Generation) return nil } func (m 
*StickyAssignorUserDataV1) decode(pd packetDecoder) (err error) { var topicLen int if topicLen, err = pd.getArrayLength(); err != nil { return } m.Topics = make(map[string][]int32, topicLen) for i := 0; i < topicLen; i++ { var topic string if topic, err = pd.getString(); err != nil { return } if m.Topics[topic], err = pd.getInt32Array(); err != nil { return } } m.Generation, err = pd.getInt32() if err != nil { return err } m.topicPartitions = populateTopicPartitions(m.Topics) return nil } func (m *StickyAssignorUserDataV1) partitions() []topicPartitionAssignment { return m.topicPartitions } func (m *StickyAssignorUserDataV1) hasGeneration() bool { return true } func (m *StickyAssignorUserDataV1) generation() int { return int(m.Generation) } func populateTopicPartitions(topics map[string][]int32) []topicPartitionAssignment { topicPartitions := make([]topicPartitionAssignment, 0) for topic, partitions := range topics { for _, partition := range partitions { topicPartitions = append(topicPartitions, topicPartitionAssignment{Topic: topic, Partition: partition}) } } return topicPartitions } golang-github-ibm-sarama-1.43.2/sticky_assignor_user_data_test.go000066400000000000000000000033571461256741300252100ustar00rootroot00000000000000package sarama import ( "encoding/base64" "testing" ) func TestStickyAssignorUserDataV0(t *testing.T) { // Single topic with deterministic ordering across encode-decode req := &StickyAssignorUserDataV0{} data := decodeUserDataBytes(t, "AAAAAQADdDAzAAAAAQAAAAU=") testDecodable(t, "", req, data) testEncodable(t, "", req, data) // Multiple partitions req = &StickyAssignorUserDataV0{} data = decodeUserDataBytes(t, "AAAAAQADdDE4AAAAEgAAAAAAAAABAAAAAgAAAAMAAAAEAAAABQAAAAYAAAAHAAAACAAAAAkAAAAKAAAACwAAAAwAAAANAAAADgAAAA8AAAAQAAAAEQ==") testDecodable(t, "", req, data) // Multiple topics and partitions req = &StickyAssignorUserDataV0{} data = decodeUserDataBytes(t, "AAAABQADdDEyAAAAAgAAAAIAAAAKAAN0MTEAAAABAAAABAADdDE0AAAAAQAAAAgAA3QxMwAAAAEAAAANAAN0MDkAAAABAAAABQ==") testDecodable(t, "", req, data) } func TestStickyAssignorUserDataV1(t *testing.T) { // Single topic with deterministic ordering across encode-decode req := &StickyAssignorUserDataV1{} data := decodeUserDataBytes(t, "AAAAAQADdDA2AAAAAgAAAAAAAAAE/////w==") testDecodable(t, "", req, data) testEncodable(t, "", req, data) // Multiple topics and partitions req = &StickyAssignorUserDataV1{} data = decodeUserDataBytes(t, "AAAABgADdDEwAAAAAgAAAAIAAAAJAAN0MTIAAAACAAAAAwAAAAsAA3QxNAAAAAEAAAAEAAN0MTMAAAABAAAACwADdDE1AAAAAQAAAAwAA3QwOQAAAAEAAAAG/////w==") testDecodable(t, "", req, data) // Generation is populated req = &StickyAssignorUserDataV1{} data = decodeUserDataBytes(t, "AAAAAQAHdG9waWMwMQAAAAMAAAAAAAAAAQAAAAIAAAAB") testDecodable(t, "", req, data) } func decodeUserDataBytes(t *testing.T, base64Data string) []byte { data, err := base64.StdEncoding.DecodeString(base64Data) if err != nil { t.Errorf("Error decoding data: %v", err) t.FailNow() } return data } golang-github-ibm-sarama-1.43.2/sync_group_request.go000066400000000000000000000070101461256741300226350ustar00rootroot00000000000000package sarama type SyncGroupRequestAssignment struct { // MemberId contains the ID of the member to assign. MemberId string // Assignment contains the member assignment. 
Assignment []byte } func (a *SyncGroupRequestAssignment) encode(pe packetEncoder, version int16) (err error) { if err := pe.putString(a.MemberId); err != nil { return err } if err := pe.putBytes(a.Assignment); err != nil { return err } return nil } func (a *SyncGroupRequestAssignment) decode(pd packetDecoder, version int16) (err error) { if a.MemberId, err = pd.getString(); err != nil { return err } if a.Assignment, err = pd.getBytes(); err != nil { return err } return nil } type SyncGroupRequest struct { // Version defines the protocol version to use for encode and decode Version int16 // GroupId contains the unique group identifier. GroupId string // GenerationId contains the generation of the group. GenerationId int32 // MemberId contains the member ID assigned by the group. MemberId string // GroupInstanceId contains the unique identifier of the consumer instance provided by end user. GroupInstanceId *string // GroupAssignments contains each assignment. GroupAssignments []SyncGroupRequestAssignment } func (s *SyncGroupRequest) encode(pe packetEncoder) (err error) { if err := pe.putString(s.GroupId); err != nil { return err } pe.putInt32(s.GenerationId) if err := pe.putString(s.MemberId); err != nil { return err } if s.Version >= 3 { if err := pe.putNullableString(s.GroupInstanceId); err != nil { return err } } if err := pe.putArrayLength(len(s.GroupAssignments)); err != nil { return err } for _, block := range s.GroupAssignments { if err := block.encode(pe, s.Version); err != nil { return err } } return nil } func (s *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) { s.Version = version if s.GroupId, err = pd.getString(); err != nil { return err } if s.GenerationId, err = pd.getInt32(); err != nil { return err } if s.MemberId, err = pd.getString(); err != nil { return err } if s.Version >= 3 { if s.GroupInstanceId, err = pd.getNullableString(); err != nil { return err } } if numAssignments, err := pd.getArrayLength(); err != nil { return err } else if numAssignments > 0 { s.GroupAssignments = make([]SyncGroupRequestAssignment, numAssignments) for i := 0; i < numAssignments; i++ { var block SyncGroupRequestAssignment if err := block.decode(pd, s.Version); err != nil { return err } s.GroupAssignments[i] = block } } return nil } func (r *SyncGroupRequest) key() int16 { return 14 } func (r *SyncGroupRequest) version() int16 { return r.Version } func (r *SyncGroupRequest) headerVersion() int16 { return 1 } func (r *SyncGroupRequest) isValidVersion() bool { return r.Version >= 0 && r.Version <= 3 } func (r *SyncGroupRequest) requiredVersion() KafkaVersion { switch r.Version { case 3: return V2_3_0_0 case 2: return V2_0_0_0 case 1: return V0_11_0_0 case 0: return V0_9_0_0 default: return V2_3_0_0 } } func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) { r.GroupAssignments = append(r.GroupAssignments, SyncGroupRequestAssignment{ MemberId: memberId, Assignment: memberAssignment, }) } func (r *SyncGroupRequest) AddGroupAssignmentMember( memberId string, memberAssignment *ConsumerGroupMemberAssignment, ) error { bin, err := encode(memberAssignment, nil) if err != nil { return err } r.AddGroupAssignment(memberId, bin) return nil } golang-github-ibm-sarama-1.43.2/sync_group_request_test.go000066400000000000000000000042651461256741300237050ustar00rootroot00000000000000package sarama import ( "reflect" "testing" ) var ( emptySyncGroupRequest = []byte{ 0, 3, 'f', 'o', 'o', // Group ID 0x00, 0x01, 0x02, 0x03, // Generation ID 0, 3, 'b', 'a', 
'z', // Member ID 0, 0, 0, 0, // no assignments } populatedSyncGroupRequest = []byte{ 0, 3, 'f', 'o', 'o', // Group ID 0x00, 0x01, 0x02, 0x03, // Generation ID 0, 3, 'b', 'a', 'z', // Member ID 0, 0, 0, 1, // one assignment 0, 3, 'b', 'a', 'z', // Member ID 0, 0, 0, 3, 'f', 'o', 'o', // Member assignment } ) func TestSyncGroupRequest(t *testing.T) { var request *SyncGroupRequest request = new(SyncGroupRequest) request.GroupId = "foo" request.GenerationId = 66051 request.MemberId = "baz" testRequest(t, "empty", request, emptySyncGroupRequest) request = new(SyncGroupRequest) request.GroupId = "foo" request.GenerationId = 66051 request.MemberId = "baz" request.AddGroupAssignment("baz", []byte("foo")) testRequest(t, "populated", request, populatedSyncGroupRequest) } var ( populatedSyncGroupRequestV3 = []byte{ 0, 3, 'f', 'o', 'o', // Group ID 0x00, 0x01, 0x02, 0x03, // Generation ID 0, 3, 'b', 'a', 'z', // Member ID 0, 3, 'g', 'i', 'd', // GroupInstance ID 0, 0, 0, 1, // one assignment 0, 3, 'b', 'a', 'z', // Member ID 0, 0, 0, 3, 'f', 'o', 'o', // Member assignment } ) func TestSyncGroupRequestV3AndPlus(t *testing.T) { groupInstanceId := "gid" tests := []struct { CaseName string Version int16 MessageBytes []byte Message *SyncGroupRequest }{ { "v3", 3, populatedSyncGroupRequestV3, &SyncGroupRequest{ Version: 3, GroupId: "foo", GenerationId: 0x00010203, MemberId: "baz", GroupInstanceId: &groupInstanceId, GroupAssignments: []SyncGroupRequestAssignment{ { MemberId: "baz", Assignment: []byte("foo"), }, }, }, }, } for _, c := range tests { request := new(SyncGroupRequest) testVersionDecodable(t, c.CaseName, request, c.MessageBytes, c.Version) if !reflect.DeepEqual(c.Message, request) { t.Errorf("case %s decode failed, expected:%+v got %+v", c.CaseName, c.Message, request) } testEncodable(t, c.CaseName, c.Message, c.MessageBytes) } } golang-github-ibm-sarama-1.43.2/sync_group_response.go000066400000000000000000000035121461256741300230060ustar00rootroot00000000000000package sarama import "time" type SyncGroupResponse struct { // Version defines the protocol version to use for encode and decode Version int16 // ThrottleTime contains the duration in milliseconds for which the // request was throttled due to a quota violation, or zero if the request // did not violate any quota. ThrottleTime int32 // Err contains the error code, or 0 if there was no error. Err KError // MemberAssignment contains the member assignment. 
MemberAssignment []byte } func (r *SyncGroupResponse) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) { assignment := new(ConsumerGroupMemberAssignment) err := decode(r.MemberAssignment, assignment, nil) return assignment, err } func (r *SyncGroupResponse) encode(pe packetEncoder) error { if r.Version >= 1 { pe.putInt32(r.ThrottleTime) } pe.putInt16(int16(r.Err)) return pe.putBytes(r.MemberAssignment) } func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) { r.Version = version if r.Version >= 1 { if r.ThrottleTime, err = pd.getInt32(); err != nil { return err } } kerr, err := pd.getInt16() if err != nil { return err } r.Err = KError(kerr) r.MemberAssignment, err = pd.getBytes() return } func (r *SyncGroupResponse) key() int16 { return 14 } func (r *SyncGroupResponse) version() int16 { return r.Version } func (r *SyncGroupResponse) headerVersion() int16 { return 0 } func (r *SyncGroupResponse) isValidVersion() bool { return r.Version >= 0 && r.Version <= 3 } func (r *SyncGroupResponse) requiredVersion() KafkaVersion { switch r.Version { case 3: return V2_3_0_0 case 2: return V2_0_0_0 case 1: return V0_11_0_0 case 0: return V0_9_0_0 default: return V2_3_0_0 } } func (r *SyncGroupResponse) throttleTime() time.Duration { return time.Duration(r.ThrottleTime) * time.Millisecond } golang-github-ibm-sarama-1.43.2/sync_group_response_test.go000066400000000000000000000030511461256741300240430ustar00rootroot00000000000000package sarama import ( "reflect" "testing" ) var ( syncGroupResponseV0NoError = []byte{ 0x00, 0x00, // No error 0, 0, 0, 3, 0x01, 0x02, 0x03, // Member assignment data } syncGroupResponseV0WithError = []byte{ 0, 27, // ErrRebalanceInProgress 0, 0, 0, 0, // No member assignment data } syncGroupResponseV1NoError = []byte{ 0, 0, 0, 100, // ThrottleTimeMs 0x00, 0x00, // No error 0, 0, 0, 3, 0x01, 0x02, 0x03, // Member assignment data } ) func TestSyncGroupResponse(t *testing.T) { tests := []struct { CaseName string Version int16 MessageBytes []byte Message *SyncGroupResponse }{ { "v0-noErr", 0, syncGroupResponseV0NoError, &SyncGroupResponse{ Version: 0, Err: ErrNoError, MemberAssignment: []byte{1, 2, 3}, }, }, { "v0-Err", 0, syncGroupResponseV0WithError, &SyncGroupResponse{ Version: 0, Err: ErrRebalanceInProgress, MemberAssignment: []byte{}, }, }, { "v1-noErr", 1, syncGroupResponseV1NoError, &SyncGroupResponse{ ThrottleTime: 100, Version: 1, Err: ErrNoError, MemberAssignment: []byte{1, 2, 3}, }, }, } for _, c := range tests { response := new(SyncGroupResponse) testVersionDecodable(t, c.CaseName, response, c.MessageBytes, c.Version) if !reflect.DeepEqual(c.Message, response) { t.Errorf("case %s decode failed, expected:%+v got %+v", c.CaseName, c.Message, response) } testEncodable(t, c.CaseName, c.Message, c.MessageBytes) } } golang-github-ibm-sarama-1.43.2/sync_producer.go000066400000000000000000000137131461256741300215630ustar00rootroot00000000000000package sarama import "sync" // SyncProducer publishes Kafka messages, blocking until they have been acknowledged. It routes messages to the correct // broker, refreshing metadata as appropriate, and parses responses for errors. You must call Close() on a producer // to avoid leaks, it may not be garbage-collected automatically when it passes out of scope. // // The SyncProducer comes with two caveats: it will generally be less efficient than the AsyncProducer, and the actual // durability guarantee provided when a message is acknowledged depend on the configured value of `Producer.RequiredAcks`. 
// There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost. // // For implementation reasons, the SyncProducer requires `Producer.Return.Errors` and `Producer.Return.Successes` to // be set to true in its configuration. type SyncProducer interface { // SendMessage produces a given message, and returns only when it either has // succeeded or failed to produce. It will return the partition and the offset // of the produced message, or an error if the message failed to produce. SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) // SendMessages produces a given set of messages, and returns only when all // messages in the set have either succeeded or failed. Note that messages // can succeed and fail individually; if some succeed and some fail, // SendMessages will return an error. SendMessages(msgs []*ProducerMessage) error // Close shuts down the producer; you must call this function before a producer // object passes out of scope, as it may otherwise leak memory. // You must call this before calling Close on the underlying client. Close() error // TxnStatus return current producer transaction status. TxnStatus() ProducerTxnStatusFlag // IsTransactional return true when current producer is transactional. IsTransactional() bool // BeginTxn mark current transaction as ready. BeginTxn() error // CommitTxn commit current transaction. CommitTxn() error // AbortTxn abort current transaction. AbortTxn() error // AddOffsetsToTxn add associated offsets to current transaction. AddOffsetsToTxn(offsets map[string][]*PartitionOffsetMetadata, groupId string) error // AddMessageToTxn add message offsets to current transaction. AddMessageToTxn(msg *ConsumerMessage, groupId string, metadata *string) error } type syncProducer struct { producer *asyncProducer wg sync.WaitGroup } // NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration. func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) { if config == nil { config = NewConfig() config.Producer.Return.Successes = true } if err := verifyProducerConfig(config); err != nil { return nil, err } p, err := NewAsyncProducer(addrs, config) if err != nil { return nil, err } return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil } // NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still // necessary to call Close() on the underlying client when shutting down this producer. 
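// A minimal usage sketch (the broker address is a placeholder; error handling
// is abbreviated):
//
//	config := NewConfig()
//	config.Producer.Return.Successes = true // the SyncProducer requires this
//	client, err := NewClient([]string{"localhost:9092"}, config)
//	if err != nil {
//		panic(err)
//	}
//	defer client.Close()
//	producer, err := NewSyncProducerFromClient(client)
//	if err != nil {
//		panic(err)
//	}
//	defer producer.Close() // runs before client.Close, as required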
func NewSyncProducerFromClient(client Client) (SyncProducer, error) { if err := verifyProducerConfig(client.Config()); err != nil { return nil, err } p, err := NewAsyncProducerFromClient(client) if err != nil { return nil, err } return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil } func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer { sp := &syncProducer{producer: p} sp.wg.Add(2) go withRecover(sp.handleSuccesses) go withRecover(sp.handleErrors) return sp } func verifyProducerConfig(config *Config) error { if !config.Producer.Return.Errors { return ConfigurationError("Producer.Return.Errors must be true to be used in a SyncProducer") } if !config.Producer.Return.Successes { return ConfigurationError("Producer.Return.Successes must be true to be used in a SyncProducer") } return nil } func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) { expectation := make(chan *ProducerError, 1) msg.expectation = expectation sp.producer.Input() <- msg if pErr := <-expectation; pErr != nil { return -1, -1, pErr.Err } return msg.Partition, msg.Offset, nil } func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error { expectations := make(chan chan *ProducerError, len(msgs)) go func() { for _, msg := range msgs { expectation := make(chan *ProducerError, 1) msg.expectation = expectation sp.producer.Input() <- msg expectations <- expectation } close(expectations) }() var errors ProducerErrors for expectation := range expectations { if pErr := <-expectation; pErr != nil { errors = append(errors, pErr) } } if len(errors) > 0 { return errors } return nil } func (sp *syncProducer) handleSuccesses() { defer sp.wg.Done() for msg := range sp.producer.Successes() { expectation := msg.expectation expectation <- nil } } func (sp *syncProducer) handleErrors() { defer sp.wg.Done() for err := range sp.producer.Errors() { expectation := err.Msg.expectation expectation <- err } } func (sp *syncProducer) Close() error { sp.producer.AsyncClose() sp.wg.Wait() return nil } func (sp *syncProducer) IsTransactional() bool { return sp.producer.IsTransactional() } func (sp *syncProducer) BeginTxn() error { return sp.producer.BeginTxn() } func (sp *syncProducer) CommitTxn() error { return sp.producer.CommitTxn() } func (sp *syncProducer) AbortTxn() error { return sp.producer.AbortTxn() } func (sp *syncProducer) AddOffsetsToTxn(offsets map[string][]*PartitionOffsetMetadata, groupId string) error { return sp.producer.AddOffsetsToTxn(offsets, groupId) } func (sp *syncProducer) AddMessageToTxn(msg *ConsumerMessage, groupId string, metadata *string) error { return sp.producer.AddMessageToTxn(msg, groupId, metadata) } func (p *syncProducer) TxnStatus() ProducerTxnStatusFlag { return p.producer.TxnStatus() } golang-github-ibm-sarama-1.43.2/sync_producer_test.go000066400000000000000000000221441461256741300226200ustar00rootroot00000000000000package sarama import ( "errors" "log" "sync" "testing" ) func TestSyncProducer(t *testing.T) { seedBroker := NewMockBroker(t, 1) leader := NewMockBroker(t, 2) metadataResponse := new(MetadataResponse) metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, nil, ErrNoError) seedBroker.Returns(metadataResponse) prodSuccess := new(ProduceResponse) prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) for i := 0; i < 10; i++ { leader.Returns(prodSuccess) } config := NewTestConfig() config.Producer.Return.Successes = true producer, err := 
NewSyncProducer([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } for i := 0; i < 10; i++ { msg := &ProducerMessage{ Topic: "my_topic", Value: StringEncoder(TestMessage), Metadata: "test", } partition, offset, err := producer.SendMessage(msg) if partition != 0 || msg.Partition != partition { t.Error("Unexpected partition") } if offset != 0 || msg.Offset != offset { t.Error("Unexpected offset") } if str, ok := msg.Metadata.(string); !ok || str != "test" { t.Error("Unexpected metadata") } if err != nil { t.Error(err) } } safeClose(t, producer) leader.Close() seedBroker.Close() } func TestSyncProducerTransactional(t *testing.T) { seedBroker := NewMockBroker(t, 1) defer seedBroker.Close() leader := NewMockBroker(t, 2) defer leader.Close() config := NewTestConfig() config.Version = V0_11_0_0 config.Producer.RequiredAcks = WaitForAll config.Producer.Return.Successes = true config.Producer.Transaction.ID = "test" config.Producer.Idempotent = true config.Producer.Retry.Max = 5 config.Net.MaxOpenRequests = 1 metadataResponse := new(MetadataResponse) metadataResponse.Version = 4 metadataResponse.ControllerID = leader.BrokerID() metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) metadataResponse.AddTopic("my_topic", ErrNoError) metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, nil, ErrNoError) seedBroker.Returns(metadataResponse) client, err := NewClient([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } defer safeClose(t, client) findCoordinatorResponse := new(FindCoordinatorResponse) findCoordinatorResponse.Coordinator = client.Brokers()[0] findCoordinatorResponse.Version = 1 leader.Returns(findCoordinatorResponse) initProducerIdResponse := new(InitProducerIDResponse) leader.Returns(initProducerIdResponse) addPartitionToTxn := new(AddPartitionsToTxnResponse) addPartitionToTxn.Errors = map[string][]*PartitionError{ "my_topic": { { Partition: 0, }, }, } leader.Returns(addPartitionToTxn) prodSuccess := new(ProduceResponse) prodSuccess.Version = 3 prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) for i := 0; i < 10; i++ { leader.Returns(prodSuccess) } endTxnResponse := &EndTxnResponse{} leader.Returns(endTxnResponse) producer, err := NewSyncProducerFromClient(client) if err != nil { t.Fatal(err) } if !producer.IsTransactional() { t.Error("producer is not transactional") } err = producer.BeginTxn() if err != nil { t.Fatal(err) } if producer.TxnStatus()&ProducerTxnFlagInTransaction == 0 { t.Error("transaction must started") } for i := 0; i < 10; i++ { msg := &ProducerMessage{ Topic: "my_topic", Value: StringEncoder(TestMessage), Metadata: "test", } partition, offset, err := producer.SendMessage(msg) if partition != 0 || msg.Partition != partition { t.Error("Unexpected partition") } if offset != 0 || msg.Offset != offset { t.Error("Unexpected offset") } if str, ok := msg.Metadata.(string); !ok || str != "test" { t.Error("Unexpected metadata") } if err != nil { t.Error(err) } } err = producer.CommitTxn() if err != nil { t.Fatal(err) } safeClose(t, producer) } func TestSyncProducerBatch(t *testing.T) { seedBroker := NewMockBroker(t, 1) leader := NewMockBroker(t, 2) metadataResponse := new(MetadataResponse) metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, nil, ErrNoError) seedBroker.Returns(metadataResponse) prodSuccess := new(ProduceResponse) prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) leader.Returns(prodSuccess) config := 
NewTestConfig() config.Producer.Flush.Messages = 3 config.Producer.Return.Successes = true producer, err := NewSyncProducer([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } err = producer.SendMessages([]*ProducerMessage{ { Topic: "my_topic", Value: StringEncoder(TestMessage), Metadata: "test", }, { Topic: "my_topic", Value: StringEncoder(TestMessage), Metadata: "test", }, { Topic: "my_topic", Value: StringEncoder(TestMessage), Metadata: "test", }, }) if err != nil { t.Error(err) } safeClose(t, producer) leader.Close() seedBroker.Close() } func TestConcurrentSyncProducer(t *testing.T) { seedBroker := NewMockBroker(t, 1) leader := NewMockBroker(t, 2) metadataResponse := new(MetadataResponse) metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, nil, ErrNoError) seedBroker.Returns(metadataResponse) prodSuccess := new(ProduceResponse) prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) leader.Returns(prodSuccess) config := NewTestConfig() config.Producer.Flush.Messages = 100 config.Producer.Return.Successes = true producer, err := NewSyncProducer([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } wg := sync.WaitGroup{} for i := 0; i < 100; i++ { wg.Add(1) go func() { msg := &ProducerMessage{Topic: "my_topic", Value: StringEncoder(TestMessage)} partition, _, err := producer.SendMessage(msg) if partition != 0 { t.Error("Unexpected partition") } if err != nil { t.Error(err) } wg.Done() }() } wg.Wait() safeClose(t, producer) leader.Close() seedBroker.Close() } func TestSyncProducerToNonExistingTopic(t *testing.T) { broker := NewMockBroker(t, 1) metadataResponse := new(MetadataResponse) metadataResponse.AddBroker(broker.Addr(), broker.BrokerID()) metadataResponse.AddTopicPartition("my_topic", 0, broker.BrokerID(), nil, nil, nil, ErrNoError) broker.Returns(metadataResponse) config := NewTestConfig() config.Metadata.Retry.Max = 0 config.Producer.Retry.Max = 0 config.Producer.Return.Successes = true producer, err := NewSyncProducer([]string{broker.Addr()}, config) if err != nil { t.Fatal(err) } metadataResponse = new(MetadataResponse) metadataResponse.AddBroker(broker.Addr(), broker.BrokerID()) metadataResponse.AddTopic("unknown", ErrUnknownTopicOrPartition) broker.Returns(metadataResponse) _, _, err = producer.SendMessage(&ProducerMessage{Topic: "unknown"}) if !errors.Is(err, ErrUnknownTopicOrPartition) { t.Error("Uxpected ErrUnknownTopicOrPartition, found:", err) } safeClose(t, producer) broker.Close() } func TestSyncProducerRecoveryWithRetriesDisabled(t *testing.T) { seedBroker := NewMockBroker(t, 1) leader1 := NewMockBroker(t, 2) leader2 := NewMockBroker(t, 3) metadataLeader1 := new(MetadataResponse) metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID()) metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, nil, ErrNoError) seedBroker.Returns(metadataLeader1) config := NewTestConfig() config.Producer.Retry.Max = 0 // disable! 
config.Producer.Retry.Backoff = 0 config.Producer.Return.Successes = true producer, err := NewSyncProducer([]string{seedBroker.Addr()}, config) if err != nil { t.Fatal(err) } seedBroker.Close() prodNotLeader := new(ProduceResponse) prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition) leader1.Returns(prodNotLeader) _, _, err = producer.SendMessage(&ProducerMessage{Topic: "my_topic", Value: StringEncoder(TestMessage)}) if !errors.Is(err, ErrNotLeaderForPartition) { t.Fatal(err) } metadataLeader2 := new(MetadataResponse) metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID()) metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, nil, ErrNoError) leader1.Returns(metadataLeader2) prodSuccess := new(ProduceResponse) prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) leader2.Returns(prodSuccess) _, _, err = producer.SendMessage(&ProducerMessage{Topic: "my_topic", Value: StringEncoder(TestMessage)}) if err != nil { t.Fatal(err) } leader1.Close() leader2.Close() safeClose(t, producer) } // This example shows the basic usage pattern of the SyncProducer. func ExampleSyncProducer() { producer, err := NewSyncProducer([]string{"localhost:9092"}, nil) if err != nil { log.Fatalln(err) } defer func() { if err := producer.Close(); err != nil { log.Fatalln(err) } }() msg := &ProducerMessage{Topic: "my_topic", Value: StringEncoder("testing 123")} partition, offset, err := producer.SendMessage(msg) if err != nil { log.Printf("FAILED to send message: %s\n", err) } else { log.Printf("> message sent to partition %d at offset %d\n", partition, offset) } } golang-github-ibm-sarama-1.43.2/timestamp.go000066400000000000000000000013501461256741300207010ustar00rootroot00000000000000package sarama import ( "fmt" "time" ) type Timestamp struct { *time.Time } func (t Timestamp) encode(pe packetEncoder) error { timestamp := int64(-1) if !t.Before(time.Unix(0, 0)) { timestamp = t.UnixNano() / int64(time.Millisecond) } else if !t.IsZero() { return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", t)} } pe.putInt64(timestamp) return nil } func (t Timestamp) decode(pd packetDecoder) error { millis, err := pd.getInt64() if err != nil { return err } // negative timestamps are invalid, in these cases we should return // a zero time timestamp := time.Time{} if millis >= 0 { timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond)) } *t.Time = timestamp return nil } golang-github-ibm-sarama-1.43.2/tools/000077500000000000000000000000001461256741300175105ustar00rootroot00000000000000golang-github-ibm-sarama-1.43.2/tools/README.md000066400000000000000000000016131461256741300207700ustar00rootroot00000000000000# Sarama tools This folder contains applications that are useful for exploration of your Kafka cluster, or instrumentation. Some of these tools mirror tools that ship with Kafka, but these tools won't require installing the JVM to function. - [kafka-console-producer](./kafka-console-producer): a command line tool to produce a single message to your Kafka custer. - [kafka-console-partitionconsumer](./kafka-console-partitionconsumer): (deprecated) a command line tool to consume a single partition of a topic on your Kafka cluster. - [kafka-console-consumer](./kafka-console-consumer): a command line tool to consume arbitrary partitions of a topic on your Kafka cluster. - [kafka-producer-performance](./kafka-producer-performance): a command line tool to performance test producers (sync and async) on your Kafka cluster. 
To install all tools, run `go install github.com/IBM/sarama/tools/...@latest` golang-github-ibm-sarama-1.43.2/tools/kafka-console-consumer/000077500000000000000000000000001461256741300240565ustar00rootroot00000000000000golang-github-ibm-sarama-1.43.2/tools/kafka-console-consumer/.gitignore000066400000000000000000000000631461256741300260450ustar00rootroot00000000000000kafka-console-consumer kafka-console-consumer.test golang-github-ibm-sarama-1.43.2/tools/kafka-console-consumer/README.md000066400000000000000000000016601461256741300253400ustar00rootroot00000000000000# kafka-console-consumer A simple command line tool to consume partitions of a topic and print the messages on the standard output. ### Installation go get github.com/IBM/sarama/tools/kafka-console-consumer ### Usage # Minimum invocation kafka-console-consumer -topic=test -brokers=kafka1:9092 # It will pick up a KAFKA_PEERS environment variable export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092 kafka-console-consumer -topic=test # You can specify the offset you want to start at. It can be either # `oldest`, `newest`. The default is `newest`. kafka-console-consumer -topic=test -offset=oldest kafka-console-consumer -topic=test -offset=newest # You can specify the partition(s) you want to consume as a comma-separated # list. The default is `all`. kafka-console-consumer -topic=test -partitions=1,2,3 # Display all command line options kafka-console-consumer -help golang-github-ibm-sarama-1.43.2/tools/kafka-console-consumer/kafka-console-consumer.go000066400000000000000000000105101461256741300307500ustar00rootroot00000000000000package main import ( "flag" "fmt" "log" "os" "os/signal" "strconv" "strings" "sync" "syscall" "github.com/IBM/sarama" "github.com/IBM/sarama/tools/tls" ) var ( brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster") topic = flag.String("topic", "", "REQUIRED: the topic to consume") partitions = flag.String("partitions", "all", "The partitions to consume, can be 'all' or comma-separated numbers") offset = flag.String("offset", "newest", "The offset to start with. 
Can be `oldest`, `newest`") verbose = flag.Bool("verbose", false, "Whether to turn on sarama logging") tlsEnabled = flag.Bool("tls-enabled", false, "Whether to enable TLS") tlsSkipVerify = flag.Bool("tls-skip-verify", false, "Whether skip TLS server cert verification") tlsClientCert = flag.String("tls-client-cert", "", "Client cert for client authentication (use with -tls-enabled and -tls-client-key)") tlsClientKey = flag.String("tls-client-key", "", "Client key for client authentication (use with tls-enabled and -tls-client-cert)") bufferSize = flag.Int("buffer-size", 256, "The buffer size of the message channel.") logger = log.New(os.Stderr, "", log.LstdFlags) ) func main() { flag.Parse() if *brokerList == "" { printUsageErrorAndExit("You have to provide -brokers as a comma-separated list, or set the KAFKA_PEERS environment variable.") } if *topic == "" { printUsageErrorAndExit("-topic is required") } if *verbose { sarama.Logger = logger } var initialOffset int64 switch *offset { case "oldest": initialOffset = sarama.OffsetOldest case "newest": initialOffset = sarama.OffsetNewest default: printUsageErrorAndExit("-offset should be `oldest` or `newest`") } config := sarama.NewConfig() if *tlsEnabled { tlsConfig, err := tls.NewConfig(*tlsClientCert, *tlsClientKey) if err != nil { printErrorAndExit(69, "Failed to create TLS config: %s", err) } config.Net.TLS.Enable = true config.Net.TLS.Config = tlsConfig config.Net.TLS.Config.InsecureSkipVerify = *tlsSkipVerify } c, err := sarama.NewConsumer(strings.Split(*brokerList, ","), config) if err != nil { printErrorAndExit(69, "Failed to start consumer: %s", err) } partitionList, err := getPartitions(c) if err != nil { printErrorAndExit(69, "Failed to get the list of partitions: %s", err) } var ( messages = make(chan *sarama.ConsumerMessage, *bufferSize) closing = make(chan struct{}) wg sync.WaitGroup ) go func() { signals := make(chan os.Signal, 1) signal.Notify(signals, syscall.SIGTERM, os.Interrupt) <-signals logger.Println("Initiating shutdown of consumer...") close(closing) }() for _, partition := range partitionList { pc, err := c.ConsumePartition(*topic, partition, initialOffset) if err != nil { printErrorAndExit(69, "Failed to start consumer for partition %d: %s", partition, err) } go func(pc sarama.PartitionConsumer) { <-closing pc.AsyncClose() }(pc) wg.Add(1) go func(pc sarama.PartitionConsumer) { defer wg.Done() for message := range pc.Messages() { messages <- message } }(pc) } go func() { for msg := range messages { fmt.Printf("Partition:\t%d\n", msg.Partition) fmt.Printf("Offset:\t%d\n", msg.Offset) fmt.Printf("Key:\t%s\n", string(msg.Key)) fmt.Printf("Value:\t%s\n", string(msg.Value)) fmt.Println() } }() wg.Wait() logger.Println("Done consuming topic", *topic) close(messages) if err := c.Close(); err != nil { logger.Println("Failed to close consumer: ", err) } } func getPartitions(c sarama.Consumer) ([]int32, error) { if *partitions == "all" { return c.Partitions(*topic) } tmp := strings.Split(*partitions, ",") var pList []int32 for i := range tmp { val, err := strconv.ParseInt(tmp[i], 10, 32) if err != nil { return nil, err } pList = append(pList, int32(val)) } return pList, nil } func printErrorAndExit(code int, format string, values ...interface{}) { fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) fmt.Fprintln(os.Stderr) os.Exit(code) } func printUsageErrorAndExit(format string, values ...interface{}) { fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) fmt.Fprintln(os.Stderr) 
fmt.Fprintln(os.Stderr, "Available command line options:") flag.PrintDefaults() os.Exit(64) } golang-github-ibm-sarama-1.43.2/tools/kafka-console-partitionconsumer/000077500000000000000000000000001461256741300260105ustar00rootroot00000000000000golang-github-ibm-sarama-1.43.2/tools/kafka-console-partitionconsumer/.gitignore000066400000000000000000000001051461256741300277740ustar00rootroot00000000000000kafka-console-partitionconsumer kafka-console-partitionconsumer.test golang-github-ibm-sarama-1.43.2/tools/kafka-console-partitionconsumer/README.md000066400000000000000000000017421461256741300272730ustar00rootroot00000000000000# kafka-console-partitionconsumer NOTE: this tool is deprecated in favour of the more general and more powerful `kafka-console-consumer`. A simple command line tool to consume a partition of a topic and print the messages on the standard output. ### Installation go get github.com/IBM/sarama/tools/kafka-console-partitionconsumer ### Usage # Minimum invocation kafka-console-partitionconsumer -topic=test -partition=4 -brokers=kafka1:9092 # It will pick up a KAFKA_PEERS environment variable export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092 kafka-console-partitionconsumer -topic=test -partition=4 # You can specify the offset you want to start at. It can be either # `oldest`, `newest`, or a specific offset number kafka-console-partitionconsumer -topic=test -partition=3 -offset=oldest kafka-console-partitionconsumer -topic=test -partition=2 -offset=1337 # Display all command line options kafka-console-partitionconsumer -help kafka-console-partitionconsumer.go000066400000000000000000000050171461256741300345630ustar00rootroot00000000000000golang-github-ibm-sarama-1.43.2/tools/kafka-console-partitionconsumerpackage main import ( "flag" "fmt" "log" "os" "os/signal" "strconv" "strings" "syscall" "github.com/IBM/sarama" ) var ( brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster") topic = flag.String("topic", "", "REQUIRED: the topic to consume") partition = flag.Int("partition", -1, "REQUIRED: the partition to consume") offset = flag.String("offset", "newest", "The offset to start with. 
Can be `oldest`, `newest`, or an actual offset") verbose = flag.Bool("verbose", false, "Whether to turn on sarama logging") logger = log.New(os.Stderr, "", log.LstdFlags) ) func main() { flag.Parse() if *brokerList == "" { printUsageErrorAndExit("You have to provide -brokers as a comma-separated list, or set the KAFKA_PEERS environment variable.") } if *topic == "" { printUsageErrorAndExit("-topic is required") } if *partition == -1 { printUsageErrorAndExit("-partition is required") } if *verbose { sarama.Logger = logger } var ( initialOffset int64 offsetError error ) switch *offset { case "oldest": initialOffset = sarama.OffsetOldest case "newest": initialOffset = sarama.OffsetNewest default: initialOffset, offsetError = strconv.ParseInt(*offset, 10, 64) } if offsetError != nil { printUsageErrorAndExit("Invalid initial offset: %s", *offset) } c, err := sarama.NewConsumer(strings.Split(*brokerList, ","), nil) if err != nil { printErrorAndExit(69, "Failed to start consumer: %s", err) } pc, err := c.ConsumePartition(*topic, int32(*partition), initialOffset) if err != nil { printErrorAndExit(69, "Failed to start partition consumer: %s", err) } go func() { signals := make(chan os.Signal, 1) signal.Notify(signals, syscall.SIGTERM, os.Interrupt) <-signals pc.AsyncClose() }() for msg := range pc.Messages() { fmt.Printf("Offset:\t%d\n", msg.Offset) fmt.Printf("Key:\t%s\n", string(msg.Key)) fmt.Printf("Value:\t%s\n", string(msg.Value)) fmt.Println() } if err := c.Close(); err != nil { logger.Println("Failed to close consumer: ", err) } } func printErrorAndExit(code int, format string, values ...interface{}) { fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) fmt.Fprintln(os.Stderr) os.Exit(code) } func printUsageErrorAndExit(format string, values ...interface{}) { fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) fmt.Fprintln(os.Stderr) fmt.Fprintln(os.Stderr, "Available command line options:") flag.PrintDefaults() os.Exit(64) } golang-github-ibm-sarama-1.43.2/tools/kafka-console-producer/000077500000000000000000000000001461256741300240465ustar00rootroot00000000000000golang-github-ibm-sarama-1.43.2/tools/kafka-console-producer/.gitignore000066400000000000000000000000631461256741300260350ustar00rootroot00000000000000kafka-console-producer kafka-console-producer.test golang-github-ibm-sarama-1.43.2/tools/kafka-console-producer/README.md000066400000000000000000000021411461256741300253230ustar00rootroot00000000000000# kafka-console-producer A simple command line tool to produce a single message to Kafka. ### Installation go get github.com/IBM/sarama/tools/kafka-console-producer ### Usage # Minimum invocation kafka-console-producer -topic=test -value=value -brokers=kafka1:9092 # It will pick up a KAFKA_PEERS environment variable export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092 kafka-console-producer -topic=test -value=value # It will read the value from stdin by using pipes echo "hello world" | kafka-console-producer -topic=test # Specify a key: echo "hello world" | kafka-console-producer -topic=test -key=key # Partitioning: by default, kafka-console-producer will partition as follows: # - manual partitioning if a -partition is provided # - hash partitioning by key if a -key is provided # - random partitioning otherwise. 
# # You can override this using the -partitioner argument: echo "hello world" | kafka-console-producer -topic=test -key=key -partitioner=random # Display all command line options kafka-console-producer -help golang-github-ibm-sarama-1.43.2/tools/kafka-console-producer/kafka-console-producer.go000066400000000000000000000121361461256741300307360ustar00rootroot00000000000000package main import ( "flag" "fmt" "io" "log" "os" "strings" "github.com/rcrowley/go-metrics" "github.com/IBM/sarama" "github.com/IBM/sarama/tools/tls" ) var ( brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster. You can also set the KAFKA_PEERS environment variable") headers = flag.String("headers", "", "The headers of the message to produce. Example: -headers=foo:bar,bar:foo") topic = flag.String("topic", "", "REQUIRED: the topic to produce to") key = flag.String("key", "", "The key of the message to produce. Can be empty.") value = flag.String("value", "", "REQUIRED: the value of the message to produce. You can also provide the value on stdin.") partitioner = flag.String("partitioner", "", "The partitioning scheme to use. Can be `hash`, `manual`, or `random`") partition = flag.Int("partition", -1, "The partition to produce to.") verbose = flag.Bool("verbose", false, "Turn on sarama logging to stderr") showMetrics = flag.Bool("metrics", false, "Output metrics on successful publish to stderr") silent = flag.Bool("silent", false, "Turn off printing the message's topic, partition, and offset to stdout") tlsEnabled = flag.Bool("tls-enabled", false, "Whether to enable TLS") tlsSkipVerify = flag.Bool("tls-skip-verify", false, "Whether skip TLS server cert verification") tlsClientCert = flag.String("tls-client-cert", "", "Client cert for client authentication (use with -tls-enabled and -tls-client-key)") tlsClientKey = flag.String("tls-client-key", "", "Client key for client authentication (use with tls-enabled and -tls-client-cert)") logger = log.New(os.Stderr, "", log.LstdFlags) ) func main() { flag.Parse() if *brokerList == "" { printUsageErrorAndExit("no -brokers specified. 
Alternatively, set the KAFKA_PEERS environment variable") } if *topic == "" { printUsageErrorAndExit("no -topic specified") } if *verbose { sarama.Logger = logger } config := sarama.NewConfig() config.Producer.RequiredAcks = sarama.WaitForAll config.Producer.Return.Successes = true if *tlsEnabled { tlsConfig, err := tls.NewConfig(*tlsClientCert, *tlsClientKey) if err != nil { printErrorAndExit(69, "Failed to create TLS config: %s", err) } config.Net.TLS.Enable = true config.Net.TLS.Config = tlsConfig config.Net.TLS.Config.InsecureSkipVerify = *tlsSkipVerify } switch *partitioner { case "": if *partition >= 0 { config.Producer.Partitioner = sarama.NewManualPartitioner } else { config.Producer.Partitioner = sarama.NewHashPartitioner } case "hash": config.Producer.Partitioner = sarama.NewHashPartitioner case "random": config.Producer.Partitioner = sarama.NewRandomPartitioner case "manual": config.Producer.Partitioner = sarama.NewManualPartitioner if *partition == -1 { printUsageErrorAndExit("-partition is required when partitioning manually") } default: printUsageErrorAndExit(fmt.Sprintf("Partitioner %s not supported.", *partitioner)) } message := &sarama.ProducerMessage{Topic: *topic, Partition: int32(*partition)} if *key != "" { message.Key = sarama.StringEncoder(*key) } if *value != "" { message.Value = sarama.StringEncoder(*value) } else if stdinAvailable() { bytes, err := io.ReadAll(os.Stdin) if err != nil { printErrorAndExit(66, "Failed to read data from the standard input: %s", err) } message.Value = sarama.ByteEncoder(bytes) } else { printUsageErrorAndExit("-value is required, or you have to provide the value on stdin") } if *headers != "" { var hdrs []sarama.RecordHeader arrHdrs := strings.Split(*headers, ",") for _, h := range arrHdrs { if header := strings.Split(h, ":"); len(header) != 2 { printUsageErrorAndExit("-header should be key:value. Example: -headers=foo:bar,bar:foo") } else { hdrs = append(hdrs, sarama.RecordHeader{ Key: []byte(header[0]), Value: []byte(header[1]), }) } } if len(hdrs) != 0 { message.Headers = hdrs } } producer, err := sarama.NewSyncProducer(strings.Split(*brokerList, ","), config) if err != nil { printErrorAndExit(69, "Failed to open Kafka producer: %s", err) } defer func() { if err := producer.Close(); err != nil { logger.Println("Failed to close Kafka producer cleanly:", err) } }() partition, offset, err := producer.SendMessage(message) if err != nil { printErrorAndExit(69, "Failed to produce message: %s", err) } else if !*silent { fmt.Printf("topic=%s\tpartition=%d\toffset=%d\n", *topic, partition, offset) } if *showMetrics { metrics.WriteOnce(config.MetricRegistry, os.Stderr) } } func printErrorAndExit(code int, format string, values ...interface{}) { fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) fmt.Fprintln(os.Stderr) os.Exit(code) } func printUsageErrorAndExit(message string) { fmt.Fprintln(os.Stderr, "ERROR:", message) fmt.Fprintln(os.Stderr) fmt.Fprintln(os.Stderr, "Available command line options:") flag.PrintDefaults() os.Exit(64) } func stdinAvailable() bool { stat, _ := os.Stdin.Stat() return (stat.Mode() & os.ModeCharDevice) == 0 } golang-github-ibm-sarama-1.43.2/tools/kafka-producer-performance/000077500000000000000000000000001461256741300247055ustar00rootroot00000000000000golang-github-ibm-sarama-1.43.2/tools/kafka-producer-performance/README.md000066400000000000000000000006231461256741300261650ustar00rootroot00000000000000# kafka-producer-performance A command line tool to test producer performance. 
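It drives either a synchronous or an asynchronous sarama producer (selected with the -sync flag) and, while the load runs, periodically prints the client's producer metrics (record send rate, request latency percentiles, outgoing byte rate and in-flight requests) to stdout.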
### Installation go get github.com/IBM/sarama/tools/kafka-producer-performance ### Usage # Display all command line options kafka-producer-performance -help # Minimum invocation kafka-producer-performance \ -brokers=kafka:9092 \ -message-load=50000 \ -message-size=100 \ -topic=producer_test golang-github-ibm-sarama-1.43.2/tools/kafka-producer-performance/main.go000066400000000000000000000313651461256741300261700ustar00rootroot00000000000000package main import ( "context" "crypto/rand" "crypto/x509" "flag" "fmt" "io" "log" "os" "strings" gosync "sync" "time" "github.com/rcrowley/go-metrics" "github.com/IBM/sarama" "github.com/IBM/sarama/tools/tls" ) var ( sync = flag.Bool( "sync", false, "Use a synchronous producer.", ) messageLoad = flag.Int( "message-load", 0, "REQUIRED: The number of messages to produce to -topic.", ) messageSize = flag.Int( "message-size", 0, "REQUIRED: The approximate size (in bytes) of each message to produce to -topic.", ) brokers = flag.String( "brokers", "", "REQUIRED: A comma separated list of broker addresses.", ) securityProtocol = flag.String( "security-protocol", "PLAINTEXT", "The name of the security protocol to talk to Kafka (PLAINTEXT, SSL).", ) tlsRootCACerts = flag.String( "tls-ca-certs", "", "The path to a file that contains a set of root certificate authorities in PEM format "+ "to trust when verifying broker certificates when -security-protocol=SSL "+ "(leave empty to use the host's root CA set).", ) tlsClientCert = flag.String( "tls-client-cert", "", "The path to a file that contains the client certificate to send to the broker "+ "in PEM format if client authentication is required when -security-protocol=SSL "+ "(leave empty to disable client authentication).", ) tlsClientKey = flag.String( "tls-client-key", "", "The path to a file that contains the client private key linked to the client certificate "+ "in PEM format when -security-protocol=SSL (REQUIRED if tls-client-cert is provided).", ) topic = flag.String( "topic", "", "REQUIRED: The topic to run the performance test on.", ) partition = flag.Int( "partition", -1, "The partition of -topic to run the performance test on.", ) throughput = flag.Int( "throughput", 0, "The maximum number of messages to send per second (0 for no limit).", ) maxOpenRequests = flag.Int( "max-open-requests", 5, "The maximum number of unacknowledged requests the client will send on a single connection before blocking.", ) maxMessageBytes = flag.Int( "max-message-bytes", 1000000, "The max permitted size of a message.", ) requiredAcks = flag.Int( "required-acks", 1, "The required number of acks needed from the broker (-1: all, 0: none, 1: local).", ) timeout = flag.Duration( "timeout", 10*time.Second, "The duration the producer will wait to receive -required-acks.", ) partitioner = flag.String( "partitioner", "roundrobin", "The partitioning scheme to use (hash, manual, random, roundrobin).", ) compression = flag.String( "compression", "none", "The compression method to use (none, gzip, snappy, lz4).", ) flushFrequency = flag.Duration( "flush-frequency", 0, "The best-effort frequency of flushes.", ) flushBytes = flag.Int( "flush-bytes", 0, "The best-effort number of bytes needed to trigger a flush.", ) flushMessages = flag.Int( "flush-messages", 0, "The best-effort number of messages needed to trigger a flush.", ) flushMaxMessages = flag.Int( "flush-max-messages", 0, "The maximum number of messages the producer will send in a single request.", ) clientID = flag.String( "client-id", "sarama", "The client ID sent with every 
request to the brokers.", ) channelBufferSize = flag.Int( "channel-buffer-size", 256, "The number of events to buffer in internal and external channels.", ) routines = flag.Int( "routines", 1, "The number of routines to send the messages from (-sync only).", ) version = flag.String( "version", "0.8.2.0", "The assumed version of Kafka.", ) verbose = flag.Bool( "verbose", false, "Turn on sarama logging to stderr", ) ) func parseCompression(scheme string) sarama.CompressionCodec { switch scheme { case "none": return sarama.CompressionNone case "gzip": return sarama.CompressionGZIP case "snappy": return sarama.CompressionSnappy case "lz4": return sarama.CompressionLZ4 default: printUsageErrorAndExit(fmt.Sprintf("Unknown -compression: %s", scheme)) } panic("should not happen") } func parsePartitioner(scheme string, partition int) sarama.PartitionerConstructor { if partition < 0 && scheme == "manual" { printUsageErrorAndExit("-partition must not be -1 for -partitioning=manual") } switch scheme { case "manual": return sarama.NewManualPartitioner case "hash": return sarama.NewHashPartitioner case "random": return sarama.NewRandomPartitioner case "roundrobin": return sarama.NewRoundRobinPartitioner default: printUsageErrorAndExit(fmt.Sprintf("Unknown -partitioning: %s", scheme)) } panic("should not happen") } func parseVersion(version string) sarama.KafkaVersion { result, err := sarama.ParseKafkaVersion(version) if err != nil { printUsageErrorAndExit(fmt.Sprintf("unknown -version: %s", version)) } return result } func generateMessages(topic string, partition, messageLoad, messageSize int) []*sarama.ProducerMessage { messages := make([]*sarama.ProducerMessage, messageLoad) for i := 0; i < messageLoad; i++ { payload := make([]byte, messageSize) if _, err := rand.Read(payload); err != nil { printErrorAndExit(69, "Failed to generate message payload: %s", err) } messages[i] = &sarama.ProducerMessage{ Topic: topic, Partition: int32(partition), Value: sarama.ByteEncoder(payload), } } return messages } func main() { flag.Parse() if *brokers == "" { printUsageErrorAndExit("-brokers is required") } if *topic == "" { printUsageErrorAndExit("-topic is required") } if *messageLoad <= 0 { printUsageErrorAndExit("-message-load must be greater than 0") } if *messageSize <= 0 { printUsageErrorAndExit("-message-size must be greater than 0") } if *routines < 1 || *routines > *messageLoad { printUsageErrorAndExit("-routines must be greater than 0 and less than or equal to -message-load") } if *securityProtocol != "PLAINTEXT" && *securityProtocol != "SSL" { printUsageErrorAndExit(fmt.Sprintf("-security-protocol %q is not supported", *securityProtocol)) } if *verbose { sarama.Logger = log.New(os.Stderr, "", log.LstdFlags) } config := sarama.NewConfig() config.Net.MaxOpenRequests = *maxOpenRequests config.Producer.MaxMessageBytes = *maxMessageBytes config.Producer.RequiredAcks = sarama.RequiredAcks(*requiredAcks) config.Producer.Timeout = *timeout config.Producer.Partitioner = parsePartitioner(*partitioner, *partition) config.Producer.Compression = parseCompression(*compression) config.Producer.Flush.Frequency = *flushFrequency config.Producer.Flush.Bytes = *flushBytes config.Producer.Flush.Messages = *flushMessages config.Producer.Flush.MaxMessages = *flushMaxMessages config.Producer.Return.Successes = true config.ClientID = *clientID config.ChannelBufferSize = *channelBufferSize config.Version = parseVersion(*version) if *securityProtocol == "SSL" { tlsConfig, err := tls.NewConfig(*tlsClientCert, *tlsClientKey) if err 
!= nil { printErrorAndExit(69, "failed to load client certificate from: %s and private key from: %s: %v", *tlsClientCert, *tlsClientKey, err) } if *tlsRootCACerts != "" { rootCAsBytes, err := os.ReadFile(*tlsRootCACerts) if err != nil { printErrorAndExit(69, "failed to read root CA certificates: %v", err) } certPool := x509.NewCertPool() if !certPool.AppendCertsFromPEM(rootCAsBytes) { printErrorAndExit(69, "failed to load root CA certificates from file: %s", *tlsRootCACerts) } // Use specific root CA set vs the host's set tlsConfig.RootCAs = certPool } config.Net.TLS.Enable = true config.Net.TLS.Config = tlsConfig } if err := config.Validate(); err != nil { printErrorAndExit(69, "Invalid configuration: %s", err) } // Print out metrics periodically. done := make(chan struct{}) ctx, cancel := context.WithCancel(context.Background()) go func(ctx context.Context) { defer close(done) t := time.Tick(5 * time.Second) for { select { case <-t: printMetrics(os.Stdout, config.MetricRegistry) case <-ctx.Done(): return } } }(ctx) brokers := strings.Split(*brokers, ",") if *sync { runSyncProducer(*topic, *partition, *messageLoad, *messageSize, *routines, config, brokers, *throughput) } else { runAsyncProducer(*topic, *partition, *messageLoad, *messageSize, config, brokers, *throughput) } cancel() <-done } func runAsyncProducer(topic string, partition, messageLoad, messageSize int, config *sarama.Config, brokers []string, throughput int) { producer, err := sarama.NewAsyncProducer(brokers, config) if err != nil { printErrorAndExit(69, "Failed to create producer: %s", err) } defer func() { // Print final metrics. printMetrics(os.Stdout, config.MetricRegistry) if err := producer.Close(); err != nil { printErrorAndExit(69, "Failed to close producer: %s", err) } }() messages := generateMessages(topic, partition, messageLoad, messageSize) messagesDone := make(chan struct{}) go func() { for i := 0; i < messageLoad; i++ { select { case <-producer.Successes(): case err = <-producer.Errors(): printErrorAndExit(69, "%s", err) } } messagesDone <- struct{}{} }() if throughput > 0 { ticker := time.NewTicker(time.Second) for idx, message := range messages { producer.Input() <- message if (idx+1)%throughput == 0 { <-ticker.C } } ticker.Stop() } else { for _, message := range messages { producer.Input() <- message } } <-messagesDone close(messagesDone) } func runSyncProducer(topic string, partition, messageLoad, messageSize, routines int, config *sarama.Config, brokers []string, throughput int) { producer, err := sarama.NewSyncProducer(brokers, config) if err != nil { printErrorAndExit(69, "Failed to create producer: %s", err) } defer func() { // Print final metrics. 
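// A final snapshot of the metrics registry is written here, once all messages have been
// acknowledged, so the closing figures are reported before the producer is shut down.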
printMetrics(os.Stdout, config.MetricRegistry) if err := producer.Close(); err != nil { printErrorAndExit(69, "Failed to close producer: %s", err) } }() messages := make([][]*sarama.ProducerMessage, routines) for i := 0; i < routines; i++ { if i == routines-1 { messages[i] = generateMessages(topic, partition, messageLoad/routines+messageLoad%routines, messageSize) } else { messages[i] = generateMessages(topic, partition, messageLoad/routines, messageSize) } } var wg gosync.WaitGroup if throughput > 0 { for _, messages := range messages { messages := messages wg.Add(1) go func() { ticker := time.NewTicker(time.Second) for _, message := range messages { for i := 0; i < throughput; i++ { _, _, err = producer.SendMessage(message) if err != nil { printErrorAndExit(69, "Failed to send message: %s", err) } } <-ticker.C } ticker.Stop() wg.Done() }() } } else { for _, messages := range messages { messages := messages wg.Add(1) go func() { for _, message := range messages { _, _, err = producer.SendMessage(message) if err != nil { printErrorAndExit(69, "Failed to send message: %s", err) } } wg.Done() }() } } wg.Wait() } func printMetrics(w io.Writer, r metrics.Registry) { recordSendRateMetric := r.Get("record-send-rate") requestLatencyMetric := r.Get("request-latency-in-ms") outgoingByteRateMetric := r.Get("outgoing-byte-rate") requestsInFlightMetric := r.Get("requests-in-flight") if recordSendRateMetric == nil || requestLatencyMetric == nil || outgoingByteRateMetric == nil || requestsInFlightMetric == nil { return } recordSendRate := recordSendRateMetric.(metrics.Meter).Snapshot() requestLatency := requestLatencyMetric.(metrics.Histogram).Snapshot() requestLatencyPercentiles := requestLatency.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) outgoingByteRate := outgoingByteRateMetric.(metrics.Meter).Snapshot() requestsInFlight := requestsInFlightMetric.(metrics.Counter).Count() fmt.Fprintf(w, "%d records sent, %.1f records/sec (%.2f MiB/sec ingress, %.2f MiB/sec egress), "+ "%.1f ms avg latency, %.1f ms stddev, %.1f ms 50th, %.1f ms 75th, "+ "%.1f ms 95th, %.1f ms 99th, %.1f ms 99.9th, %d total req. 
in flight\n", recordSendRate.Count(), recordSendRate.RateMean(), recordSendRate.RateMean()*float64(*messageSize)/1024/1024, outgoingByteRate.RateMean()/1024/1024, requestLatency.Mean(), requestLatency.StdDev(), requestLatencyPercentiles[0], requestLatencyPercentiles[1], requestLatencyPercentiles[2], requestLatencyPercentiles[3], requestLatencyPercentiles[4], requestsInFlight, ) } func printUsageErrorAndExit(message string) { fmt.Fprintln(os.Stderr, "ERROR:", message) fmt.Fprintln(os.Stderr) fmt.Fprintln(os.Stderr, "Available command line options:") flag.PrintDefaults() os.Exit(64) } func printErrorAndExit(code int, format string, values ...interface{}) { fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) fmt.Fprintln(os.Stderr) os.Exit(code) } golang-github-ibm-sarama-1.43.2/tools/tls/000077500000000000000000000000001461256741300203125ustar00rootroot00000000000000golang-github-ibm-sarama-1.43.2/tools/tls/config.go000066400000000000000000000006111461256741300221040ustar00rootroot00000000000000package tls import "crypto/tls" func NewConfig(clientCert, clientKey string) (*tls.Config, error) { tlsConfig := tls.Config{ MinVersion: tls.VersionTLS12, } if clientCert != "" && clientKey != "" { cert, err := tls.LoadX509KeyPair(clientCert, clientKey) if err != nil { return &tlsConfig, err } tlsConfig.Certificates = []tls.Certificate{cert} } return &tlsConfig, nil } golang-github-ibm-sarama-1.43.2/transaction_manager.go000066400000000000000000000723621461256741300227300ustar00rootroot00000000000000package sarama import ( "errors" "fmt" "strings" "sync" "time" ) // ProducerTxnStatusFlag mark current transaction status. type ProducerTxnStatusFlag int16 const ( // ProducerTxnFlagUninitialized when txnmgr is created ProducerTxnFlagUninitialized ProducerTxnStatusFlag = 1 << iota // ProducerTxnFlagInitializing when txnmgr is initializing ProducerTxnFlagInitializing // ProducerTxnFlagReady when is ready to receive transaction ProducerTxnFlagReady // ProducerTxnFlagInTransaction when transaction is started ProducerTxnFlagInTransaction // ProducerTxnFlagEndTransaction when transaction will be committed ProducerTxnFlagEndTransaction // ProducerTxnFlagInError when having abortable or fatal error ProducerTxnFlagInError // ProducerTxnFlagCommittingTransaction when committing txn ProducerTxnFlagCommittingTransaction // ProducerTxnFlagAbortingTransaction when committing txn ProducerTxnFlagAbortingTransaction // ProducerTxnFlagAbortableError when producer encounter an abortable error // Must call AbortTxn in this case. ProducerTxnFlagAbortableError // ProducerTxnFlagFatalError when producer encounter an fatal error // Must Close an recreate it. 
ProducerTxnFlagFatalError ) func (s ProducerTxnStatusFlag) String() string { status := make([]string, 0) if s&ProducerTxnFlagUninitialized != 0 { status = append(status, "ProducerTxnStateUninitialized") } if s&ProducerTxnFlagInitializing != 0 { status = append(status, "ProducerTxnStateInitializing") } if s&ProducerTxnFlagReady != 0 { status = append(status, "ProducerTxnStateReady") } if s&ProducerTxnFlagInTransaction != 0 { status = append(status, "ProducerTxnStateInTransaction") } if s&ProducerTxnFlagEndTransaction != 0 { status = append(status, "ProducerTxnStateEndTransaction") } if s&ProducerTxnFlagInError != 0 { status = append(status, "ProducerTxnStateInError") } if s&ProducerTxnFlagCommittingTransaction != 0 { status = append(status, "ProducerTxnStateCommittingTransaction") } if s&ProducerTxnFlagAbortingTransaction != 0 { status = append(status, "ProducerTxnStateAbortingTransaction") } if s&ProducerTxnFlagAbortableError != 0 { status = append(status, "ProducerTxnStateAbortableError") } if s&ProducerTxnFlagFatalError != 0 { status = append(status, "ProducerTxnStateFatalError") } return strings.Join(status, "|") } // transactionManager keeps the state necessary to ensure idempotent production type transactionManager struct { producerID int64 producerEpoch int16 sequenceNumbers map[string]int32 mutex sync.Mutex transactionalID string transactionTimeout time.Duration client Client // when kafka cluster is at least 2.5.0. // used to recover when producer failed. coordinatorSupportsBumpingEpoch bool // When producer need to bump it's epoch. epochBumpRequired bool // Record last seen error. lastError error // Ensure that status is never accessed with a race-condition. statusLock sync.RWMutex status ProducerTxnStatusFlag // Ensure that only one goroutine will update partitions in current transaction. partitionInTxnLock sync.Mutex pendingPartitionsInCurrentTxn topicPartitionSet partitionsInCurrentTxn topicPartitionSet // Offsets to add to transaction. offsetsInCurrentTxn map[string]topicPartitionOffsets } const ( noProducerID = -1 noProducerEpoch = -1 // see publishTxnPartitions comment. addPartitionsRetryBackoff = 20 * time.Millisecond ) // txnmngr allowed transitions. var producerTxnTransitions = map[ProducerTxnStatusFlag][]ProducerTxnStatusFlag{ ProducerTxnFlagUninitialized: { ProducerTxnFlagReady, ProducerTxnFlagInError, }, // When we need are initializing ProducerTxnFlagInitializing: { ProducerTxnFlagInitializing, ProducerTxnFlagReady, ProducerTxnFlagInError, }, // When we have initialized transactional producer ProducerTxnFlagReady: { ProducerTxnFlagInTransaction, }, // When beginTxn has been called ProducerTxnFlagInTransaction: { // When calling commit or abort ProducerTxnFlagEndTransaction, // When got an error ProducerTxnFlagInError, }, ProducerTxnFlagEndTransaction: { // When epoch bump ProducerTxnFlagInitializing, // When commit is good ProducerTxnFlagReady, // When got an error ProducerTxnFlagInError, }, // Need to abort transaction ProducerTxnFlagAbortableError: { // Call AbortTxn ProducerTxnFlagAbortingTransaction, // When got an error ProducerTxnFlagInError, }, // Need to close producer ProducerTxnFlagFatalError: { ProducerTxnFlagFatalError, }, } type topicPartition struct { topic string partition int32 } // to ensure that we don't do a full scan every time a partition or an offset is added. 
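// topicPartitionSet records which topic/partitions take part in the current transaction;
// topicPartitionOffsets maps each topic/partition to the offset metadata still to be
// committed with the transaction. Both expose mapToRequest helpers that convert the set
// into the per-topic map shape used by the protocol requests, e.g. a set holding
// {topic: "a", partition: 0} and {topic: "a", partition: 1} becomes
// map[string][]int32{"a": {0, 1}} (partition order follows map iteration and is not guaranteed).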
type ( topicPartitionSet map[topicPartition]struct{} topicPartitionOffsets map[topicPartition]*PartitionOffsetMetadata ) func (s topicPartitionSet) mapToRequest() map[string][]int32 { result := make(map[string][]int32, len(s)) for tp := range s { result[tp.topic] = append(result[tp.topic], tp.partition) } return result } func (s topicPartitionOffsets) mapToRequest() map[string][]*PartitionOffsetMetadata { result := make(map[string][]*PartitionOffsetMetadata, len(s)) for tp, offset := range s { result[tp.topic] = append(result[tp.topic], offset) } return result } // Return true if current transition is allowed. func (t *transactionManager) isTransitionValid(target ProducerTxnStatusFlag) bool { for status, allowedTransitions := range producerTxnTransitions { if status&t.status != 0 { for _, allowedTransition := range allowedTransitions { if allowedTransition&target != 0 { return true } } } } return false } // Get current transaction status. func (t *transactionManager) currentTxnStatus() ProducerTxnStatusFlag { t.statusLock.RLock() defer t.statusLock.RUnlock() return t.status } // Try to transition to a valid status and return an error otherwise. func (t *transactionManager) transitionTo(target ProducerTxnStatusFlag, err error) error { t.statusLock.Lock() defer t.statusLock.Unlock() if !t.isTransitionValid(target) { return ErrTransitionNotAllowed } if target&ProducerTxnFlagInError != 0 { if err == nil { return ErrCannotTransitionNilError } t.lastError = err } else { t.lastError = nil } DebugLogger.Printf("txnmgr/transition [%s] transition from %s to %s\n", t.transactionalID, t.status, target) t.status = target return err } func (t *transactionManager) getAndIncrementSequenceNumber(topic string, partition int32) (int32, int16) { key := fmt.Sprintf("%s-%d", topic, partition) t.mutex.Lock() defer t.mutex.Unlock() sequence := t.sequenceNumbers[key] t.sequenceNumbers[key] = sequence + 1 return sequence, t.producerEpoch } func (t *transactionManager) bumpEpoch() { t.mutex.Lock() defer t.mutex.Unlock() t.producerEpoch++ for k := range t.sequenceNumbers { t.sequenceNumbers[k] = 0 } } func (t *transactionManager) getProducerID() (int64, int16) { t.mutex.Lock() defer t.mutex.Unlock() return t.producerID, t.producerEpoch } // Compute retry backoff considered current attempts. func (t *transactionManager) computeBackoff(attemptsRemaining int) time.Duration { if t.client.Config().Producer.Transaction.Retry.BackoffFunc != nil { maxRetries := t.client.Config().Producer.Transaction.Retry.Max retries := maxRetries - attemptsRemaining return t.client.Config().Producer.Transaction.Retry.BackoffFunc(retries, maxRetries) } return t.client.Config().Producer.Transaction.Retry.Backoff } // return true is txnmngr is transactinal. func (t *transactionManager) isTransactional() bool { return t.transactionalID != "" } // add specified offsets to current transaction. 
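// The offsets are only buffered here, keyed by consumer group id; they are pushed to the
// brokers later by publishOffsetsToTxn when the transaction is committed. Returns
// ErrTransactionNotReady if no transaction is in progress, or the last error if the
// manager is already in a fatal state.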
func (t *transactionManager) addOffsetsToTxn(offsetsToAdd map[string][]*PartitionOffsetMetadata, groupId string) error { t.mutex.Lock() defer t.mutex.Unlock() if t.currentTxnStatus()&ProducerTxnFlagInTransaction == 0 { return ErrTransactionNotReady } if t.currentTxnStatus()&ProducerTxnFlagFatalError != 0 { return t.lastError } if _, ok := t.offsetsInCurrentTxn[groupId]; !ok { t.offsetsInCurrentTxn[groupId] = topicPartitionOffsets{} } for topic, offsets := range offsetsToAdd { for _, offset := range offsets { tp := topicPartition{topic: topic, partition: offset.Partition} t.offsetsInCurrentTxn[groupId][tp] = offset } } return nil } // send txnmgnr save offsets to transaction coordinator. func (t *transactionManager) publishOffsetsToTxn(offsets topicPartitionOffsets, groupId string) (topicPartitionOffsets, error) { // First AddOffsetsToTxn attemptsRemaining := t.client.Config().Producer.Transaction.Retry.Max exec := func(run func() (bool, error), err error) error { for attemptsRemaining >= 0 { var retry bool retry, err = run() if !retry { return err } backoff := t.computeBackoff(attemptsRemaining) Logger.Printf("txnmgr/add-offset-to-txn [%s] retrying after %dms... (%d attempts remaining) (%s)\n", t.transactionalID, backoff/time.Millisecond, attemptsRemaining, err) time.Sleep(backoff) attemptsRemaining-- } return err } lastError := exec(func() (bool, error) { coordinator, err := t.client.TransactionCoordinator(t.transactionalID) if err != nil { return true, err } request := &AddOffsetsToTxnRequest{ TransactionalID: t.transactionalID, ProducerEpoch: t.producerEpoch, ProducerID: t.producerID, GroupID: groupId, } if t.client.Config().Version.IsAtLeast(V2_7_0_0) { // Version 2 adds the support for new error code PRODUCER_FENCED. request.Version = 2 } else if t.client.Config().Version.IsAtLeast(V2_0_0_0) { // Version 1 is the same as version 0. request.Version = 1 } response, err := coordinator.AddOffsetsToTxn(request) if err != nil { // If an error occurred try to refresh current transaction coordinator. _ = coordinator.Close() _ = t.client.RefreshTransactionCoordinator(t.transactionalID) return true, err } if response == nil { // If no response is returned just retry. return true, ErrTxnUnableToParseResponse } if response.Err == ErrNoError { DebugLogger.Printf("txnmgr/add-offset-to-txn [%s] successful add-offset-to-txn with group %s %+v\n", t.transactionalID, groupId, response) // If no error, just exit. 
return false, nil } switch response.Err { case ErrConsumerCoordinatorNotAvailable: fallthrough case ErrNotCoordinatorForConsumer: _ = coordinator.Close() _ = t.client.RefreshTransactionCoordinator(t.transactionalID) fallthrough case ErrOffsetsLoadInProgress: fallthrough case ErrConcurrentTransactions: // Retry case ErrUnknownProducerID: fallthrough case ErrInvalidProducerIDMapping: return false, t.abortableErrorIfPossible(response.Err) case ErrGroupAuthorizationFailed: return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, response.Err) default: // Others are fatal return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, response.Err) } return true, response.Err }, nil) if lastError != nil { return offsets, lastError } resultOffsets := offsets // Then TxnOffsetCommit // note the result is not completed until the TxnOffsetCommit returns attemptsRemaining = t.client.Config().Producer.Transaction.Retry.Max execTxnOffsetCommit := func(run func() (topicPartitionOffsets, bool, error), err error) (topicPartitionOffsets, error) { var r topicPartitionOffsets for attemptsRemaining >= 0 { var retry bool r, retry, err = run() if !retry { return r, err } backoff := t.computeBackoff(attemptsRemaining) Logger.Printf("txnmgr/txn-offset-commit [%s] retrying after %dms... (%d attempts remaining) (%s)\n", t.transactionalID, backoff/time.Millisecond, attemptsRemaining, err) time.Sleep(backoff) attemptsRemaining-- } return r, err } return execTxnOffsetCommit(func() (topicPartitionOffsets, bool, error) { consumerGroupCoordinator, err := t.client.Coordinator(groupId) if err != nil { return resultOffsets, true, err } request := &TxnOffsetCommitRequest{ TransactionalID: t.transactionalID, ProducerEpoch: t.producerEpoch, ProducerID: t.producerID, GroupID: groupId, Topics: offsets.mapToRequest(), } if t.client.Config().Version.IsAtLeast(V2_1_0_0) { // Version 2 adds the committed leader epoch. request.Version = 2 } else if t.client.Config().Version.IsAtLeast(V2_0_0_0) { // Version 1 is the same as version 0. 
request.Version = 1 } responses, err := consumerGroupCoordinator.TxnOffsetCommit(request) if err != nil { _ = consumerGroupCoordinator.Close() _ = t.client.RefreshCoordinator(groupId) return resultOffsets, true, err } if responses == nil { return resultOffsets, true, ErrTxnUnableToParseResponse } var responseErrors []error failedTxn := topicPartitionOffsets{} for topic, partitionErrors := range responses.Topics { for _, partitionError := range partitionErrors { switch partitionError.Err { case ErrNoError: continue // If the topic is unknown or the coordinator is loading, retry with the current coordinator case ErrRequestTimedOut: fallthrough case ErrConsumerCoordinatorNotAvailable: fallthrough case ErrNotCoordinatorForConsumer: _ = consumerGroupCoordinator.Close() _ = t.client.RefreshCoordinator(groupId) fallthrough case ErrUnknownTopicOrPartition: fallthrough case ErrOffsetsLoadInProgress: // Do nothing just retry case ErrIllegalGeneration: fallthrough case ErrUnknownMemberId: fallthrough case ErrFencedInstancedId: fallthrough case ErrGroupAuthorizationFailed: return resultOffsets, false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, partitionError.Err) default: // Others are fatal return resultOffsets, false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, partitionError.Err) } tp := topicPartition{topic: topic, partition: partitionError.Partition} failedTxn[tp] = offsets[tp] responseErrors = append(responseErrors, partitionError.Err) } } resultOffsets = failedTxn if len(resultOffsets) == 0 { DebugLogger.Printf("txnmgr/txn-offset-commit [%s] successful txn-offset-commit with group %s %+v\n", t.transactionalID, groupId) return resultOffsets, false, nil } return resultOffsets, true, Wrap(ErrTxnOffsetCommit, responseErrors...) }, nil) } func (t *transactionManager) initProducerId() (int64, int16, error) { isEpochBump := false req := &InitProducerIDRequest{} if t.isTransactional() { req.TransactionalID = &t.transactionalID req.TransactionTimeout = t.transactionTimeout } if t.client.Config().Version.IsAtLeast(V2_5_0_0) { if t.client.Config().Version.IsAtLeast(V2_7_0_0) { // Version 4 adds the support for new error code PRODUCER_FENCED. req.Version = 4 } else { // Version 3 adds ProducerId and ProducerEpoch, allowing producers to try // to resume after an INVALID_PRODUCER_EPOCH error req.Version = 3 } isEpochBump = t.producerID != noProducerID && t.producerEpoch != noProducerEpoch t.coordinatorSupportsBumpingEpoch = true req.ProducerID = t.producerID req.ProducerEpoch = t.producerEpoch } else if t.client.Config().Version.IsAtLeast(V2_4_0_0) { // Version 2 is the first flexible version. req.Version = 2 } else if t.client.Config().Version.IsAtLeast(V2_0_0_0) { // Version 1 is the same as version 0. 
req.Version = 1 } if isEpochBump { err := t.transitionTo(ProducerTxnFlagInitializing, nil) if err != nil { return -1, -1, err } DebugLogger.Printf("txnmgr/init-producer-id [%s] invoking InitProducerId for the first time in order to acquire a producer ID\n", t.transactionalID) } else { DebugLogger.Printf("txnmgr/init-producer-id [%s] invoking InitProducerId with current producer ID %d and epoch %d in order to bump the epoch\n", t.transactionalID, t.producerID, t.producerEpoch) } attemptsRemaining := t.client.Config().Producer.Transaction.Retry.Max exec := func(run func() (int64, int16, bool, error), err error) (int64, int16, error) { pid := int64(-1) pepoch := int16(-1) for attemptsRemaining >= 0 { var retry bool pid, pepoch, retry, err = run() if !retry { return pid, pepoch, err } backoff := t.computeBackoff(attemptsRemaining) Logger.Printf("txnmgr/init-producer-id [%s] retrying after %dms... (%d attempts remaining) (%s)\n", t.transactionalID, backoff/time.Millisecond, attemptsRemaining, err) time.Sleep(backoff) attemptsRemaining-- } return -1, -1, err } return exec(func() (int64, int16, bool, error) { var err error var coordinator *Broker if t.isTransactional() { coordinator, err = t.client.TransactionCoordinator(t.transactionalID) } else { coordinator = t.client.LeastLoadedBroker() } if err != nil { return -1, -1, true, err } response, err := coordinator.InitProducerID(req) if err != nil { if t.isTransactional() { _ = coordinator.Close() _ = t.client.RefreshTransactionCoordinator(t.transactionalID) } return -1, -1, true, err } if response == nil { return -1, -1, true, ErrTxnUnableToParseResponse } if response.Err == ErrNoError { if isEpochBump { t.sequenceNumbers = make(map[string]int32) } err := t.transitionTo(ProducerTxnFlagReady, nil) if err != nil { return -1, -1, true, err } DebugLogger.Printf("txnmgr/init-producer-id [%s] successful init producer id %+v\n", t.transactionalID, response) return response.ProducerID, response.ProducerEpoch, false, nil } switch response.Err { // Retriable errors case ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer, ErrOffsetsLoadInProgress: if t.isTransactional() { _ = coordinator.Close() _ = t.client.RefreshTransactionCoordinator(t.transactionalID) } // Fatal errors default: return -1, -1, false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, response.Err) } return -1, -1, true, response.Err }, nil) } // if kafka cluster is at least 2.5.0 mark txnmngr to bump epoch else mark it as fatal. func (t *transactionManager) abortableErrorIfPossible(err error) error { if t.coordinatorSupportsBumpingEpoch { t.epochBumpRequired = true return t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, err) } return t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, err) } // End current transaction. func (t *transactionManager) completeTransaction() error { if t.epochBumpRequired { err := t.transitionTo(ProducerTxnFlagInitializing, nil) if err != nil { return err } } else { err := t.transitionTo(ProducerTxnFlagReady, nil) if err != nil { return err } } t.lastError = nil t.epochBumpRequired = false t.partitionsInCurrentTxn = topicPartitionSet{} t.pendingPartitionsInCurrentTxn = topicPartitionSet{} t.offsetsInCurrentTxn = map[string]topicPartitionOffsets{} return nil } // send EndTxn request with commit flag. 
(true when committing false otherwise) func (t *transactionManager) endTxn(commit bool) error { attemptsRemaining := t.client.Config().Producer.Transaction.Retry.Max exec := func(run func() (bool, error), err error) error { for attemptsRemaining >= 0 { var retry bool retry, err = run() if !retry { return err } backoff := t.computeBackoff(attemptsRemaining) Logger.Printf("txnmgr/endtxn [%s] retrying after %dms... (%d attempts remaining) (%s)\n", t.transactionalID, backoff/time.Millisecond, attemptsRemaining, err) time.Sleep(backoff) attemptsRemaining-- } return err } return exec(func() (bool, error) { coordinator, err := t.client.TransactionCoordinator(t.transactionalID) if err != nil { return true, err } request := &EndTxnRequest{ TransactionalID: t.transactionalID, ProducerEpoch: t.producerEpoch, ProducerID: t.producerID, TransactionResult: commit, } if t.client.Config().Version.IsAtLeast(V2_7_0_0) { // Version 2 adds the support for new error code PRODUCER_FENCED. request.Version = 2 } else if t.client.Config().Version.IsAtLeast(V2_0_0_0) { // Version 1 is the same as version 0. request.Version = 1 } response, err := coordinator.EndTxn(request) if err != nil { // Always retry on network error _ = coordinator.Close() _ = t.client.RefreshTransactionCoordinator(t.transactionalID) return true, err } if response == nil { return true, ErrTxnUnableToParseResponse } if response.Err == ErrNoError { DebugLogger.Printf("txnmgr/endtxn [%s] successful to end txn %+v\n", t.transactionalID, response) return false, t.completeTransaction() } switch response.Err { // Need to refresh coordinator case ErrConsumerCoordinatorNotAvailable: fallthrough case ErrNotCoordinatorForConsumer: _ = coordinator.Close() _ = t.client.RefreshTransactionCoordinator(t.transactionalID) fallthrough case ErrOffsetsLoadInProgress: fallthrough case ErrConcurrentTransactions: // Just retry case ErrUnknownProducerID: fallthrough case ErrInvalidProducerIDMapping: return false, t.abortableErrorIfPossible(response.Err) // Fatal errors default: return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, response.Err) } return true, response.Err }, nil) } // We will try to publish associated offsets for each groups // then send endtxn request to mark transaction as finished. func (t *transactionManager) finishTransaction(commit bool) error { t.mutex.Lock() defer t.mutex.Unlock() // Ensure no error when committing or aborting if commit && t.currentTxnStatus()&ProducerTxnFlagInError != 0 { return t.lastError } else if !commit && t.currentTxnStatus()&ProducerTxnFlagFatalError != 0 { return t.lastError } // if no records has been sent don't do anything. if len(t.partitionsInCurrentTxn) == 0 { return t.completeTransaction() } epochBump := t.epochBumpRequired // If we're aborting the transaction, so there should be no need to add offsets. if commit && len(t.offsetsInCurrentTxn) > 0 { for group, offsets := range t.offsetsInCurrentTxn { newOffsets, err := t.publishOffsetsToTxn(offsets, group) if err != nil { t.offsetsInCurrentTxn[group] = newOffsets return err } delete(t.offsetsInCurrentTxn, group) } } if t.currentTxnStatus()&ProducerTxnFlagFatalError != 0 { return t.lastError } if !errors.Is(t.lastError, ErrInvalidProducerIDMapping) { err := t.endTxn(commit) if err != nil { return err } if !epochBump { return nil } } // reset pid and epoch if needed. return t.initializeTransactions() } // called before sending any transactional record // won't do anything if current topic-partition is already added to transaction. 
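// Otherwise the partition is queued in pendingPartitionsInCurrentTxn and is sent to the
// transaction coordinator by publishTxnPartitions (via AddPartitionsToTxn) before batches
// for it are produced. The call is a no-op while the manager is in an error state.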
func (t *transactionManager) maybeAddPartitionToCurrentTxn(topic string, partition int32) { if t.currentTxnStatus()&ProducerTxnFlagInError != 0 { return } tp := topicPartition{topic: topic, partition: partition} t.partitionInTxnLock.Lock() defer t.partitionInTxnLock.Unlock() if _, ok := t.partitionsInCurrentTxn[tp]; ok { // partition is already added return } t.pendingPartitionsInCurrentTxn[tp] = struct{}{} } // Makes a request to kafka to add a list of partitions ot the current transaction. func (t *transactionManager) publishTxnPartitions() error { t.partitionInTxnLock.Lock() defer t.partitionInTxnLock.Unlock() if t.currentTxnStatus()&ProducerTxnFlagInError != 0 { return t.lastError } if len(t.pendingPartitionsInCurrentTxn) == 0 { return nil } // Remove the partitions from the pending set regardless of the result. We use the presence // of partitions in the pending set to know when it is not safe to send batches. However, if // the partitions failed to be added and we enter an error state, we expect the batches to be // aborted anyway. In this case, we must be able to continue sending the batches which are in // retry for partitions that were successfully added. removeAllPartitionsOnFatalOrAbortedError := func() { t.pendingPartitionsInCurrentTxn = topicPartitionSet{} } // We only want to reduce the backoff when retrying the first AddPartition which errored out due to a // CONCURRENT_TRANSACTIONS error since this means that the previous transaction is still completing and // we don't want to wait too long before trying to start the new one. // // This is only a temporary fix, the long term solution is being tracked in // https://issues.apache.org/jira/browse/KAFKA-5482 retryBackoff := t.client.Config().Producer.Transaction.Retry.Backoff computeBackoff := func(attemptsRemaining int) time.Duration { if t.client.Config().Producer.Transaction.Retry.BackoffFunc != nil { maxRetries := t.client.Config().Producer.Transaction.Retry.Max retries := maxRetries - attemptsRemaining return t.client.Config().Producer.Transaction.Retry.BackoffFunc(retries, maxRetries) } return retryBackoff } attemptsRemaining := t.client.Config().Producer.Transaction.Retry.Max exec := func(run func() (bool, error), err error) error { for attemptsRemaining >= 0 { var retry bool retry, err = run() if !retry { return err } backoff := computeBackoff(attemptsRemaining) Logger.Printf("txnmgr/add-partition-to-txn retrying after %dms... (%d attempts remaining) (%s)\n", backoff/time.Millisecond, attemptsRemaining, err) time.Sleep(backoff) attemptsRemaining-- } return err } return exec(func() (bool, error) { coordinator, err := t.client.TransactionCoordinator(t.transactionalID) if err != nil { return true, err } request := &AddPartitionsToTxnRequest{ TransactionalID: t.transactionalID, ProducerID: t.producerID, ProducerEpoch: t.producerEpoch, TopicPartitions: t.pendingPartitionsInCurrentTxn.mapToRequest(), } if t.client.Config().Version.IsAtLeast(V2_7_0_0) { // Version 2 adds the support for new error code PRODUCER_FENCED. request.Version = 2 } else if t.client.Config().Version.IsAtLeast(V2_0_0_0) { // Version 1 is the same as version 0. 
request.Version = 1 } addPartResponse, err := coordinator.AddPartitionsToTxn(request) if err != nil { _ = coordinator.Close() _ = t.client.RefreshTransactionCoordinator(t.transactionalID) return true, err } if addPartResponse == nil { return true, ErrTxnUnableToParseResponse } // remove from the list partitions that have been successfully updated var responseErrors []error for topic, results := range addPartResponse.Errors { for _, response := range results { tp := topicPartition{topic: topic, partition: response.Partition} switch response.Err { case ErrNoError: // Mark partition as added to transaction t.partitionsInCurrentTxn[tp] = struct{}{} delete(t.pendingPartitionsInCurrentTxn, tp) continue case ErrConsumerCoordinatorNotAvailable: fallthrough case ErrNotCoordinatorForConsumer: _ = coordinator.Close() _ = t.client.RefreshTransactionCoordinator(t.transactionalID) fallthrough case ErrUnknownTopicOrPartition: fallthrough case ErrOffsetsLoadInProgress: // Retry topicPartition case ErrConcurrentTransactions: if len(t.partitionsInCurrentTxn) == 0 && retryBackoff > addPartitionsRetryBackoff { retryBackoff = addPartitionsRetryBackoff } case ErrOperationNotAttempted: fallthrough case ErrTopicAuthorizationFailed: removeAllPartitionsOnFatalOrAbortedError() return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, response.Err) case ErrUnknownProducerID: fallthrough case ErrInvalidProducerIDMapping: removeAllPartitionsOnFatalOrAbortedError() return false, t.abortableErrorIfPossible(response.Err) // Fatal errors default: removeAllPartitionsOnFatalOrAbortedError() return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, response.Err) } responseErrors = append(responseErrors, response.Err) } } // handle end if len(t.pendingPartitionsInCurrentTxn) == 0 { DebugLogger.Printf("txnmgr/add-partition-to-txn [%s] successful to add partitions txn %+v\n", t.transactionalID, addPartResponse) return false, nil } return true, Wrap(ErrAddPartitionsToTxn, responseErrors...) }, nil) } // Build a new transaction manager sharing producer client. func newTransactionManager(conf *Config, client Client) (*transactionManager, error) { txnmgr := &transactionManager{ producerID: noProducerID, producerEpoch: noProducerEpoch, client: client, pendingPartitionsInCurrentTxn: topicPartitionSet{}, partitionsInCurrentTxn: topicPartitionSet{}, offsetsInCurrentTxn: make(map[string]topicPartitionOffsets), status: ProducerTxnFlagUninitialized, } if conf.Producer.Idempotent { txnmgr.transactionalID = conf.Producer.Transaction.ID txnmgr.transactionTimeout = conf.Producer.Transaction.Timeout txnmgr.sequenceNumbers = make(map[string]int32) txnmgr.mutex = sync.Mutex{} var err error txnmgr.producerID, txnmgr.producerEpoch, err = txnmgr.initProducerId() if err != nil { return nil, err } Logger.Printf("txnmgr/init-producer-id [%s] obtained a ProducerId: %d and ProducerEpoch: %d\n", txnmgr.transactionalID, txnmgr.producerID, txnmgr.producerEpoch) } return txnmgr, nil } // re-init producer-id and producer-epoch if needed. 
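// initializeTransactions simply delegates to initProducerId and stores the returned
// producer id and epoch on the manager; finishTransaction calls it when an epoch bump
// is required after an abortable error.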
func (t *transactionManager) initializeTransactions() (err error) { t.producerID, t.producerEpoch, err = t.initProducerId() return } golang-github-ibm-sarama-1.43.2/transaction_manager_test.go000066400000000000000000000737631461256741300237750ustar00rootroot00000000000000package sarama import ( "errors" "testing" "github.com/stretchr/testify/require" ) func TestTransitions(t *testing.T) { testError := errors.New("test") type testCase struct { transitions []ProducerTxnStatusFlag expectedError error } testCases := []testCase{ { transitions: []ProducerTxnStatusFlag{ ProducerTxnFlagUninitialized, ProducerTxnFlagReady, ProducerTxnFlagInTransaction, ProducerTxnFlagEndTransaction | ProducerTxnFlagCommittingTransaction, ProducerTxnFlagReady, }, expectedError: nil, }, { transitions: []ProducerTxnStatusFlag{ ProducerTxnFlagUninitialized, ProducerTxnFlagReady, ProducerTxnFlagInTransaction, ProducerTxnFlagEndTransaction | ProducerTxnFlagAbortingTransaction, ProducerTxnFlagReady, }, expectedError: nil, }, { transitions: []ProducerTxnStatusFlag{ ProducerTxnFlagUninitialized, ProducerTxnFlagReady, ProducerTxnFlagInTransaction, ProducerTxnFlagEndTransaction, ProducerTxnFlagInError | ProducerTxnFlagAbortableError, }, expectedError: testError, }, { transitions: []ProducerTxnStatusFlag{ ProducerTxnFlagInError | ProducerTxnFlagAbortableError, ProducerTxnFlagEndTransaction | ProducerTxnFlagAbortingTransaction, ProducerTxnFlagReady, }, expectedError: nil, }, { transitions: []ProducerTxnStatusFlag{ ProducerTxnFlagInError | ProducerTxnFlagAbortableError, ProducerTxnFlagEndTransaction | ProducerTxnFlagCommittingTransaction, }, expectedError: ErrTransitionNotAllowed, }, { transitions: []ProducerTxnStatusFlag{ ProducerTxnFlagInError | ProducerTxnFlagFatalError, ProducerTxnFlagEndTransaction | ProducerTxnFlagAbortingTransaction, }, expectedError: ErrTransitionNotAllowed, }, } for _, tc := range testCases { txnmgr := transactionManager{} txnmgr.status = tc.transitions[0] var lastError error for i := 1; i < len(tc.transitions); i++ { var baseErr error if tc.transitions[i]&ProducerTxnFlagInError != 0 { baseErr = testError } lastError = txnmgr.transitionTo(tc.transitions[i], baseErr) } require.Equal(t, tc.expectedError, lastError, tc) } } func TestTxnmgrInitProducerIdTxn(t *testing.T) { broker := NewMockBroker(t, 1) defer broker.Close() metadataLeader := new(MetadataResponse) metadataLeader.Version = 4 metadataLeader.ControllerID = broker.brokerID metadataLeader.AddBroker(broker.Addr(), broker.BrokerID()) broker.Returns(metadataLeader) config := NewTestConfig() config.Producer.Idempotent = true config.Producer.Transaction.ID = "test" config.Version = V0_11_0_0 config.Producer.RequiredAcks = WaitForAll config.Net.MaxOpenRequests = 1 client, err := NewClient([]string{broker.Addr()}, config) require.NoError(t, err) defer client.Close() findCoordinatorResponse := FindCoordinatorResponse{ Coordinator: client.Brokers()[0], Err: ErrNoError, Version: 1, } broker.Returns(&findCoordinatorResponse) producerIdResponse := &InitProducerIDResponse{ Err: ErrNoError, ProducerID: 1, ProducerEpoch: 0, } broker.Returns(producerIdResponse) txmng, err := newTransactionManager(config, client) require.NoError(t, err) require.Equal(t, int64(1), txmng.producerID) require.Equal(t, int16(0), txmng.producerEpoch) require.Equal(t, ProducerTxnFlagReady, txmng.status) } // TestTxnmgrInitProducerIdTxnCoordinatorLoading ensure we retry initProducerId when either FindCoordinator or InitProducerID returns ErrOffsetsLoadInProgress func 
TestTxnmgrInitProducerIdTxnCoordinatorLoading(t *testing.T) { config := NewTestConfig() config.Producer.Idempotent = true config.Producer.Transaction.ID = "txid-group" config.Version = V0_11_0_0 config.Producer.RequiredAcks = WaitForAll config.Net.MaxOpenRequests = 1 broker := NewMockBroker(t, 1) defer broker.Close() broker.SetHandlerByMap(map[string]MockResponse{ "MetadataRequest": NewMockMetadataResponse(t). SetController(broker.BrokerID()). SetBroker(broker.Addr(), broker.BrokerID()), "FindCoordinatorRequest": NewMockSequence( NewMockFindCoordinatorResponse(t). SetError(CoordinatorTransaction, "txid-group", ErrOffsetsLoadInProgress), NewMockFindCoordinatorResponse(t). SetError(CoordinatorTransaction, "txid-group", ErrOffsetsLoadInProgress), NewMockFindCoordinatorResponse(t). SetCoordinator(CoordinatorTransaction, "txid-group", broker), ), "InitProducerIDRequest": NewMockSequence( NewMockInitProducerIDResponse(t). SetError(ErrOffsetsLoadInProgress), NewMockInitProducerIDResponse(t). SetError(ErrOffsetsLoadInProgress), NewMockInitProducerIDResponse(t). SetProducerID(1). SetProducerEpoch(0), ), }) client, err := NewClient([]string{broker.Addr()}, config) require.NoError(t, err) defer client.Close() txmng, err := newTransactionManager(config, client) require.NoError(t, err) require.Equal(t, int64(1), txmng.producerID) require.Equal(t, int16(0), txmng.producerEpoch) require.Equal(t, ProducerTxnFlagReady, txmng.status) } func TestMaybeAddPartitionToCurrentTxn(t *testing.T) { type testCase struct { initialFlags ProducerTxnStatusFlag initialPartitionsInCurrentTxn topicPartitionSet initialPendingPartitionsInCurrentTxn topicPartitionSet tpToAdd map[string][]int32 expectedPendingPartitions topicPartitionSet expectedPartitionsInTxn topicPartitionSet } testCases := []testCase{ { initialFlags: ProducerTxnFlagInTransaction, initialPartitionsInCurrentTxn: topicPartitionSet{ {topic: "test-topic", partition: 0}: struct{}{}, }, initialPendingPartitionsInCurrentTxn: topicPartitionSet{}, tpToAdd: map[string][]int32{ "test-topic": { 0, }, }, expectedPendingPartitions: topicPartitionSet{}, expectedPartitionsInTxn: topicPartitionSet{ {topic: "test-topic", partition: 0}: struct{}{}, }, }, { initialFlags: ProducerTxnFlagInTransaction, initialPartitionsInCurrentTxn: topicPartitionSet{}, initialPendingPartitionsInCurrentTxn: topicPartitionSet{}, tpToAdd: map[string][]int32{ "test-topic": { 0, }, }, expectedPendingPartitions: topicPartitionSet{ {topic: "test-topic", partition: 0}: struct{}{}, }, expectedPartitionsInTxn: topicPartitionSet{}, }, { initialFlags: ProducerTxnFlagInTransaction, initialPartitionsInCurrentTxn: topicPartitionSet{ {topic: "test-topic", partition: 0}: struct{}{}, }, initialPendingPartitionsInCurrentTxn: topicPartitionSet{}, tpToAdd: map[string][]int32{ "test-topic": { 0, }, }, expectedPendingPartitions: topicPartitionSet{}, expectedPartitionsInTxn: topicPartitionSet{ {topic: "test-topic", partition: 0}: struct{}{}, }, }, { initialFlags: ProducerTxnFlagInTransaction, initialPartitionsInCurrentTxn: topicPartitionSet{}, initialPendingPartitionsInCurrentTxn: topicPartitionSet{ {topic: "test-topic", partition: 0}: struct{}{}, }, tpToAdd: map[string][]int32{ "test-topic": { 0, }, }, expectedPendingPartitions: topicPartitionSet{ {topic: "test-topic", partition: 0}: struct{}{}, }, expectedPartitionsInTxn: topicPartitionSet{}, }, { initialFlags: ProducerTxnFlagInError, initialPartitionsInCurrentTxn: topicPartitionSet{}, initialPendingPartitionsInCurrentTxn: topicPartitionSet{}, tpToAdd: 
map[string][]int32{ "test-topic": { 0, }, }, expectedPendingPartitions: topicPartitionSet{}, expectedPartitionsInTxn: topicPartitionSet{}, }, } broker := NewMockBroker(t, 1) defer broker.Close() metadataLeader := new(MetadataResponse) metadataLeader.Version = 4 metadataLeader.ControllerID = broker.brokerID metadataLeader.AddBroker(broker.Addr(), broker.BrokerID()) metadataLeader.AddTopic("test-topic", ErrNoError) metadataLeader.AddTopicPartition("test-topic", 0, broker.BrokerID(), nil, nil, nil, ErrNoError) config := NewTestConfig() config.Producer.Idempotent = true config.Producer.Transaction.ID = "test" config.Version = V0_11_0_0 config.Producer.RequiredAcks = WaitForAll config.Net.MaxOpenRequests = 1 config.Producer.Transaction.Retry.Max = 0 config.Producer.Transaction.Retry.Backoff = 0 for _, tc := range testCases { func() { broker.Returns(metadataLeader) client, err := NewClient([]string{broker.Addr()}, config) require.NoError(t, err) defer client.Close() findCoordinatorResponse := FindCoordinatorResponse{ Coordinator: client.Brokers()[0], Err: ErrNoError, Version: 1, } broker.Returns(&findCoordinatorResponse) producerIdResponse := &InitProducerIDResponse{ Err: ErrNoError, ProducerID: 1, ProducerEpoch: 0, } broker.Returns(producerIdResponse) txmng, err := newTransactionManager(config, client) require.NoError(t, err) txmng.partitionsInCurrentTxn = tc.initialPartitionsInCurrentTxn txmng.pendingPartitionsInCurrentTxn = tc.initialPendingPartitionsInCurrentTxn txmng.status = tc.initialFlags for topic, partitions := range tc.tpToAdd { for _, partition := range partitions { txmng.maybeAddPartitionToCurrentTxn(topic, partition) } } require.Equal(t, tc.expectedPartitionsInTxn, txmng.partitionsInCurrentTxn, tc) require.Equal(t, tc.expectedPendingPartitions, txmng.pendingPartitionsInCurrentTxn, tc) }() } } func TestAddOffsetsToTxn(t *testing.T) { type testCase struct { brokerErr KError initialFlags ProducerTxnStatusFlag expectedFlags ProducerTxnStatusFlag expectedError error newOffsets topicPartitionOffsets } originalOffsets := topicPartitionOffsets{ topicPartition{topic: "test-topic", partition: 0}: { Partition: 0, Offset: 0, }, } testCases := []testCase{ { brokerErr: ErrNoError, initialFlags: ProducerTxnFlagInTransaction, expectedFlags: ProducerTxnFlagInTransaction, expectedError: nil, newOffsets: topicPartitionOffsets{}, }, { brokerErr: ErrConsumerCoordinatorNotAvailable, initialFlags: ProducerTxnFlagInTransaction, expectedFlags: ProducerTxnFlagInTransaction, expectedError: ErrConsumerCoordinatorNotAvailable, newOffsets: originalOffsets, }, { brokerErr: ErrNotCoordinatorForConsumer, initialFlags: ProducerTxnFlagInTransaction, expectedFlags: ProducerTxnFlagInTransaction, expectedError: ErrNotCoordinatorForConsumer, newOffsets: originalOffsets, }, { brokerErr: ErrOffsetsLoadInProgress, initialFlags: ProducerTxnFlagInTransaction, expectedFlags: ProducerTxnFlagInTransaction, expectedError: ErrOffsetsLoadInProgress, newOffsets: originalOffsets, }, { brokerErr: ErrConcurrentTransactions, initialFlags: ProducerTxnFlagInTransaction, expectedFlags: ProducerTxnFlagInTransaction, expectedError: ErrConcurrentTransactions, newOffsets: originalOffsets, }, { brokerErr: ErrUnknownProducerID, initialFlags: ProducerTxnFlagInTransaction, expectedFlags: ProducerTxnFlagFatalError, expectedError: ErrUnknownProducerID, newOffsets: originalOffsets, }, { brokerErr: ErrInvalidProducerIDMapping, initialFlags: ProducerTxnFlagInTransaction, expectedFlags: ProducerTxnFlagFatalError, expectedError: 
ErrInvalidProducerIDMapping, newOffsets: originalOffsets, }, { brokerErr: ErrGroupAuthorizationFailed, initialFlags: ProducerTxnFlagInTransaction, expectedFlags: ProducerTxnFlagAbortableError, expectedError: ErrGroupAuthorizationFailed, newOffsets: originalOffsets, }, } broker := NewMockBroker(t, 1) defer broker.Close() metadataLeader := new(MetadataResponse) metadataLeader.Version = 4 metadataLeader.ControllerID = broker.brokerID metadataLeader.AddBroker(broker.Addr(), broker.BrokerID()) metadataLeader.AddTopic("test-topic", ErrNoError) metadataLeader.AddTopicPartition("test-topic", 0, broker.BrokerID(), nil, nil, nil, ErrNoError) config := NewTestConfig() config.Producer.Idempotent = true config.Producer.Transaction.ID = "test" config.Version = V0_11_0_0 config.Producer.RequiredAcks = WaitForAll config.Net.MaxOpenRequests = 1 config.Producer.Transaction.Retry.Max = 0 config.Producer.Transaction.Retry.Backoff = 0 offsets := topicPartitionOffsets{ topicPartition{topic: "test-topic", partition: 0}: { Partition: 0, Offset: 0, }, } for _, tc := range testCases { func() { broker.Returns(metadataLeader) client, err := NewClient([]string{broker.Addr()}, config) require.NoError(t, err) defer client.Close() findCoordinatorResponse := FindCoordinatorResponse{ Coordinator: client.Brokers()[0], Err: ErrNoError, Version: 1, } broker.Returns(&findCoordinatorResponse) producerIdResponse := &InitProducerIDResponse{ Err: ErrNoError, ProducerID: 1, ProducerEpoch: 0, } broker.Returns(producerIdResponse) txmng, err := newTransactionManager(config, client) require.NoError(t, err) txmng.status = tc.initialFlags broker.Returns(&AddOffsetsToTxnResponse{ Err: tc.brokerErr, }) if errors.Is(tc.brokerErr, ErrRequestTimedOut) || errors.Is(tc.brokerErr, ErrConsumerCoordinatorNotAvailable) || errors.Is(tc.brokerErr, ErrNotCoordinatorForConsumer) { broker.Returns(&FindCoordinatorResponse{ Coordinator: client.Brokers()[0], Err: ErrNoError, Version: 1, }) } if tc.brokerErr == ErrNoError { broker.Returns(&FindCoordinatorResponse{ Coordinator: client.Brokers()[0], Err: ErrNoError, Version: 1, }) broker.Returns(&TxnOffsetCommitResponse{ Topics: map[string][]*PartitionError{ "test-topic": { { Partition: 0, Err: ErrNoError, }, }, }, }) } newOffsets, err := txmng.publishOffsetsToTxn(offsets, "test-group") if tc.expectedError != nil { require.Equal(t, tc.expectedError.Error(), err.Error()) } else { require.Equal(t, tc.expectedError, err) } require.Equal(t, tc.newOffsets, newOffsets) require.True(t, tc.expectedFlags&txmng.status != 0) }() } } func TestTxnOffsetsCommit(t *testing.T) { type testCase struct { brokerErr KError initialFlags ProducerTxnStatusFlag initialOffsets topicPartitionOffsets expectedFlags ProducerTxnStatusFlag expectedError error expectedOffsets topicPartitionOffsets } originalOffsets := topicPartitionOffsets{ topicPartition{topic: "test-topic", partition: 0}: { Partition: 0, Offset: 0, }, } testCases := []testCase{ { brokerErr: ErrConsumerCoordinatorNotAvailable, initialFlags: ProducerTxnFlagInTransaction, initialOffsets: topicPartitionOffsets{ topicPartition{topic: "test-topic", partition: 0}: { Partition: 0, Offset: 0, }, }, expectedFlags: ProducerTxnFlagInTransaction, expectedError: Wrap(ErrTxnOffsetCommit, ErrConsumerCoordinatorNotAvailable), expectedOffsets: originalOffsets, }, { brokerErr: ErrNotCoordinatorForConsumer, initialFlags: ProducerTxnFlagInTransaction, initialOffsets: topicPartitionOffsets{ topicPartition{topic: "test-topic", partition: 0}: { Partition: 0, Offset: 0, }, }, expectedFlags: 
ProducerTxnFlagInTransaction, expectedError: Wrap(ErrTxnOffsetCommit, ErrNotCoordinatorForConsumer), expectedOffsets: originalOffsets, }, { brokerErr: ErrNoError, initialFlags: ProducerTxnFlagInTransaction, initialOffsets: topicPartitionOffsets{ topicPartition{topic: "test-topic", partition: 0}: { Partition: 0, Offset: 0, }, }, expectedFlags: ProducerTxnFlagInTransaction, expectedError: nil, expectedOffsets: topicPartitionOffsets{}, }, { brokerErr: ErrUnknownTopicOrPartition, initialFlags: ProducerTxnFlagInTransaction, initialOffsets: topicPartitionOffsets{ topicPartition{topic: "test-topic", partition: 0}: { Partition: 0, Offset: 0, }, }, expectedFlags: ProducerTxnFlagInTransaction, expectedError: Wrap(ErrTxnOffsetCommit, ErrUnknownTopicOrPartition), expectedOffsets: originalOffsets, }, { brokerErr: ErrOffsetsLoadInProgress, initialFlags: ProducerTxnFlagInTransaction, initialOffsets: topicPartitionOffsets{ topicPartition{topic: "test-topic", partition: 0}: { Partition: 0, Offset: 0, }, }, expectedFlags: ProducerTxnFlagInTransaction, expectedError: Wrap(ErrTxnOffsetCommit, ErrOffsetsLoadInProgress), expectedOffsets: originalOffsets, }, { brokerErr: ErrIllegalGeneration, initialFlags: ProducerTxnFlagInTransaction, initialOffsets: topicPartitionOffsets{ topicPartition{topic: "test-topic", partition: 0}: { Partition: 0, Offset: 0, }, }, expectedFlags: ProducerTxnFlagAbortableError, expectedError: ErrIllegalGeneration, expectedOffsets: originalOffsets, }, { brokerErr: ErrUnknownMemberId, initialFlags: ProducerTxnFlagInTransaction, initialOffsets: topicPartitionOffsets{ topicPartition{topic: "test-topic", partition: 0}: { Partition: 0, Offset: 0, }, }, expectedFlags: ProducerTxnFlagAbortableError, expectedError: ErrUnknownMemberId, expectedOffsets: originalOffsets, }, { brokerErr: ErrFencedInstancedId, initialFlags: ProducerTxnFlagInTransaction, initialOffsets: topicPartitionOffsets{ topicPartition{topic: "test-topic", partition: 0}: { Partition: 0, Offset: 0, }, }, expectedFlags: ProducerTxnFlagAbortableError, expectedError: ErrFencedInstancedId, expectedOffsets: originalOffsets, }, { brokerErr: ErrGroupAuthorizationFailed, initialFlags: ProducerTxnFlagInTransaction, initialOffsets: topicPartitionOffsets{ topicPartition{topic: "test-topic", partition: 0}: { Partition: 0, Offset: 0, }, }, expectedFlags: ProducerTxnFlagAbortableError, expectedError: ErrGroupAuthorizationFailed, expectedOffsets: originalOffsets, }, { brokerErr: ErrKafkaStorageError, initialFlags: ProducerTxnFlagInTransaction, initialOffsets: topicPartitionOffsets{ topicPartition{topic: "test-topic", partition: 0}: { Partition: 0, Offset: 0, }, }, expectedFlags: ProducerTxnFlagFatalError, expectedError: ErrKafkaStorageError, expectedOffsets: originalOffsets, }, } broker := NewMockBroker(t, 1) defer broker.Close() config := NewTestConfig() config.Producer.Idempotent = true config.Producer.Transaction.ID = "test" config.Version = V0_11_0_0 config.Producer.RequiredAcks = WaitForAll config.Net.MaxOpenRequests = 1 config.Producer.Transaction.Retry.Max = 0 config.Producer.Transaction.Retry.Backoff = 0 metadataLeader := new(MetadataResponse) metadataLeader.Version = 4 metadataLeader.ControllerID = broker.brokerID metadataLeader.AddBroker(broker.Addr(), broker.BrokerID()) metadataLeader.AddTopic("test-topic", ErrNoError) metadataLeader.AddTopicPartition("test-topic", 0, broker.BrokerID(), nil, nil, nil, ErrNoError) for _, tc := range testCases { func() { broker.Returns(metadataLeader) client, err := NewClient([]string{broker.Addr()}, 
config) require.NoError(t, err) defer client.Close() findCoordinatorResponse := FindCoordinatorResponse{ Coordinator: client.Brokers()[0], Err: ErrNoError, Version: 1, } broker.Returns(&findCoordinatorResponse) producerIdResponse := &InitProducerIDResponse{ Err: ErrNoError, ProducerID: 1, ProducerEpoch: 0, } broker.Returns(producerIdResponse) txmng, err := newTransactionManager(config, client) require.NoError(t, err) txmng.status = tc.initialFlags broker.Returns(&AddOffsetsToTxnResponse{ Err: ErrNoError, }) broker.Returns(&FindCoordinatorResponse{ Coordinator: client.Brokers()[0], Err: ErrNoError, Version: 1, }) broker.Returns(&TxnOffsetCommitResponse{ Topics: map[string][]*PartitionError{ "test-topic": { { Partition: 0, Err: tc.brokerErr, }, }, }, }) if errors.Is(tc.brokerErr, ErrRequestTimedOut) || errors.Is(tc.brokerErr, ErrConsumerCoordinatorNotAvailable) || errors.Is(tc.brokerErr, ErrNotCoordinatorForConsumer) { broker.Returns(&FindCoordinatorResponse{ Coordinator: client.Brokers()[0], Err: ErrNoError, Version: 1, }) } newOffsets, err := txmng.publishOffsetsToTxn(tc.initialOffsets, "test-group") if tc.expectedError != nil { require.Equal(t, tc.expectedError.Error(), err.Error()) } else { require.Equal(t, tc.expectedError, err) } require.Equal(t, tc.expectedOffsets, newOffsets) require.True(t, tc.expectedFlags&txmng.status != 0) }() } } func TestEndTxn(t *testing.T) { type testCase struct { brokerErr KError commit bool expectedFlags ProducerTxnStatusFlag expectedError error } testCases := []testCase{ { brokerErr: ErrNoError, commit: true, expectedFlags: ProducerTxnFlagReady, expectedError: nil, }, { brokerErr: ErrConsumerCoordinatorNotAvailable, commit: true, expectedFlags: ProducerTxnFlagEndTransaction, expectedError: ErrConsumerCoordinatorNotAvailable, }, { brokerErr: ErrNotCoordinatorForConsumer, commit: true, expectedFlags: ProducerTxnFlagEndTransaction, expectedError: ErrNotCoordinatorForConsumer, }, { brokerErr: ErrOffsetsLoadInProgress, commit: true, expectedFlags: ProducerTxnFlagEndTransaction, expectedError: ErrOffsetsLoadInProgress, }, { brokerErr: ErrConcurrentTransactions, commit: true, expectedFlags: ProducerTxnFlagEndTransaction, expectedError: ErrConcurrentTransactions, }, { brokerErr: ErrUnknownProducerID, commit: true, expectedFlags: ProducerTxnFlagFatalError, expectedError: ErrUnknownProducerID, }, { brokerErr: ErrInvalidProducerIDMapping, commit: true, expectedFlags: ProducerTxnFlagFatalError, expectedError: ErrInvalidProducerIDMapping, }, } broker := NewMockBroker(t, 1) defer broker.Close() metadataLeader := new(MetadataResponse) metadataLeader.Version = 4 metadataLeader.ControllerID = broker.brokerID metadataLeader.AddBroker(broker.Addr(), broker.BrokerID()) metadataLeader.AddTopic("test-topic", ErrNoError) metadataLeader.AddTopicPartition("test-topic", 0, broker.BrokerID(), nil, nil, nil, ErrNoError) config := NewTestConfig() config.Producer.Idempotent = true config.Producer.Transaction.ID = "test" config.Version = V0_11_0_0 config.Producer.RequiredAcks = WaitForAll config.Net.MaxOpenRequests = 1 config.Producer.Transaction.Retry.Max = 0 config.Producer.Transaction.Retry.Backoff = 0 for _, tc := range testCases { func() { broker.Returns(metadataLeader) client, err := NewClient([]string{broker.Addr()}, config) require.NoError(t, err) defer client.Close() findCoordinatorResponse := FindCoordinatorResponse{ Coordinator: client.Brokers()[0], Err: ErrNoError, Version: 1, } broker.Returns(&findCoordinatorResponse) producerIdResponse := &InitProducerIDResponse{ Err: 
ErrNoError, ProducerID: 1, ProducerEpoch: 0, } broker.Returns(producerIdResponse) txmng, err := newTransactionManager(config, client) require.NoError(t, err) txmng.status = ProducerTxnFlagEndTransaction endTxnResponse := &EndTxnResponse{ Err: tc.brokerErr, ThrottleTime: 0, } broker.Returns(endTxnResponse) if errors.Is(tc.brokerErr, ErrRequestTimedOut) || errors.Is(tc.brokerErr, ErrConsumerCoordinatorNotAvailable) || errors.Is(tc.brokerErr, ErrNotCoordinatorForConsumer) { broker.Returns(&FindCoordinatorResponse{ Coordinator: client.Brokers()[0], Err: ErrNoError, Version: 1, }) } err = txmng.endTxn(tc.commit) require.Equal(t, tc.expectedError, err) require.True(t, txmng.currentTxnStatus()&tc.expectedFlags != 0) }() } } func TestPublishPartitionToTxn(t *testing.T) { type testCase struct { brokerErr KError expectedFlags ProducerTxnStatusFlag expectedError error expectedPendingPartitions topicPartitionSet expectedPartitionsInTxn topicPartitionSet } initialPendingTopicPartitionSet := topicPartitionSet{ { topic: "test-topic", partition: 0, }: struct{}{}, } testCases := []testCase{ { brokerErr: ErrNoError, expectedFlags: ProducerTxnFlagInTransaction, expectedError: nil, expectedPendingPartitions: topicPartitionSet{}, expectedPartitionsInTxn: initialPendingTopicPartitionSet, }, { brokerErr: ErrConsumerCoordinatorNotAvailable, expectedFlags: ProducerTxnFlagInTransaction, expectedError: Wrap(ErrAddPartitionsToTxn, ErrConsumerCoordinatorNotAvailable), expectedPartitionsInTxn: topicPartitionSet{}, expectedPendingPartitions: initialPendingTopicPartitionSet, }, { brokerErr: ErrNotCoordinatorForConsumer, expectedFlags: ProducerTxnFlagInTransaction, expectedError: Wrap(ErrAddPartitionsToTxn, ErrNotCoordinatorForConsumer), expectedPartitionsInTxn: topicPartitionSet{}, expectedPendingPartitions: initialPendingTopicPartitionSet, }, { brokerErr: ErrUnknownTopicOrPartition, expectedFlags: ProducerTxnFlagInTransaction, expectedError: Wrap(ErrAddPartitionsToTxn, ErrUnknownTopicOrPartition), expectedPartitionsInTxn: topicPartitionSet{}, expectedPendingPartitions: initialPendingTopicPartitionSet, }, { brokerErr: ErrOffsetsLoadInProgress, expectedFlags: ProducerTxnFlagInTransaction, expectedError: Wrap(ErrAddPartitionsToTxn, ErrOffsetsLoadInProgress), expectedPartitionsInTxn: topicPartitionSet{}, expectedPendingPartitions: initialPendingTopicPartitionSet, }, { brokerErr: ErrConcurrentTransactions, expectedFlags: ProducerTxnFlagInTransaction, expectedError: Wrap(ErrAddPartitionsToTxn, ErrConcurrentTransactions), expectedPartitionsInTxn: topicPartitionSet{}, expectedPendingPartitions: initialPendingTopicPartitionSet, }, { brokerErr: ErrOperationNotAttempted, expectedFlags: ProducerTxnFlagAbortableError, expectedError: ErrOperationNotAttempted, expectedPartitionsInTxn: topicPartitionSet{}, expectedPendingPartitions: topicPartitionSet{}, }, { brokerErr: ErrTopicAuthorizationFailed, expectedFlags: ProducerTxnFlagAbortableError, expectedError: ErrTopicAuthorizationFailed, expectedPartitionsInTxn: topicPartitionSet{}, expectedPendingPartitions: topicPartitionSet{}, }, { brokerErr: ErrUnknownProducerID, expectedFlags: ProducerTxnFlagFatalError, expectedError: ErrUnknownProducerID, expectedPartitionsInTxn: topicPartitionSet{}, expectedPendingPartitions: topicPartitionSet{}, }, { brokerErr: ErrInvalidProducerIDMapping, expectedFlags: ProducerTxnFlagFatalError, expectedError: ErrInvalidProducerIDMapping, expectedPartitionsInTxn: topicPartitionSet{}, expectedPendingPartitions: topicPartitionSet{}, }, { brokerErr: 
ErrKafkaStorageError, expectedFlags: ProducerTxnFlagFatalError, expectedError: ErrKafkaStorageError, expectedPartitionsInTxn: topicPartitionSet{}, expectedPendingPartitions: topicPartitionSet{}, }, } broker := NewMockBroker(t, 1) defer broker.Close() metadataLeader := new(MetadataResponse) metadataLeader.Version = 4 metadataLeader.ControllerID = broker.brokerID metadataLeader.AddBroker(broker.Addr(), broker.BrokerID()) metadataLeader.AddTopic("test-topic", ErrNoError) metadataLeader.AddTopicPartition("test-topic", 0, broker.BrokerID(), nil, nil, nil, ErrNoError) config := NewTestConfig() config.Producer.Idempotent = true config.Producer.Transaction.ID = "test" config.Version = V0_11_0_0 config.Producer.RequiredAcks = WaitForAll config.Net.MaxOpenRequests = 1 config.Producer.Transaction.Retry.Max = 0 config.Producer.Transaction.Retry.Backoff = 0 for _, tc := range testCases { func() { broker.Returns(metadataLeader) client, err := NewClient([]string{broker.Addr()}, config) require.NoError(t, err) defer client.Close() findCoordinatorResponse := FindCoordinatorResponse{ Coordinator: client.Brokers()[0], Err: ErrNoError, Version: 1, } broker.Returns(&findCoordinatorResponse) producerIdResponse := &InitProducerIDResponse{ Err: ErrNoError, ProducerID: 1, ProducerEpoch: 0, } broker.Returns(producerIdResponse) txmng, err := newTransactionManager(config, client) require.NoError(t, err) txmng.status = ProducerTxnFlagInTransaction txmng.pendingPartitionsInCurrentTxn = topicPartitionSet{ { topic: "test-topic", partition: 0, }: struct{}{}, } broker.Returns(&AddPartitionsToTxnResponse{ Errors: map[string][]*PartitionError{ "test-topic": { { Partition: 0, Err: tc.brokerErr, }, }, }, }) if errors.Is(tc.brokerErr, ErrRequestTimedOut) || errors.Is(tc.brokerErr, ErrConsumerCoordinatorNotAvailable) || errors.Is(tc.brokerErr, ErrNotCoordinatorForConsumer) { broker.Returns(&FindCoordinatorResponse{ Coordinator: client.Brokers()[0], Err: ErrNoError, Version: 1, }) } err = txmng.publishTxnPartitions() if tc.expectedError != nil { require.Equal(t, tc.expectedError.Error(), err.Error(), tc) } else { require.Equal(t, tc.expectedError, err, tc) } require.True(t, txmng.status&tc.expectedFlags != 0, tc) require.Equal(t, tc.expectedPartitionsInTxn, txmng.partitionsInCurrentTxn, tc) require.Equal(t, tc.expectedPendingPartitions, txmng.pendingPartitionsInCurrentTxn, tc) }() } } golang-github-ibm-sarama-1.43.2/txn_offset_commit_request.go000066400000000000000000000065471461256741300242120ustar00rootroot00000000000000package sarama type TxnOffsetCommitRequest struct { Version int16 TransactionalID string GroupID string ProducerID int64 ProducerEpoch int16 Topics map[string][]*PartitionOffsetMetadata } func (t *TxnOffsetCommitRequest) encode(pe packetEncoder) error { if err := pe.putString(t.TransactionalID); err != nil { return err } if err := pe.putString(t.GroupID); err != nil { return err } pe.putInt64(t.ProducerID) pe.putInt16(t.ProducerEpoch) if err := pe.putArrayLength(len(t.Topics)); err != nil { return err } for topic, partitions := range t.Topics { if err := pe.putString(topic); err != nil { return err } if err := pe.putArrayLength(len(partitions)); err != nil { return err } for _, partition := range partitions { if err := partition.encode(pe, t.Version); err != nil { return err } } } return nil } func (t *TxnOffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) { t.Version = version if t.TransactionalID, err = pd.getString(); err != nil { return err } if t.GroupID, err = pd.getString(); err != 
nil { return err } if t.ProducerID, err = pd.getInt64(); err != nil { return err } if t.ProducerEpoch, err = pd.getInt16(); err != nil { return err } n, err := pd.getArrayLength() if err != nil { return err } t.Topics = make(map[string][]*PartitionOffsetMetadata) for i := 0; i < n; i++ { topic, err := pd.getString() if err != nil { return err } m, err := pd.getArrayLength() if err != nil { return err } t.Topics[topic] = make([]*PartitionOffsetMetadata, m) for j := 0; j < m; j++ { partitionOffsetMetadata := new(PartitionOffsetMetadata) if err := partitionOffsetMetadata.decode(pd, version); err != nil { return err } t.Topics[topic][j] = partitionOffsetMetadata } } return nil } func (a *TxnOffsetCommitRequest) key() int16 { return 28 } func (a *TxnOffsetCommitRequest) version() int16 { return a.Version } func (a *TxnOffsetCommitRequest) headerVersion() int16 { return 1 } func (a *TxnOffsetCommitRequest) isValidVersion() bool { return a.Version >= 0 && a.Version <= 2 } func (a *TxnOffsetCommitRequest) requiredVersion() KafkaVersion { switch a.Version { case 2: return V2_1_0_0 case 1: return V2_0_0_0 case 0: return V0_11_0_0 default: return V2_1_0_0 } } type PartitionOffsetMetadata struct { // Partition contains the index of the partition within the topic. Partition int32 // Offset contains the message offset to be committed. Offset int64 // LeaderEpoch contains the leader epoch of the last consumed record. LeaderEpoch int32 // Metadata contains any associated metadata the client wants to keep. Metadata *string } func (p *PartitionOffsetMetadata) encode(pe packetEncoder, version int16) error { pe.putInt32(p.Partition) pe.putInt64(p.Offset) if version >= 2 { pe.putInt32(p.LeaderEpoch) } if err := pe.putNullableString(p.Metadata); err != nil { return err } return nil } func (p *PartitionOffsetMetadata) decode(pd packetDecoder, version int16) (err error) { if p.Partition, err = pd.getInt32(); err != nil { return err } if p.Offset, err = pd.getInt64(); err != nil { return err } if version >= 2 { if p.LeaderEpoch, err = pd.getInt32(); err != nil { return err } } if p.Metadata, err = pd.getNullableString(); err != nil { return err } return nil } golang-github-ibm-sarama-1.43.2/txn_offset_commit_request_test.go000066400000000000000000000027731461256741300252460ustar00rootroot00000000000000package sarama import "testing" var ( txnOffsetCommitRequest = []byte{ 0, 3, 't', 'x', 'n', 0, 7, 'g', 'r', 'o', 'u', 'p', 'i', 'd', 0, 0, 0, 0, 0, 0, 31, 64, // producer ID 0, 1, // producer epoch 0, 0, 0, 1, // 1 topic 0, 5, 't', 'o', 'p', 'i', 'c', 0, 0, 0, 1, // 1 partition 0, 0, 0, 2, // partition no 2 0, 0, 0, 0, 0, 0, 0, 123, 255, 255, // no meta data } txnOffsetCommitRequestV2 = []byte{ 0, 3, 't', 'x', 'n', 0, 7, 'g', 'r', 'o', 'u', 'p', 'i', 'd', 0, 0, 0, 0, 0, 0, 31, 64, // producer ID 0, 1, // producer epoch 0, 0, 0, 1, // 1 topic 0, 5, 't', 'o', 'p', 'i', 'c', 0, 0, 0, 1, // 1 partition 0, 0, 0, 2, // partition no 2 0, 0, 0, 0, 0, 0, 0, 123, 0, 0, 0, 9, // leader epoch 255, 255, // no meta data } ) func TestTxnOffsetCommitRequest(t *testing.T) { req := &TxnOffsetCommitRequest{ TransactionalID: "txn", GroupID: "groupid", ProducerID: 8000, ProducerEpoch: 1, Topics: map[string][]*PartitionOffsetMetadata{ "topic": {{ Offset: 123, Partition: 2, }}, }, } testRequest(t, "V0", req, txnOffsetCommitRequest) } func TestTxnOffsetCommitRequestV2(t *testing.T) { req := &TxnOffsetCommitRequest{ Version: 2, TransactionalID: "txn", GroupID: "groupid", ProducerID: 8000, ProducerEpoch: 1, Topics: 
map[string][]*PartitionOffsetMetadata{ "topic": {{ Offset: 123, Partition: 2, LeaderEpoch: 9, }}, }, } testRequest(t, "V2", req, txnOffsetCommitRequestV2) } golang-github-ibm-sarama-1.43.2/txn_offset_commit_response.go000066400000000000000000000037371461256741300243560ustar00rootroot00000000000000package sarama import ( "time" ) type TxnOffsetCommitResponse struct { Version int16 ThrottleTime time.Duration Topics map[string][]*PartitionError } func (t *TxnOffsetCommitResponse) encode(pe packetEncoder) error { pe.putInt32(int32(t.ThrottleTime / time.Millisecond)) if err := pe.putArrayLength(len(t.Topics)); err != nil { return err } for topic, e := range t.Topics { if err := pe.putString(topic); err != nil { return err } if err := pe.putArrayLength(len(e)); err != nil { return err } for _, partitionError := range e { if err := partitionError.encode(pe); err != nil { return err } } } return nil } func (t *TxnOffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) { t.Version = version throttleTime, err := pd.getInt32() if err != nil { return err } t.ThrottleTime = time.Duration(throttleTime) * time.Millisecond n, err := pd.getArrayLength() if err != nil { return err } t.Topics = make(map[string][]*PartitionError) for i := 0; i < n; i++ { topic, err := pd.getString() if err != nil { return err } m, err := pd.getArrayLength() if err != nil { return err } t.Topics[topic] = make([]*PartitionError, m) for j := 0; j < m; j++ { t.Topics[topic][j] = new(PartitionError) if err := t.Topics[topic][j].decode(pd, version); err != nil { return err } } } return nil } func (a *TxnOffsetCommitResponse) key() int16 { return 28 } func (a *TxnOffsetCommitResponse) version() int16 { return a.Version } func (a *TxnOffsetCommitResponse) headerVersion() int16 { return 0 } func (a *TxnOffsetCommitResponse) isValidVersion() bool { return a.Version >= 0 && a.Version <= 2 } func (a *TxnOffsetCommitResponse) requiredVersion() KafkaVersion { switch a.Version { case 2: return V2_1_0_0 case 1: return V2_0_0_0 case 0: return V0_11_0_0 default: return V2_1_0_0 } } func (r *TxnOffsetCommitResponse) throttleTime() time.Duration { return r.ThrottleTime } golang-github-ibm-sarama-1.43.2/txn_offset_commit_response_test.go000066400000000000000000000010511461256741300254000ustar00rootroot00000000000000package sarama import ( "testing" "time" ) var txnOffsetCommitResponse = []byte{ 0, 0, 0, 100, 0, 0, 0, 1, // 1 topic 0, 5, 't', 'o', 'p', 'i', 'c', 0, 0, 0, 1, // 1 partition response 0, 0, 0, 2, // partition number 2 0, 47, // err } func TestTxnOffsetCommitResponse(t *testing.T) { resp := &TxnOffsetCommitResponse{ ThrottleTime: 100 * time.Millisecond, Topics: map[string][]*PartitionError{ "topic": {{ Partition: 2, Err: ErrInvalidProducerEpoch, }}, }, } testResponse(t, "", resp, txnOffsetCommitResponse) } golang-github-ibm-sarama-1.43.2/utils.go000066400000000000000000000201051461256741300200350ustar00rootroot00000000000000package sarama import ( "bufio" "fmt" "net" "regexp" ) type none struct{} // make []int32 sortable so we can sort partition numbers type int32Slice []int32 func (slice int32Slice) Len() int { return len(slice) } func (slice int32Slice) Less(i, j int) bool { return slice[i] < slice[j] } func (slice int32Slice) Swap(i, j int) { slice[i], slice[j] = slice[j], slice[i] } func dupInt32Slice(input []int32) []int32 { ret := make([]int32, 0, len(input)) ret = append(ret, input...) 
return ret } func withRecover(fn func()) { defer func() { handler := PanicHandler if handler != nil { if err := recover(); err != nil { handler(err) } } }() fn() } func safeAsyncClose(b *Broker) { tmp := b // local var prevents clobbering in goroutine go withRecover(func() { if connected, _ := tmp.Connected(); connected { if err := tmp.Close(); err != nil { Logger.Println("Error closing broker", tmp.ID(), ":", err) } } }) } // Encoder is a simple interface for any type that can be encoded as an array of bytes // in order to be sent as the key or value of a Kafka message. Length() is provided as an // optimization, and must return the same as len() on the result of Encode(). type Encoder interface { Encode() ([]byte, error) Length() int } // make strings and byte slices encodable for convenience so they can be used as keys // and/or values in kafka messages // StringEncoder implements the Encoder interface for Go strings so that they can be used // as the Key or Value in a ProducerMessage. type StringEncoder string func (s StringEncoder) Encode() ([]byte, error) { return []byte(s), nil } func (s StringEncoder) Length() int { return len(s) } // ByteEncoder implements the Encoder interface for Go byte slices so that they can be used // as the Key or Value in a ProducerMessage. type ByteEncoder []byte func (b ByteEncoder) Encode() ([]byte, error) { return b, nil } func (b ByteEncoder) Length() int { return len(b) } // bufConn wraps a net.Conn with a buffer for reads to reduce the number of // reads that trigger syscalls. type bufConn struct { net.Conn buf *bufio.Reader } func newBufConn(conn net.Conn) *bufConn { return &bufConn{ Conn: conn, buf: bufio.NewReader(conn), } } func (bc *bufConn) Read(b []byte) (n int, err error) { return bc.buf.Read(b) } // KafkaVersion instances represent versions of the upstream Kafka broker. type KafkaVersion struct { // it's a struct rather than just typing the array directly to make it opaque and stop people // generating their own arbitrary versions version [4]uint } func newKafkaVersion(major, minor, veryMinor, patch uint) KafkaVersion { return KafkaVersion{ version: [4]uint{major, minor, veryMinor, patch}, } } // IsAtLeast return true if and only if the version it is called on is // greater than or equal to the version passed in: // // V1.IsAtLeast(V2) // false // V2.IsAtLeast(V1) // true func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool { for i := range v.version { if v.version[i] > other.version[i] { return true } else if v.version[i] < other.version[i] { return false } } return true } // Effective constants defining the supported kafka versions. 
var ( V0_8_2_0 = newKafkaVersion(0, 8, 2, 0) V0_8_2_1 = newKafkaVersion(0, 8, 2, 1) V0_8_2_2 = newKafkaVersion(0, 8, 2, 2) V0_9_0_0 = newKafkaVersion(0, 9, 0, 0) V0_9_0_1 = newKafkaVersion(0, 9, 0, 1) V0_10_0_0 = newKafkaVersion(0, 10, 0, 0) V0_10_0_1 = newKafkaVersion(0, 10, 0, 1) V0_10_1_0 = newKafkaVersion(0, 10, 1, 0) V0_10_1_1 = newKafkaVersion(0, 10, 1, 1) V0_10_2_0 = newKafkaVersion(0, 10, 2, 0) V0_10_2_1 = newKafkaVersion(0, 10, 2, 1) V0_10_2_2 = newKafkaVersion(0, 10, 2, 2) V0_11_0_0 = newKafkaVersion(0, 11, 0, 0) V0_11_0_1 = newKafkaVersion(0, 11, 0, 1) V0_11_0_2 = newKafkaVersion(0, 11, 0, 2) V1_0_0_0 = newKafkaVersion(1, 0, 0, 0) V1_0_1_0 = newKafkaVersion(1, 0, 1, 0) V1_0_2_0 = newKafkaVersion(1, 0, 2, 0) V1_1_0_0 = newKafkaVersion(1, 1, 0, 0) V1_1_1_0 = newKafkaVersion(1, 1, 1, 0) V2_0_0_0 = newKafkaVersion(2, 0, 0, 0) V2_0_1_0 = newKafkaVersion(2, 0, 1, 0) V2_1_0_0 = newKafkaVersion(2, 1, 0, 0) V2_1_1_0 = newKafkaVersion(2, 1, 1, 0) V2_2_0_0 = newKafkaVersion(2, 2, 0, 0) V2_2_1_0 = newKafkaVersion(2, 2, 1, 0) V2_2_2_0 = newKafkaVersion(2, 2, 2, 0) V2_3_0_0 = newKafkaVersion(2, 3, 0, 0) V2_3_1_0 = newKafkaVersion(2, 3, 1, 0) V2_4_0_0 = newKafkaVersion(2, 4, 0, 0) V2_4_1_0 = newKafkaVersion(2, 4, 1, 0) V2_5_0_0 = newKafkaVersion(2, 5, 0, 0) V2_5_1_0 = newKafkaVersion(2, 5, 1, 0) V2_6_0_0 = newKafkaVersion(2, 6, 0, 0) V2_6_1_0 = newKafkaVersion(2, 6, 1, 0) V2_6_2_0 = newKafkaVersion(2, 6, 2, 0) V2_6_3_0 = newKafkaVersion(2, 6, 3, 0) V2_7_0_0 = newKafkaVersion(2, 7, 0, 0) V2_7_1_0 = newKafkaVersion(2, 7, 1, 0) V2_7_2_0 = newKafkaVersion(2, 7, 2, 0) V2_8_0_0 = newKafkaVersion(2, 8, 0, 0) V2_8_1_0 = newKafkaVersion(2, 8, 1, 0) V2_8_2_0 = newKafkaVersion(2, 8, 2, 0) V3_0_0_0 = newKafkaVersion(3, 0, 0, 0) V3_0_1_0 = newKafkaVersion(3, 0, 1, 0) V3_0_2_0 = newKafkaVersion(3, 0, 2, 0) V3_1_0_0 = newKafkaVersion(3, 1, 0, 0) V3_1_1_0 = newKafkaVersion(3, 1, 1, 0) V3_1_2_0 = newKafkaVersion(3, 1, 2, 0) V3_2_0_0 = newKafkaVersion(3, 2, 0, 0) V3_2_1_0 = newKafkaVersion(3, 2, 1, 0) V3_2_2_0 = newKafkaVersion(3, 2, 2, 0) V3_2_3_0 = newKafkaVersion(3, 2, 3, 0) V3_3_0_0 = newKafkaVersion(3, 3, 0, 0) V3_3_1_0 = newKafkaVersion(3, 3, 1, 0) V3_3_2_0 = newKafkaVersion(3, 3, 2, 0) V3_4_0_0 = newKafkaVersion(3, 4, 0, 0) V3_4_1_0 = newKafkaVersion(3, 4, 1, 0) V3_5_0_0 = newKafkaVersion(3, 5, 0, 0) V3_5_1_0 = newKafkaVersion(3, 5, 1, 0) V3_6_0_0 = newKafkaVersion(3, 6, 0, 0) SupportedVersions = []KafkaVersion{ V0_8_2_0, V0_8_2_1, V0_8_2_2, V0_9_0_0, V0_9_0_1, V0_10_0_0, V0_10_0_1, V0_10_1_0, V0_10_1_1, V0_10_2_0, V0_10_2_1, V0_10_2_2, V0_11_0_0, V0_11_0_1, V0_11_0_2, V1_0_0_0, V1_0_1_0, V1_0_2_0, V1_1_0_0, V1_1_1_0, V2_0_0_0, V2_0_1_0, V2_1_0_0, V2_1_1_0, V2_2_0_0, V2_2_1_0, V2_2_2_0, V2_3_0_0, V2_3_1_0, V2_4_0_0, V2_4_1_0, V2_5_0_0, V2_5_1_0, V2_6_0_0, V2_6_1_0, V2_6_2_0, V2_7_0_0, V2_7_1_0, V2_8_0_0, V2_8_1_0, V2_8_2_0, V3_0_0_0, V3_0_1_0, V3_0_2_0, V3_1_0_0, V3_1_1_0, V3_1_2_0, V3_2_0_0, V3_2_1_0, V3_2_2_0, V3_2_3_0, V3_3_0_0, V3_3_1_0, V3_3_2_0, V3_4_0_0, V3_4_1_0, V3_5_0_0, V3_5_1_0, V3_6_0_0, } MinVersion = V0_8_2_0 MaxVersion = V3_6_0_0 DefaultVersion = V2_1_0_0 // reduced set of protocol versions to matrix test fvtRangeVersions = []KafkaVersion{ V0_8_2_2, V0_10_2_2, V1_0_2_0, V1_1_1_0, V2_0_1_0, V2_2_2_0, V2_4_1_0, V2_6_2_0, V2_8_2_0, V3_1_2_0, V3_3_2_0, V3_6_0_0, } ) var ( // This regex validates that a string complies with the pre kafka 1.0.0 format for version strings, for example 0.11.0.3 validPreKafka1Version = regexp.MustCompile(`^0\.\d+\.\d+\.\d+$`) // This regex validates that a string 
complies with the post Kafka 1.0.0 format, for example 1.0.0
	validPostKafka1Version = regexp.MustCompile(`^\d+\.\d+\.\d+$`)
)

// ParseKafkaVersion parses and returns kafka version or error from a string
func ParseKafkaVersion(s string) (KafkaVersion, error) {
	if len(s) < 5 {
		return DefaultVersion, fmt.Errorf("invalid version `%s`", s)
	}
	var major, minor, veryMinor, patch uint
	var err error
	if s[0] == '0' {
		err = scanKafkaVersion(s, validPreKafka1Version, "0.%d.%d.%d", [3]*uint{&minor, &veryMinor, &patch})
	} else {
		err = scanKafkaVersion(s, validPostKafka1Version, "%d.%d.%d", [3]*uint{&major, &minor, &veryMinor})
	}
	if err != nil {
		return DefaultVersion, err
	}
	return newKafkaVersion(major, minor, veryMinor, patch), nil
}

func scanKafkaVersion(s string, pattern *regexp.Regexp, format string, v [3]*uint) error {
	if !pattern.MatchString(s) {
		return fmt.Errorf("invalid version `%s`", s)
	}
	_, err := fmt.Sscanf(s, format, v[0], v[1], v[2])
	return err
}

func (v KafkaVersion) String() string {
	if v.version[0] == 0 {
		return fmt.Sprintf("0.%d.%d.%d", v.version[1], v.version[2], v.version[3])
	}
	return fmt.Sprintf("%d.%d.%d", v.version[0], v.version[1], v.version[2])
}

golang-github-ibm-sarama-1.43.2/utils_test.go

package sarama

import "testing"

func TestVersionCompare(t *testing.T) {
	if V0_8_2_0.IsAtLeast(V0_8_2_1) {
		t.Error("0.8.2.0 >= 0.8.2.1")
	}
	if !V0_8_2_1.IsAtLeast(V0_8_2_0) {
		t.Error("! 0.8.2.1 >= 0.8.2.0")
	}
	if !V0_8_2_0.IsAtLeast(V0_8_2_0) {
		t.Error("! 0.8.2.0 >= 0.8.2.0")
	}
	if !V0_9_0_0.IsAtLeast(V0_8_2_1) {
		t.Error("! 0.9.0.0 >= 0.8.2.1")
	}
	if V0_8_2_1.IsAtLeast(V0_10_0_0) {
		t.Error("0.8.2.1 >= 0.10.0.0")
	}
	if !V1_0_0_0.IsAtLeast(V0_9_0_0) {
		t.Error("! 1.0.0.0 >= 0.9.0.0")
	}
	if V0_9_0_0.IsAtLeast(V1_0_0_0) {
		t.Error("0.9.0.0 >= 1.0.0.0")
	}
}

func TestVersionParsing(t *testing.T) {
	validVersions := []string{
		"0.8.2.0", "0.8.2.1", "0.8.2.2",
		"0.9.0.0", "0.9.0.1",
		"0.10.0.0", "0.10.0.1", "0.10.1.0", "0.10.1.1", "0.10.2.0", "0.10.2.1", "0.10.2.2",
		"0.11.0.0", "0.11.0.1", "0.11.0.2",
		"1.0.0", "1.0.1", "1.0.2",
		"1.1.0", "1.1.1",
		"2.0.0", "2.0.1",
		"2.1.0", "2.1.1",
		"2.2.0", "2.2.1", "2.2.2",
		"2.3.0", "2.3.1",
		"2.4.0", "2.4.1",
		"2.5.0", "2.5.1",
		"2.6.0", "2.6.1", "2.6.2", "2.6.3",
		"2.7.0", "2.7.1", "2.7.2",
		"2.8.0", "2.8.1",
		"3.0.0", "3.0.1",
		"3.1.0", "3.1.1",
		"3.2.0",
	}
	for _, s := range validVersions {
		v, err := ParseKafkaVersion(s)
		if err != nil {
			t.Errorf("could not parse valid version %s: %s", s, err)
		}
		if v.String() != s {
			t.Errorf("version %s != %s", v.String(), s)
		}
	}

	invalidVersions := []string{"0.8.2-4", "0.8.20", "1.19.0.0", "1.0.x"}
	for _, s := range invalidVersions {
		if _, err := ParseKafkaVersion(s); err == nil {
			t.Errorf("invalid version %s parsed without error", s)
		}
	}
}

golang-github-ibm-sarama-1.43.2/version.go

package sarama

import (
	"runtime/debug"
	"sync"
)

var (
	v     string
	vOnce sync.Once
)

func version() string {
	vOnce.Do(func() {
		bi, ok := debug.ReadBuildInfo()
		if ok {
			v = bi.Main.Version
		}
		if v == "" || v == "(devel)" {
			// if we can't read a go module version then they're using a git
			// clone or vendored module so all we can do is report "dev" for
			// the version to make a valid ApiVersions request
			v = "dev"
		}
	})
	return v
}
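The KafkaVersion helpers above (ParseKafkaVersion, IsAtLeast, String) are how callers pin the protocol version a client speaks. The following is a minimal, illustrative sketch of typical usage; it is not part of the archive, and it assumes the library's public client API (NewConfig and its Version field) imported under the github.com/IBM/sarama module path.

package main

import (
	"fmt"
	"log"

	"github.com/IBM/sarama"
)

func main() {
	// Parse a broker version string in either the pre-1.0 ("0.11.0.2")
	// or post-1.0 ("3.6.0") format.
	v, err := sarama.ParseKafkaVersion("3.6.0")
	if err != nil {
		log.Fatal(err)
	}

	// Pin the negotiated protocol version on the client configuration.
	cfg := sarama.NewConfig()
	cfg.Version = v

	// IsAtLeast is the usual way to gate version-dependent features.
	if cfg.Version.IsAtLeast(sarama.V0_11_0_0) {
		fmt.Println("record batches / transactional APIs are available")
	}
}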
golang-github-ibm-sarama-1.43.2/zstd.go

package sarama

import (
	"sync"

	"github.com/klauspost/compress/zstd"
)

// zstdMaxBufferedEncoders maximum number of not-in-use zstd encoders
// If the pool of encoders is exhausted then new encoders will be created on the fly
const zstdMaxBufferedEncoders = 1

type ZstdEncoderParams struct {
	Level int
}
type ZstdDecoderParams struct {
}

var zstdDecMap sync.Map

var zstdAvailableEncoders sync.Map

func getZstdEncoderChannel(params ZstdEncoderParams) chan *zstd.Encoder {
	if c, ok := zstdAvailableEncoders.Load(params); ok {
		return c.(chan *zstd.Encoder)
	}
	c, _ := zstdAvailableEncoders.LoadOrStore(params, make(chan *zstd.Encoder, zstdMaxBufferedEncoders))
	return c.(chan *zstd.Encoder)
}

func getZstdEncoder(params ZstdEncoderParams) *zstd.Encoder {
	select {
	case enc := <-getZstdEncoderChannel(params):
		return enc
	default:
		encoderLevel := zstd.SpeedDefault
		if params.Level != CompressionLevelDefault {
			encoderLevel = zstd.EncoderLevelFromZstd(params.Level)
		}
		zstdEnc, _ := zstd.NewWriter(nil, zstd.WithZeroFrames(true),
			zstd.WithEncoderLevel(encoderLevel),
			zstd.WithEncoderConcurrency(1))
		return zstdEnc
	}
}

func releaseEncoder(params ZstdEncoderParams, enc *zstd.Encoder) {
	select {
	case getZstdEncoderChannel(params) <- enc:
	default:
	}
}

func getDecoder(params ZstdDecoderParams) *zstd.Decoder {
	if ret, ok := zstdDecMap.Load(params); ok {
		return ret.(*zstd.Decoder)
	}
	// It's possible to race and create multiple new readers.
	// Only one will survive GC after use.
	zstdDec, _ := zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
	zstdDecMap.Store(params, zstdDec)
	return zstdDec
}

func zstdDecompress(params ZstdDecoderParams, dst, src []byte) ([]byte, error) {
	return getDecoder(params).DecodeAll(src, dst)
}

func zstdCompress(params ZstdEncoderParams, dst, src []byte) ([]byte, error) {
	enc := getZstdEncoder(params)
	out := enc.EncodeAll(src, dst)
	releaseEncoder(params, enc)
	return out, nil
}

golang-github-ibm-sarama-1.43.2/zstd_test.go

package sarama

import (
	"runtime"
	"testing"
)

func BenchmarkZstdMemoryConsumption(b *testing.B) {
	params := ZstdEncoderParams{Level: 9}
	buf := make([]byte, 1024*1024)
	for i := 0; i < len(buf); i++ {
		buf[i] = byte((i / 256) + (i * 257))
	}

	cpus := 96

	gomaxprocsBackup := runtime.GOMAXPROCS(cpus)
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		for j := 0; j < 2*cpus; j++ {
			_, _ = zstdCompress(params, nil, buf)
		}
		// drain the buffered encoder
		getZstdEncoder(params)

		// previously this would be achieved with
		// zstdEncMap.Delete(params)
	}
	runtime.GOMAXPROCS(gomaxprocsBackup)
}
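To make the pooled-encoder / shared-decoder flow in zstd.go concrete, here is a minimal in-package round-trip sketch. It is not part of the archive, the test name is hypothetical, and it relies only on zstdCompress, zstdDecompress, and the CompressionLevelDefault constant that zstd.go already references from elsewhere in the package.

package sarama

import (
	"bytes"
	"testing"
)

// TestZstdRoundTripSketch is an illustrative sketch (not an upstream test):
// compress with a pooled encoder, decompress with the shared decoder, and
// check the payload survives the round trip.
func TestZstdRoundTripSketch(t *testing.T) {
	in := []byte("hello, kafka")

	// CompressionLevelDefault makes getZstdEncoder fall back to zstd.SpeedDefault.
	compressed, err := zstdCompress(ZstdEncoderParams{Level: CompressionLevelDefault}, nil, in)
	if err != nil {
		t.Fatal(err)
	}

	out, err := zstdDecompress(ZstdDecoderParams{}, nil, compressed)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(in, out) {
		t.Fatalf("round trip mismatch: %q != %q", in, out)
	}
}

In normal use none of this is called directly: producers simply enable zstd on the configuration (Producer.Compression set to the zstd codec), and the pool above bounds how many idle encoders are kept per compression level.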